Mirror of https://github.com/ViViDboarder/bitwarden_rs.git, synced 2024-06-02 09:32:34 +00:00
Compare commits
357 Commits
@@ -3,6 +3,7 @@ target
# Data folder
data
.env

# IDE files
.vscode

@@ -10,5 +11,15 @@ data
*.iml

# Documentation
.github
*.md
*.txt
*.yml
*.yaml

# Docker folders
hooks
tools

# Web vault
web-vault
112 .env.template
@@ -1,14 +1,28 @@
## Bitwarden_RS Configuration File
## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .

## Main data folder
# DATA_FOLDER=data

## Database URL
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
## When using MySQL, this it is the URL to the DB, including username and password:
## Format: mysql://[user[:password]@]host/database_name
# DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
# DATABASE_URL=mysql://user:password@host[:port]/database_name
## When using PostgreSQL, specify an appropriate connection URI (recommended)
## or keyword/value connection string.
## Details:
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
# DATABASE_URL=postgresql://user:password@host[:port]/database_name

## Database max connections
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10

## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key

@@ -44,6 +58,10 @@
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true

## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"

## Logging to file
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log

@@ -56,7 +74,7 @@
## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## Setting it to "trace" or "debug" would also show logs for mounted
## routes and static file, websocket and alive requests
# LOG_LEVEL=Info

@@ -68,6 +86,10 @@
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true

## Database connection retries
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
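As a rough illustration of the retry behaviour described above (bounded retries, one-second delay, 0 meaning retry forever), here is a minimal Rust sketch; the `connect` helper and its types are hypothetical stand-ins, not the project's actual code:

```rust
use std::{thread, time::Duration};

// Hypothetical connection type and connect() helper, for illustration only.
struct Conn;
fn connect(_url: &str) -> Result<Conn, ()> { Err(()) }

/// Retry the startup connection with a 1-second delay between attempts;
/// `retries == 0` means retry indefinitely, matching DB_CONNECTION_RETRIES.
fn connect_with_retries(url: &str, retries: u32) -> Option<Conn> {
    let mut attempt = 0;
    loop {
        match connect(url) {
            Ok(conn) => return Some(conn),
            Err(_) if retries == 0 || attempt < retries => {
                attempt += 1;
                thread::sleep(Duration::from_secs(1));
            }
            Err(_) => return None,
        }
    }
}
```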

## Disable icon downloading
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
@@ -82,10 +104,11 @@
## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details
# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
## NOTE: Always enclose this regex withing single quotes!
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'

## Any IP which is not defined as a global IP will be blacklisted.
## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
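For a concrete picture of how such a blacklist pattern is applied, the sketch below matches a candidate host against the example regex using the `regex` crate this project lists in Cargo.toml; the function name is illustrative, not the server's actual routine:

```rust
use regex::Regex;

// The example pattern from above; the surrounding single quotes belong to
// the .env file, not to the regex itself.
fn is_blacklisted(host: &str) -> bool {
    let blacklist = Regex::new(r"^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$").unwrap();
    blacklist.is_match(host)
}

fn main() {
    assert!(is_blacklisted("192.168.1.10"));  // local address: icon is not fetched
    assert!(!is_blacklisted("example.com"));  // public domain: icon is fetched
}
```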

## Disable 2FA remember
@@ -93,6 +116,18 @@
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false

## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3

## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
# EMAIL_EXPIRATION_TIME=600

## Email token size
## Number of digits in an email token (min: 6, max: 19).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
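A token of EMAIL_TOKEN_SIZE digits is simply a string of random decimal digits; a hedged sketch using the `rand` crate pinned in Cargo.toml (0.7, hence the two-argument `gen_range`) — illustrative only, not the server's implementation:

```rust
use rand::Rng;

/// Generate a numeric token with `size` digits (6..=19 per the comment above).
fn generate_email_token(size: usize) -> String {
    let mut rng = rand::thread_rng();
    (0..size).map(|_| rng.gen_range(0, 10).to_string()).collect()
}

fn main() {
    println!("{}", generate_email_token(6)); // e.g. "493021"
}
```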

## Controls if new users can register
# SIGNUPS_ALLOWED=true
@@ -114,6 +149,14 @@
## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org

## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
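The three forms of ORG_CREATION_USERS reduce to a small policy check; the sketch below mirrors the semantics spelled out in the comments (names are illustrative, not the actual implementation):

```rust
/// Decide whether `email` may create organizations, given the raw
/// ORG_CREATION_USERS value described above.
fn can_create_org(setting: &str, email: &str) -> bool {
    match setting.trim() {
        "" | "all" => true, // blank or 'all': every user (the default)
        "none" => false,    // 'none': no user may create orgs
        list => list        // comma-separated whitelist of addresses
            .split(',')
            .any(|allowed| allowed.trim().eq_ignore_ascii_case(email)),
    }
}
```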

## Token for the admin interface, preferably use a long random string
## One option is to use 'openssl rand -base64 48'
## If not set, the admin panel is disabled
@@ -125,6 +168,16 @@

## Invitations org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Bitwarden_RS

## Per-organization attachment limit (KB)
## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment limit (KB).
## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
# USER_ATTACHMENT_LIMIT=


## Controls the PBBKDF password iterations to apply on the server
## The change only applies when the password is changed

@@ -140,6 +193,13 @@
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443

## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=

## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/

@@ -162,7 +222,7 @@
## Authenticator Settings
## Disable authenticator time drifted codes to be valid.
## TOTP codes of the previous and next 30 seconds will be invalid
##
## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
## we allow by default the TOTP code which was valid one step back and one in the future.
## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes.
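In RFC 6238 terms, time is divided into 30-second steps, and the window described above means three candidate codes are accepted per attempt. A schematic sketch of that check, with a placeholder `totp_code` standing in for the real HMAC-based generator:

```rust
// Placeholder generator: the real code derives a 6-digit value from an
// HMAC over the time step, per RFC 6238. This stub only illustrates shape.
fn totp_code(secret: &[u8], step: u64) -> u32 {
    ((secret.iter().map(|&b| b as u64).sum::<u64>() ^ step) % 1_000_000) as u32
}

/// Accept the current 30-second step plus one step back and one forward,
/// i.e. the "3 valid codes" window the comment above warns about.
fn totp_is_valid(secret: &[u8], submitted: u32, now_secs: u64) -> bool {
    let current = now_secs / 30;
    (current.saturating_sub(1)..=current + 1).any(|step| totp_code(secret, step) == submitted)
}
```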
@@ -183,11 +243,45 @@
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=bitwarden-rs@domain.tld
# SMTP_FROM_NAME=Bitwarden_RS
# SMTP_PORT=587
# SMTP_SSL=true
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_AUTH_MECHANISM="Plain"
# SMTP_TIMEOUT=15

## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
## Possible values: ["Plain", "Login", "Xoauth2"].
## Multiple options need to be separated by a comma ','.
# SMTP_AUTH_MECHANISM="Plain"

## Server name sent during the SMTP HELO
## By default this value should be is on the machine's hostname,
## but might need to be changed in case it trips some anti-spam filters
# HELO_NAME=

## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
# SMTP_DEBUG=false

## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false

## Accept Invalid Certificates
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
# SMTP_ACCEPT_INVALID_CERTS=false

## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
# REQUIRE_DEVICE_EMAIL=false

## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=

# vim: syntax=ini
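The interplay of SMTP_SSL and SMTP_EXPLICIT_TLS described in this hunk boils down to three connection modes; a small sketch of that decision logic (illustrative types, not the mailer's real code, and subject to the mislabelling caveat in bug #851):

```rust
/// The three transport modes the comments above describe.
enum SmtpMode {
    Plain,       // no encryption (ports 25/587)
    StartTls,    // explicit TLS: upgrade an insecure connection (port 587)
    ImplicitTls, // implicit TLS from the first byte (port 465)
}

fn smtp_mode(smtp_ssl: bool, smtp_explicit_tls: bool) -> SmtpMode {
    match (smtp_ssl, smtp_explicit_tls) {
        (false, _) => SmtpMode::Plain,
        (true, false) => SmtpMode::StartTls,
        (true, true) => SmtpMode::ImplicitTls, // requires SMTP_SSL=true, per the note above
    }
}
```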
1 .github/FUNDING.yml vendored
@@ -1 +1,2 @@
github: dani-garcia
custom: ["https://paypal.me/DaniGG"]
@@ -1,23 +1,41 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---
<!--
# ###
NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See: https://github.com/dani-garcia/bitwarden_rs/issues/1180
# ###
-->


<!--
Please fill out the following template to make solving your problem easier and faster for us.
This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them.
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.

Remember to hide/obfuscate personal and confidential information,
such as names, global IP/DNS adresses and especially passwords, if neccessary.
Remember to hide/obfuscate personal and confidential information,
such as names, global IP/DNS addresses and especially passwords, if necessary.
-->

### Subject of the issue
<!-- Describe your issue here.-->

### Your environment
<!-- The version number, obtained from the logs or the admin page -->
* Bitwarden_rs version:
<!-- The version number, obtained from the logs or the admin diagnostics page -->
<!-- Remember to check your issue on the latest version first! -->
* Bitwarden_rs version:
<!-- How the server was installed: Docker image / package / built from source -->
* Install method:
* Clients used: <!-- if applicable -->
* Reverse proxy and version: <!-- if applicable -->
* Version of mysql/postgresql: <!-- if applicable -->
* Other relevant information:

### Steps to reproduce
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
11 .github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,11 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your feature requests to the forum
Link: https://bitwardenrs.discourse.group/c/feature-requests
11 .github/ISSUE_TEMPLATE/help-with-installation-configuration.md vendored Normal file
@@ -0,0 +1,11 @@
---
name: Help with installation/configuration
about: Any questions about the setup of bitwarden_rs
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/help
11 .github/ISSUE_TEMPLATE/help-with-proxy-database-nas-setup.md vendored Normal file
@@ -0,0 +1,11 @@
---
name: Help with proxy/database/NAS setup
about: Any questions about third party software
title: ''
labels: better for forum
assignees: ''

---

# Please submit all your third party help requests to the forum
Link: https://bitwardenrs.discourse.group/c/third-party-help
125 .github/workflows/build.yml vendored Normal file
@@ -0,0 +1,125 @@
name: Build

on:
  push:
    # Ignore when there are only changes done too one of these paths
    paths-ignore:
      - "**.md"
      - "**.txt"
      - "azure-pipelines.yml"
      - "docker/**"
      - "hooks/**"
      - "tools/**"

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        channel:
          - nightly
          # - stable
        target-triple:
          - x86_64-unknown-linux-gnu
          # - x86_64-unknown-linux-musl
        include:
          - target-triple: x86_64-unknown-linux-gnu
            host-triple: x86_64-unknown-linux-gnu
            features: "sqlite,mysql,postgresql"
            channel: nightly
            os: ubuntu-18.04
            ext:
          # - target-triple: x86_64-unknown-linux-gnu
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,mysql,postgresql"
          #   channel: stable
          #   os: ubuntu-18.04
          #   ext:
          # - target-triple: x86_64-unknown-linux-musl
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,postgresql"
          #   channel: nightly
          #   os: ubuntu-18.04
          #   ext:
          # - target-triple: x86_64-unknown-linux-musl
          #   host-triple: x86_64-unknown-linux-gnu
          #   features: "sqlite,postgresql"
          #   channel: stable
          #   os: ubuntu-18.04
          #   ext:

    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
    runs-on: ${{ matrix.os }}
    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@v2
      # End Checkout the repo


      # Install musl-tools when needed
      - name: Install musl tools
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
      # End Install musl-tools when needed


      # Install dependencies
      - name: Install dependencies Ubuntu
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
        if: startsWith( matrix.os, 'ubuntu' )
      # End Install dependencies


      # Enable Rust Caching
      - uses: Swatinem/rust-cache@v1
      # End Enable Rust Caching


      # Uses the rust-toolchain file to determine version
      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          target: ${{ matrix.target-triple }}
      # End Uses the rust-toolchain file to determine version


      # Run cargo tests (In release mode to speed up cargo build afterwards)
      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Run cargo tests


      # Build the binary
      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
      # End Build the binary


      # Upload artifact to Github Actions
      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
          path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
      # End Upload artifact to Github Actions


      ## This is not used at the moment
      ## We could start using this when we can build static binaries
      # Upload to github actions release
      # - name: Release
      #   uses: Shopify/upload-to-release@1
      #   if: startsWith(github.ref, 'refs/tags/')
      #   with:
      #     name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
      #     path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
      # End Upload to github actions release
34 .github/workflows/hadolint.yml vendored Normal file
@@ -0,0 +1,34 @@
name: Hadolint

on:
  pull_request:
    # Ignore when there are only changes done too one of these paths
    paths:
      - "docker/**"

jobs:
  hadolint:
    name: Validate Dockerfile syntax
    runs-on: ubuntu-20.04
    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@v2
      # End Checkout the repo


      # Download hadolint
      - name: Download hadolint
        shell: bash
        run: |
          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
          sudo chmod +x /usr/local/bin/hadolint
        env:
          HADOLINT_VERSION: 1.19.0
      # End Download hadolint

      # Test Dockerfiles
      - name: Run hadolint
        shell: bash
        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
      # End Test Dockerfiles
70 .github/workflows/rust-win.yml.disabled vendored
@@ -1,70 +0,0 @@
name: build-windows

on: [push, pull_request]

jobs:
  build:

    runs-on: windows-latest

    strategy:
      matrix:
        db-backend: [sqlite, mysql, postgresql]

    steps:
      - uses: actions/checkout@v1

      - name: Cache choco cache
        uses: actions/cache@v1.0.3
        with:
          path: ~\AppData\Local\Temp\chocolatey
          key: ${{ runner.os }}-choco-cache

      - name: Install dependencies
        run: choco install openssl sqlite postgresql12 mysql

      - name: Cache cargo registry
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo index
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/git
          key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo build
        uses: actions/cache@v1.0.3
        with:
          path: target
          key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}

      - name: Install latest nightly
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
          profile: minimal
          target: x86_64-pc-windows-msvc

      - name: Build
        run: cargo.exe build --verbose --features ${{ matrix.db-backend }} --release --target x86_64-pc-windows-msvc
        env:
          OPENSSL_DIR: C:\Program Files\OpenSSL-Win64\

      - name: Run tests
        run: cargo test --features ${{ matrix.db-backend }}

      - name: Upload windows artifact
        uses: actions/upload-artifact@v1.0.0
        with:
          name: x86_64-pc-windows-msvc-${{ matrix.db-backend }}-bitwarden_rs
          path: target/release/bitwarden_rs.exe

      - name: Release
        uses: Shopify/upload-to-release@1.0.0
        if: startsWith(github.ref, 'refs/tags/')
        with:
          name: x86_64-pc-windows-msvc-${{ matrix.db-backend }}-bitwarden_rs
          path: target/release/bitwarden_rs.exe
          repo-token: ${{ secrets.GITHUB_TOKEN }}
149 .github/workflows/workspace.yml vendored
@@ -1,149 +0,0 @@
name: Workflow

on:
  push:
    paths-ignore:
      - "**.md"
  pull_request:
    paths-ignore:
      - "**.md"

jobs:
  build:
    name: Build
    strategy:
      fail-fast: false
      matrix:
        db-backend: [sqlite, mysql, postgresql]
        target:
          - x86_64-unknown-linux-gnu
          # - x86_64-unknown-linux-musl
          - x86_64-apple-darwin
          # - x86_64-pc-windows-msvc
        include:
          - target: x86_64-unknown-linux-gnu
            os: ubuntu-latest
            ext:
          # - target: x86_64-unknown-linux-musl
          #   os: ubuntu-latest
          #   ext:
          - target: x86_64-apple-darwin
            os: macOS-latest
            ext:
          # - target: x86_64-pc-windows-msvc
          #   os: windows-latest
          #   ext: .exe
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v1

      # - name: Cache choco cache
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'windows-latest'
      #   with:
      #     path: ~\AppData\Local\Temp\chocolatey
      #     key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}

      - name: Cache vcpkg installed
        uses: actions/cache@v1.0.3
        if: matrix.os == 'windows-latest'
        with:
          path: $VCPKG_ROOT/installed
          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
        env:
          VCPKG_ROOT: 'C:\vcpkg'

      - name: Cache vcpkg downloads
        uses: actions/cache@v1.0.3
        if: matrix.os == 'windows-latest'
        with:
          path: $VCPKG_ROOT/downloads
          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
        env:
          VCPKG_ROOT: 'C:\vcpkg'

      # - name: Cache homebrew
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'macOS-latest'
      #   with:
      #     path: ~/Library/Caches/Homebrew
      #     key: ${{ runner.os }}-brew-cache

      # - name: Cache apt
      #   uses: actions/cache@v1.0.3
      #   if: matrix.os == 'ubuntu-latest'
      #   with:
      #     path: /var/cache/apt/archives
      #     key: ${{ runner.os }}-apt-cache

      # Install dependencies
      - name: Install dependencies macOS
        run: brew update; brew install openssl sqlite libpq mysql
        if: matrix.os == 'macOS-latest'

      - name: Install dependencies Ubuntu
        run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
        if: matrix.os == 'ubuntu-latest'

      - name: Install dependencies Windows
        run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
        if: matrix.os == 'windows-latest'
        env:
          VCPKG_ROOT: 'C:\vcpkg'
      # End Install dependencies

      # Install rust nightly toolchain
      - name: Cache cargo registry
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo index
        uses: actions/cache@v1.0.3
        with:
          path: ~/.cargo/git
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo build
        uses: actions/cache@v1.0.3
        with:
          path: target
          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}

      - name: Install latest nightly
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
          profile: minimal
          target: ${{ matrix.target }}

      # Build
      - name: Build Win
        if: matrix.os == 'windows-latest'
        run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
        env:
          RUSTFLAGS: -Ctarget-feature=+crt-static
          VCPKG_ROOT: 'C:\vcpkg'

      - name: Build macOS / Ubuntu
        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
        run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}

      # Test
      - name: Run tests
        run: cargo test --features ${{ matrix.db-backend }}

      # Upload & Release
      - name: Upload artifact
        uses: actions/upload-artifact@v1.0.0
        with:
          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}

      - name: Release
        uses: Shopify/upload-to-release@1.0.0
        if: startsWith(github.ref, 'refs/tags/')
        with:
          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
          repo-token: ${{ secrets.GITHUB_TOKEN }}
21 .travis.yml
@@ -1,21 +0,0 @@
dist: xenial

env:
  global:
    - HADOLINT_VERSION=1.17.1

language: rust
rust: nightly
cache: cargo

before_install:
  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
  - sudo chmod +rx /usr/local/bin/hadolint
  - rustup set profile minimal

# Nothing to install
install: true
script:
  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
  - cargo test --features "sqlite"
  - cargo test --features "mysql"
3396 Cargo.lock generated
File diff suppressed because it is too large
99 Cargo.toml
@@ -14,8 +14,14 @@ build = "build.rs"
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres", "openssl"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]

# Enable unstable features, requires nightly
# Currently only used to enable rusts official ip support
unstable = []

[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"

@@ -26,101 +32,106 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "0.5.0-dev"

# HTTP client
reqwest = "0.9.24"
reqwest = { version = "0.10.10", features = ["blocking", "json"] }

# multipart/form-data support
multipart = { version = "0.16.1", features = ["server"], default-features = false }
multipart = { version = "0.17.0", features = ["server"], default-features = false }

# WebSockets library
ws = "0.9.1"
ws = { version = "0.10.0", package = "parity-ws" }

# MessagePack library
rmpv = "0.4.3"
rmpv = "0.4.6"

# Concurrent hashmap implementation
chashmap = "2.2.2"

# A generic serialization/deserialization framework
serde = "1.0.104"
serde_derive = "1.0.104"
serde_json = "1.0.44"
serde = "1.0.118"
serde_derive = "1.0.118"
serde_json = "1.0.60"

# Logging
log = "0.4.8"
fern = { version = "0.5.9", features = ["syslog-4"] }
log = "0.4.11"
fern = { version = "0.6.0", features = ["syslog-4"] }

# A safe, extensible ORM and Query builder
diesel = { version = "1.4.3", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.5", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"

# Bundled SQLite
libsqlite3-sys = { version = "0.16.0", features = ["bundled"], optional = true }
# Bundled SQLite
libsqlite3-sys = { version = "0.18.0", features = ["bundled"], optional = true }

# Crypto library
ring = "0.14.6"
# Crypto-related libraries
rand = "0.7.3"
ring = "0.16.19"

# UUID generation
uuid = { version = "0.8.1", features = ["v4"] }

# Date and time library for Rust
chrono = "0.4.10"
# Date and time libraries
chrono = "0.4.19"
chrono-tz = "0.5.3"
time = "0.2.23"

# TOTP library
oath = "0.10.2"

# Data encoding library
data-encoding = "2.1.2"
data-encoding = "2.3.1"

# JWT library
jsonwebtoken = "6.0.1"
jsonwebtoken = "7.2.0"

# U2F library
u2f = "0.1.6"
u2f = "0.2.0"

# Yubico Library
yubico = { version = "0.7.1", features = ["online-tokio"], default-features = false }
yubico = { version = "0.9.1", features = ["online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }

# Lazy static macro
lazy_static = "1.4.0"

# More derives
derive_more = "0.99.2"
# Lazy initialization
once_cell = "1.5.2"

# Numerical libraries
num-traits = "0.2.10"
num-derive = "0.3.0"
num-traits = "0.2.14"
num-derive = "0.3.3"

# Email libraries
lettre = "0.9.2"
lettre_email = "0.9.2"
native-tls = "0.2.3"
quoted_printable = "0.4.1"
lettre = { version = "0.10.0-alpha.4", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
newline-converter = "0.1.0"

# Template library
handlebars = "=2.0.2"
handlebars = { version = "3.5.1", features = ["dir_source"] }

# For favicon extraction from main website
soup = "0.4.1"
regex = "1.3.1"
soup = "0.5.0"
regex = "1.4.2"
data-url = "0.1.0"

# Required for SSL support for PostgreSQL
openssl = { version = "0.10.26", optional = true }
# Used by U2F, JWT and Postgres
openssl = "0.10.31"

# URL encoding library
percent-encoding = "2.1.0"
# Punycode conversion
idna = "0.2.0"

# CLI argument parsing
structopt = "0.3.21"

# Logging panics to logfile instead stderr only
backtrace = "0.3.55"

# Macro ident concatenation
paste = "1.0.4"

[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'b95b6765e1cc8be7c1e7eaef8a9d9ad940b0ac13' }

# Use git version for timeout fix #706
lettre = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
lettre_email = { git = 'https://github.com/lettre/lettre', rev = '24d694db3be017d82b1cdc8bf9da601420b31bb0' }
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }

# For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
@@ -1 +1 @@
docker/amd64/sqlite/Dockerfile
docker/amd64/Dockerfile
11 README.md
@@ -13,7 +13,7 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani

**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**

#### ⚠️**IMPORTANT**⚠️: When using this server, please report any Bitwarden related bug-reports or suggestions [here](https://github.com/dani-garcia/bitwarden_rs/issues/new), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.
#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

---

@@ -21,14 +21,13 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani

Basically full implementation of Bitwarden API is provided including:

* Basic single user functionality
* Organizations support
* Attachments
* Vault API support
* Serving the static files for Vault interface
* Website icons API
* Authenticator and U2F support
* YubiKey OTP
* YubiKey and Duo support

## Installation
Pull the docker image and mount a volume from the host for persistent storage:

@@ -49,12 +48,14 @@ If you have an available domain name, you can get HTTPS certificates with [Let's
See the [bitwarden_rs wiki](https://github.com/dani-garcia/bitwarden_rs/wiki) for more information on how to configure and run the bitwarden_rs server.

## Get in touch
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please [use the forum](https://bitwardenrs.discourse.group/).

To ask a question, [raising an issue](https://github.com/dani-garcia/bitwarden_rs/issues/new) is fine. Please also report any bugs spotted here.
If you spot any bugs or crashes with bitwarden_rs itself, please [create an issue](https://github.com/dani-garcia/bitwarden_rs/issues/). Make sure there aren't any similar issues open, though!

If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org](https://matrix.to/#/#bitwarden_rs:matrix.org) room on Matrix. Feel free to join us!

### Sponsors
Thanks for your contribution to the project!

- [@Skaronator](https://github.com/Skaronator)
- [@ChonoN](https://github.com/ChonoN)
- [@themightychris](https://github.com/themightychris)
@@ -1,5 +1,5 @@
pool:
  vmImage: 'Ubuntu-16.04'
  vmImage: 'Ubuntu-18.04'

steps:
- script: |

@@ -10,16 +10,13 @@ steps:

- script: |
    sudo apt-get update
    sudo apt-get install -y libmysql++-dev
  displayName: Install libmysql
    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
  displayName: 'Install build libraries.'

- script: |
    rustc -Vv
    cargo -V
  displayName: Query rust and cargo versions

- script : cargo test --features "sqlite"
  displayName: 'Test project with sqlite backend'

- script : cargo test --features "mysql"
  displayName: 'Test project with mysql backend'
- script : cargo test --features "sqlite,mysql,postgresql"
  displayName: 'Test project with sqlite, mysql and postgresql backends'
31 build.rs
@@ -1,17 +1,24 @@
use std::process::Command;
use std::env;

fn main() {
    #[cfg(all(feature = "sqlite", feature = "mysql"))]
    compile_error!("Can't enable both sqlite and mysql at the same time");
    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
    compile_error!("Can't enable both sqlite and postgresql at the same time");
    #[cfg(all(feature = "mysql", feature = "postgresql"))]
    compile_error!("Can't enable both mysql and postgresql at the same time");
fn main() {
    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
    #[cfg(feature = "sqlite")]
    println!("cargo:rustc-cfg=sqlite");
    #[cfg(feature = "mysql")]
    println!("cargo:rustc-cfg=mysql");
    #[cfg(feature = "postgresql")]
    println!("cargo:rustc-cfg=postgresql");

    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
    compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");

    read_git_info().ok();

    if let Ok(version) = env::var("BWRS_VERSION") {
        println!("cargo:rustc-env=BWRS_VERSION={}", version);
        println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
    } else {
        read_git_info().ok();
    }
}

fn run(args: &[&str]) -> Result<String, std::io::Error> {

@@ -54,14 +61,16 @@ fn read_git_info() -> Result<(), std::io::Error> {
    } else {
        format!("{}-{}", last_tag, rev_short)
    };
    println!("cargo:rustc-env=GIT_VERSION={}", version);

    println!("cargo:rustc-env=BWRS_VERSION={}", version);
    println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);

    // To access these values, use:
    //    env!("GIT_EXACT_TAG")
    //    env!("GIT_LAST_TAG")
    //    env!("GIT_BRANCH")
    //    env!("GIT_REV")
    //    env!("GIT_VERSION")
    //    env!("BWRS_VERSION")

    Ok(())
}
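To make the effect of those `cargo:rustc-cfg` and `cargo:rustc-env` lines concrete, here is a sketch of how the main crate can consume them (standard Cargo mechanics; the `backend` function is invented for illustration):

```rust
// The `println!("cargo:rustc-cfg=sqlite")` in build.rs enables the short
// #[cfg(sqlite)] form instead of #[cfg(feature = "sqlite")].
#[cfg(sqlite)]
fn backend() -> &'static str {
    "sqlite"
}

fn main() {
    // Baked in at compile time by `cargo:rustc-env=BWRS_VERSION=...`.
    println!("bitwarden_rs {}", env!("BWRS_VERSION"));
}
```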
263
docker/Dockerfile.j2
Normal file
263
docker/Dockerfile.j2
Normal file
|
@ -0,0 +1,263 @@
|
|||
# This file was generated using a Jinja2 template.
|
||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
|
||||
|
||||
{% set build_stage_base_image = "rust:1.48" %}
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
{% set build_stage_base_image = "clux/muslrust:nightly-2020-11-22" %}
|
||||
{% set runtime_stage_base_image = "alpine:3.12" %}
|
||||
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||
{% elif "arm32v7" in target_file %}
|
||||
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.12" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||
{% endif %}
|
||||
{% elif "amd64" in target_file %}
|
||||
{% set runtime_stage_base_image = "debian:buster-slim" %}
|
||||
{% elif "arm64v8" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
|
||||
{% set package_arch_name = "arm64" %}
|
||||
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
||||
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
||||
{% elif "arm32v6" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
|
||||
{% set package_arch_name = "armel" %}
|
||||
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
||||
{% elif "arm32v7" in target_file %}
|
||||
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
|
||||
{% set package_arch_name = "armhf" %}
|
||||
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
||||
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
|
||||
{% endif %}
|
||||
{% if package_arch_name is defined %}
|
||||
{% set package_arch_prefix = ":" + package_arch_name %}
|
||||
{% else %}
|
||||
{% set package_arch_prefix = "" %}
|
||||
{% endif %}
|
||||
{% if package_arch_target is defined %}
|
||||
{% set package_arch_target_param = " --target=" + package_arch_target %}
|
||||
{% else %}
|
||||
{% set package_arch_target_param = "" %}
|
||||
{% endif %}
|
||||
# Using multistage build:
|
||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||
####################### VAULT BUILD IMAGE #######################
|
||||
{% set vault_image_hash = "sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0" %}
|
||||
{% raw %}
|
||||
# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
|
||||
# It can be viewed in multiple ways:
|
||||
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
|
||||
# - From the console, with the following commands:
|
||||
# docker pull bitwardenrs/web-vault:v2.17.1
|
||||
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
|
||||
#
|
||||
# - To do the opposite, and get the tag from the hash, you can do:
|
||||
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
|
||||
{% endraw %}
|
||||
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
|
||||
|
||||
########################## BUILD IMAGE ##########################
|
||||
FROM {{ build_stage_base_image }} as build
|
||||
|
||||
{% if "alpine" in target_file %}
|
||||
{% if "amd64" in target_file %}
|
||||
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
|
||||
ARG DB=sqlite,postgresql
|
||||
{% set features = "sqlite,postgresql" %}
|
||||
{% else %}
|
||||
# Alpine-based ARM (musl) only supports sqlite during compile time.
|
||||
ARG DB=sqlite
|
||||
{% set features = "sqlite" %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Debian-based builds support multidb
|
||||
ARG DB=sqlite,mysql,postgresql
|
||||
{% set features = "sqlite,mysql,postgresql" %}
|
||||
{% endif %}
|
||||
|
||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
|
||||
|
||||
# Don't download rust docs
|
||||
RUN rustup set profile minimal
|
||||
|
||||
{% if "alpine" in target_file %}
|
||||
ENV USER "root"
|
||||
ENV RUSTFLAGS='-C link-arg=-s'
|
||||
{% elif "arm" in target_file %}
|
||||
# Install required build libs for {{ package_arch_name }} architecture.
|
||||
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
|
||||
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
||||
/etc/apt/sources.list.d/deb-src.list \
|
||||
&& dpkg --add-architecture {{ package_arch_name }} \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
libssl-dev{{ package_arch_prefix }} \
|
||||
libc6-dev{{ package_arch_prefix }} \
|
||||
libpq5{{ package_arch_prefix }} \
|
||||
libpq-dev \
|
||||
libmariadb-dev{{ package_arch_prefix }} \
|
||||
libmariadb-dev-compat{{ package_arch_prefix }}
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc-{{ package_cross_compiler }} \
|
||||
&& mkdir -p ~/.cargo \
|
||||
&& echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
|
||||
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
|
||||
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
|
||||
|
||||
ENV CARGO_HOME "/root/.cargo"
|
||||
ENV USER "root"
|
||||
{% endif -%}
|
||||
|
||||
{% if "amd64" in target_file and "alpine" not in target_file %}
|
||||
# Install DB packages
|
||||
RUN apt-get update && apt-get install -y \
|
||||
--no-install-recommends \
|
||||
libmariadb-dev{{ package_arch_prefix }} \
|
||||
libpq-dev{{ package_arch_prefix }} \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
{% endif %}
|
||||
|
||||
# Creates a dummy project used to grab dependencies
|
||||
RUN USER=root cargo new --bin /app
|
||||
WORKDIR /app
|
||||
|
||||
# Copies over *only* your manifests and build files
|
||||
COPY ./Cargo.* ./
|
||||
COPY ./rust-toolchain ./rust-toolchain
|
||||
COPY ./build.rs ./build.rs

{% if "alpine" not in target_file %}
{% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so

ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "arm32v7" in target_file %}
RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
{% endif %}
{% endif %}

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM {{ runtime_stage_base_image }}

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
{% if "alpine" in runtime_stage_base_image %}
ENV SSL_CERT_DIR=/etc/ssl/certs
{% endif %}

{% if "amd64" not in target_file %}
RUN [ "cross-build-start" ]

{% endif %}
# Install needed libraries
{% if "alpine" in runtime_stage_base_image %}
RUN apk add --no-cache \
    openssl \
    curl \
{% if "sqlite" in features %}
    sqlite \
{% endif %}
{% if "mysql" in features %}
    mariadb-connector-c \
{% endif %}
{% if "postgresql" in features %}
    postgresql-libs \
{% endif %}
    ca-certificates
{% else %}
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
{% endif %}
{% if "alpine" in target_file and "arm32v7" in target_file %}
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit
{% endif %}

RUN mkdir /data
{% if "amd64" not in target_file %}

RUN [ "cross-build-end" ]

{% endif %}
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
{% else %}
COPY --from=build /app/target/release/bitwarden_rs .
{% endif %}

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
{% if "alpine" in target_file and "arm32v7" in target_file %}
CMD ["catatonit", "/start.sh"]
{% else %}
CMD ["/start.sh"]
{% endif %}
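To see how these placeholders resolve in practice, compare the template with the generated arm64v8/Dockerfile further down in this diff; the effective Jinja2 context for that target works out to roughly the following (variable names are from Dockerfile.j2, values are inferred from the generated output, not stated anywhere in the diff itself):

    # Illustrative context for target_file = "arm64v8/Dockerfile" (inferred):
    #   package_arch_target       = aarch64-unknown-linux-gnu
    #   package_arch_target_param = " --target=aarch64-unknown-linux-gnu"
    #   package_cross_compiler    = aarch64-linux-gnu
    #   package_arch_prefix       = :arm64
    #   runtime_stage_base_image  = balenalib/aarch64-debian:buster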
9  docker/Makefile  Normal file
@@ -0,0 +1,9 @@
OBJECTS := $(shell find -mindepth 2 -name 'Dockerfile*')

all: $(OBJECTS)

%/Dockerfile: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

%/Dockerfile.alpine: Dockerfile.j2 render_template
	./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
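In practice the whole directory can be regenerated with a single make run; the pattern rules pass each output path to the template as target_file, which is what all the `{% if ... in target_file %}` branches key on. A minimal usage sketch (the render_template helper itself is not shown in this diff, so assume only that it renders Dockerfile.j2 with the given JSON as the Jinja2 context):

    # Sketch: regenerate all arch-specific Dockerfiles (run inside docker/).
    make
    # Or render a single target by hand, mirroring what the Makefile does:
    ./render_template "Dockerfile.j2" '{"target_file":"arm64v8/Dockerfile"}' > arm64v8/Dockerfile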
3  docker/README.md  Normal file
@@ -0,0 +1,3 @@
The arch-specific directory names follow the arch identifiers used by the Docker official images:

https://github.com/docker-library/official-images/blob/master/README.md#architectures-other-than-amd64
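For instance, the directory names used here line up with the official image prefixes amd64/, arm32v6/, arm32v7/ and arm64v8/ on Docker Hub (as in arm32v7/alpine); the platform strings linux/amd64, linux/arm/v6, linux/arm/v7 and linux/arm64/v8 are the usual equivalents, though that mapping is background knowledge rather than something this diff states.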
@@ -1,110 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:arm64 \
    libc6-dev:arm64 \
    libmariadb-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl arm64 libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:arm64 \
    libc6-dev:arm64

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add aarch64-unknown-linux-gnu
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,50 +1,43 @@
# Using multistage build:
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls
# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build
FROM rust:1.48 as build

# set mysql backend
ARG DB=postgresql
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Using bundled SQLite, no need to install it
# RUN apt-get update && apt-get install -y\
#     --no-install-recommends \
#     sqlite3\
#     && rm -rf /var/lib/apt/lists/*

# Install MySQL package
# Install DB packages
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files

@@ -52,6 +45,7 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs


# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies

@@ -85,6 +79,7 @@ RUN apt-get update && apt-get install -y \
    ca-certificates \
    curl \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

@@ -97,12 +92,14 @@ EXPOSE 3012
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .
COPY --from=build /app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
CMD ["/start.sh"]
100  docker/amd64/Dockerfile.alpine  Normal file
@@ -0,0 +1,100 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
FROM clux/muslrust:nightly-2020-11-22 as build

# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add x86_64-unknown-linux-musl

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.12

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    postgresql-libs \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/start.sh"]
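As a usage sketch for the generated files: they COPY Rocket.toml and docker/*.sh from the build context, so builds are expected to run from the repository root rather than from docker/. The image tag below is illustrative; only the -f path comes from this diff:

    # Sketch: build and run the Alpine amd64 image from the repository root.
    docker build -t bitwardenrs:alpine -f docker/amd64/Dockerfile.alpine .
    docker run -d --name bitwarden -v bw-data:/data -p 80:80 bitwardenrs:alpine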
@@ -1,101 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

# Install MySQL package
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmariadb-dev \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,89 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libmysqlclient-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    mariadb-connector-c \
    curl \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,90 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build

# set postgresql backend
ARG DB=postgresql

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    postgresql-libs \
    curl \
    sqlite \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,95 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build app/target/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,84 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# Musl build image for statically compiled binary
FROM clux/muslrust:nightly-2019-12-19 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"

WORKDIR /app

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

RUN rustup target add x86_64-unknown-linux-musl

# Make sure that we actually build the project
RUN touch src/main.rs

# Build
RUN cargo build --features ${DB} --release

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.11

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    ca-certificates

RUN mkdir /data
VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1


# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
151  docker/arm32v6/Dockerfile  Normal file
@@ -0,0 +1,151 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armel \
    libc6-dev:armel \
    libpq5:armel \
    libpq-dev \
    libmariadb-dev:armel \
    libmariadb-dev-compat:armel

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/start.sh"]
151  docker/arm32v7/Dockerfile  Normal file
@@ -0,0 +1,151 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armhf \
    libc6-dev:armhf \
    libpq5:armhf \
    libpq-dev \
    libmariadb-dev:armhf \
    libmariadb-dev-compat:armhf

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/start.sh"]
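The RUN [ "cross-build-start" ] and [ "cross-build-end" ] hooks come from the balenalib base images and switch QEMU userland emulation on and off, which is what lets these ARM runtime stages be built on an x86_64 host. A common way to run such a build (the binfmt helper image below is a typical choice, not something this diff mandates):

    # Sketch: register QEMU binfmt handlers on an x86_64 host, then build for armhf.
    docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
    docker build -t bitwardenrs:arm32v7 -f docker/arm32v7/Dockerfile .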
106  docker/arm32v7/Dockerfile.alpine  Normal file
@@ -0,0 +1,106 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
ARG DB=sqlite

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.12

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apk add --no-cache \
    openssl \
    curl \
    sqlite \
    ca-certificates
RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["catatonit", "/start.sh"]
151  docker/arm64v8/Dockerfile  Normal file
@@ -0,0 +1,151 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################

# This hash is extracted from the docker web-vault builds and it's preferred over a simple tag because it's immutable.
# It can be viewed in multiple ways:
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
# - From the console, with the following commands:
#   docker pull bitwardenrs/web-vault:v2.17.1
#   docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.17.1
#
# - To do the opposite, and get the tag from the hash, you can do:
#   docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0
FROM bitwardenrs/web-vault@sha256:dcb7884dc5845b3842ff2204fe77482000b771495c6c359297ec3c03330d65e0 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.48 as build

# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color

# Don't download rust docs
RUN rustup set profile minimal

# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:arm64 \
    libc6-dev:arm64 \
    libpq5:arm64 \
    libpq-dev \
    libmariadb-dev:arm64 \
    libmariadb-dev-compat:arm64

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-aarch64-linux-gnu \
    && mkdir -p ~/.cargo \
    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
    apt-get download libmariadb-dev-compat:amd64 && \
    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
    rm -rvf ./libmariadb-dev-compat*.deb

# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so

ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
RUN find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    libmariadb-dev-compat \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
WORKDIR /
CMD ["/start.sh"]
@@ -1,110 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armel \
    libc6-dev:armel \
    libmariadb-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabi \
    && mkdir -p ~/.cargo \
    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armel libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armel \
    libc6-dev:armel

ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add arm-unknown-linux-gnueabi
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,111 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set mysql backend
ARG DB=mysql

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armhf \
    libc6-dev:armhf \
    libmariadb-dev:armhf


ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    libmariadbclient-dev \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
@@ -1,109 +0,0 @@
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
FROM alpine:3.11 as vault

ENV VAULT_VERSION "v2.12.0b"

ENV URL "https://github.com/dani-garcia/bw_web_builds/releases/download/$VAULT_VERSION/bw_web_$VAULT_VERSION.tar.gz"

RUN apk add --no-cache --upgrade \
    curl \
    tar

RUN mkdir /web-vault
WORKDIR /web-vault

SHELL ["/bin/ash", "-eo", "pipefail", "-c"]

RUN curl -L $URL | tar xz
RUN ls

########################## BUILD IMAGE ##########################
# We need to use the Rust build image, because
# we need the Rust compiler and Cargo tooling
FROM rust:1.40 as build

# set sqlite as default for DB ARG for backward compatibility
ARG DB=sqlite

# Don't download rust docs
RUN rustup set profile minimal

RUN apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    gcc-arm-linux-gnueabihf \
    && mkdir -p ~/.cargo \
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config

ENV CARGO_HOME "/root/.cargo"
ENV USER "root"

WORKDIR /app

# Prepare openssl armhf libs
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
    /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
    --no-install-recommends \
    libssl-dev:armhf \
    libc6-dev:armhf

ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Build
RUN rustup target add armv7-unknown-linux-gnueabihf
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10

RUN [ "cross-build-start" ]

# Install needed libraries
RUN apt-get update && apt-get install -y \
    --no-install-recommends \
    openssl \
    ca-certificates \
    curl \
    sqlite3 \
    && rm -rf /var/lib/apt/lists/*

RUN mkdir /data

RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .

COPY docker/healthcheck.sh ./healthcheck.sh

HEALTHCHECK --interval=30s --timeout=3s CMD sh healthcheck.sh || exit 1

# Configures the startup!
WORKDIR /
CMD ["/bitwarden_rs"]
docker/healthcheck.sh (59 changes; Normal file → Executable file)
@@ -1,8 +1,53 @@
#!/usr/bin/env sh
#!/bin/sh

if [ -z "$ROCKET_TLS"]
then
    curl --fail http://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
else
    curl --insecure --fail https://localhost:${ROCKET_PORT:-"80"}/alive || exit 1
fi
# Use the value of the corresponding env var (if present),
# or a default value otherwise.
: ${DATA_FOLDER:="data"}
: ${ROCKET_PORT:="80"}

CONFIG_FILE="${DATA_FOLDER}"/config.json

# Given a config key, return the corresponding config value from the
# config file. If the key doesn't exist, return an empty string.
get_config_val() {
    local key="$1"
    # Extract a line of the form:
    #     "domain": "https://bw.example.com/path",
    grep "\"${key}\":" "${CONFIG_FILE}" |
    # To extract just the value (https://bw.example.com/path), delete:
    # (1) everything up to and including the first ':',
    # (2) whitespace and '"' from the front,
    # (3) ',' and '"' from the back.
    sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//'
}

# Extract the base path from a domain URL. For example:
# - `` -> ``
# - `https://bw.example.com` -> ``
# - `https://bw.example.com/` -> ``
# - `https://bw.example.com/path` -> `/path`
# - `https://bw.example.com/multi/path` -> `/multi/path`
get_base_path() {
    echo "$1" |
    # Delete:
    # (1) everything up to and including '://',
    # (2) everything up to '/',
    # (3) trailing '/' from the back.
    sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
}

# Read domain URL from config.json, if present.
if [ -r "${CONFIG_FILE}" ]; then
    domain="$(get_config_val 'domain')"
    if [ -n "${domain}" ]; then
        # config.json 'domain' overrides the DOMAIN env var.
        DOMAIN="${domain}"
    fi
fi

base_path="$(get_base_path "${DOMAIN}")"
if [ -n "${ROCKET_TLS}" ]; then
    s='s'
fi
curl --insecure --fail --silent --show-error \
     "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
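The comment block above already lists the expected outputs of get_base_path; for quick testing, the same sed pipeline can be exercised standalone. A minimal sketch (the example URLs are the ones from the comments):

    #!/bin/sh
    # Minimal sketch: the same sed pipeline as get_base_path() in healthcheck.sh.
    get_base_path() {
        echo "$1" |
        sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||'
    }
    get_base_path 'https://bw.example.com'             # prints an empty line
    get_base_path 'https://bw.example.com/path'        # prints /path
    get_base_path 'https://bw.example.com/multi/path'  # prints /multi/path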
docker/render_template (17 lines; new Executable file)
@@ -0,0 +1,17 @@
#!/usr/bin/env python3

import os, argparse, json

import jinja2

args_parser = argparse.ArgumentParser()
args_parser.add_argument('template_file', help='Jinja2 template file to render.')
args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.')
cli_args = args_parser.parse_args()

render_vars = json.loads(cli_args.render_vars)
environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.getcwd()),
    trim_blocks=True,
)
print(environment.get_template(cli_args.template_file).render(render_vars))
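A hypothetical invocation, for illustration only (the template filename and JSON payload below are made up; the script itself only requires a template under the current directory and a valid JSON string as the second argument):

    # Hypothetical usage; Dockerfile.j2 and the JSON values are illustrative.
    ./docker/render_template Dockerfile.j2 '{"target": "aarch64-unknown-linux-gnu"}' > Dockerfile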
docker/start.sh (15 lines; new Executable file)
@@ -0,0 +1,15 @@
#!/bin/sh

if [ -r /etc/bitwarden_rs.sh ]; then
    . /etc/bitwarden_rs.sh
fi

if [ -d /etc/bitwarden_rs.d ]; then
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r "$f" ]; then
            . "$f"
        fi
    done
fi

exec /bitwarden_rs "${@}"
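This lets deployments inject settings without rebuilding the image; a sketch of a hypothetical drop-in file that start.sh would source (the path under /etc/bitwarden_rs.d and the chosen variable are illustrative, though SMTP_HOST is an existing bitwarden_rs setting):

    # /etc/bitwarden_rs.d/10-smtp.sh -- hypothetical drop-in sourced by start.sh
    SMTP_HOST=smtp.example.com
    export SMTP_HOST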
hooks/README.md (20 lines; new Normal file)
@@ -0,0 +1,20 @@
The hooks in this directory are used to create multi-arch images using Docker Hub automated builds.

Docker Hub hooks provide these predefined [environment variables](https://docs.docker.com/docker-hub/builds/advanced/#environment-variables-for-building-and-testing):

* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested.
* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested.
* `COMMIT_MSG`: the message from the commit being tested and built.
* `DOCKER_REPO`: the name of the Docker repository being built.
* `DOCKERFILE_PATH`: the Dockerfile currently being built.
* `DOCKER_TAG`: the Docker repository tag being built.
* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

## References

* https://docs.docker.com/docker-hub/builds/advanced/
* https://docs.docker.com/engine/reference/commandline/manifest/
* https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/
* https://success.docker.com/article/how-do-i-authenticate-with-the-v2-api
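To make the flow concrete: the whole approach reduces to pushing one image per arch, then stitching them into a single manifest list. A condensed sketch with placeholder repository and tag (see hooks/build and hooks/push for the real logic):

    # Condensed multi-arch flow; example/bitwarden_rs and 1.0.0 are placeholders.
    docker push example/bitwarden_rs:1.0.0-amd64
    docker push example/bitwarden_rs:1.0.0-arm64v8
    docker manifest create example/bitwarden_rs:1.0.0 \
        example/bitwarden_rs:1.0.0-amd64 \
        example/bitwarden_rs:1.0.0-arm64v8
    docker manifest annotate --os linux --arch arm64 --variant v8 \
        example/bitwarden_rs:1.0.0 example/bitwarden_rs:1.0.0-arm64v8
    docker manifest push --purge example/bitwarden_rs:1.0.0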
hooks/arches.sh (19 lines; new Normal file)
@@ -0,0 +1,19 @@
# The default Debian-based images support these arches for all database connections
#
# Other images (Alpine-based) currently
# support only a subset of these.
arches=(
    amd64
    arm32v6
    arm32v7
    arm64v8
)

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine build currently only works for amd64.
    os_suffix=.alpine
    arches=(
        amd64
        arm32v7
    )
fi
hooks/build (14 lines; new Executable file)
@@ -0,0 +1,14 @@
#!/bin/bash

echo ">>> Building images..."

source ./hooks/arches.sh

set -ex

for arch in "${arches[@]}"; do
    docker build \
        -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
        -f docker/${arch}/Dockerfile${os_suffix} \
        .
done
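With the Docker Hub variables filled in, one iteration of the loop above expands to a plain docker build; the values here are placeholders:

    # One iteration with DOCKER_REPO=example/bitwarden_rs, DOCKER_TAG=1.0.0, arch=arm64v8
    docker build \
        -t "example/bitwarden_rs:1.0.0-arm64v8" \
        -f docker/arm64v8/Dockerfile \
        .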
hooks/push (117 lines; new Executable file)
@@ -0,0 +1,117 @@
#!/bin/bash

echo ">>> Pushing images..."

export DOCKER_CLI_EXPERIMENTAL=enabled

declare -A annotations=(
    [amd64]="--os linux --arch amd64"
    [arm32v6]="--os linux --arch arm --variant v6"
    [arm32v7]="--os linux --arch arm --variant v7"
    [arm64v8]="--os linux --arch arm64 --variant v8"
)

source ./hooks/arches.sh

set -ex

declare -A images
for arch in ${arches[@]}; do
    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
done

# Push the images that were just built; manifest list creation fails if the
# images (manifests) referenced don't already exist in the Docker registry.
for image in "${images[@]}"; do
    docker push "${image}"
done

manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")

# If the Docker tag starts with a version number, assume the latest release is
# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
# to make it easier for users to track the latest release.
if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    if [[ "${DOCKER_TAG}" == *alpine ]]; then
        manifest_lists+=(${DOCKER_REPO}:alpine)
    else
        manifest_lists+=(${DOCKER_REPO}:latest)

        # Add an extra `latest-arm32v6` tag; Docker can't seem to properly
        # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
        # (https://github.com/moby/moby/issues/41017).
        #
        # Add this tag only for the SQLite image, as the MySQL and PostgreSQL
        # builds don't currently work on non-amd64 arches.
        #
        # TODO: Also add an `alpine-arm32v6` tag if multi-arch support for
        # Alpine-based bitwarden_rs images is implemented before this Docker
        # issue is fixed.
        if [[ ${DOCKER_REPO} == *server ]]; then
            docker tag "${DOCKER_REPO}:${DOCKER_TAG}-arm32v6" "${DOCKER_REPO}:latest-arm32v6"
            docker push "${DOCKER_REPO}:latest-arm32v6"
        fi
    fi
fi

for manifest_list in "${manifest_lists[@]}"; do
    # Create the (multi-arch) manifest list of arch-specific images.
    docker manifest create ${manifest_list} ${images[@]}

    # Make sure each image manifest is annotated with the correct arch info.
    # Docker does not auto-detect the arch of each cross-compiled image, so
    # everything would appear as `linux/amd64` otherwise.
    for arch in "${arches[@]}"; do
        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
    done

    # Push the manifest list.
    docker manifest push --purge ${manifest_list}
done

# Avoid logging credentials and tokens.
set +ex

# Delete the arch-specific tags, if credentials for doing so are available.
# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
# obtained using a personal access token results in a 403 error with
# {"detail": "access to the resource is forbidden with personal access token"}
if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
    exit 0
fi

# Given a JSON input on stdin, extract the string value associated with the
# specified key. This avoids an extra dependency on a tool like `jq`.
extract() {
    local key="$1"
    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
    # The colon may have whitespace on either side.
    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
    # Extract just <val> by deleting the last '"', and then greedily deleting
    # everything up to '"'.
    sed -e 's/"$//' -e 's/.*"//'
}

echo ">>> Getting API token..."
jwt=$(curl -sS -X POST \
    -H "Content-Type: application/json" \
    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
    "https://hub.docker.com/v2/users/login" |
    extract 'token')

# Strip the registry portion from `index.docker.io/user/repo`.
repo="${DOCKER_REPO#*/}"

for arch in ${arches[@]}; do
    # Don't delete the `arm32v6` tag; Docker can't seem to properly
    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
    # (https://github.com/moby/moby/issues/41017).
    if [[ ${arch} == 'arm32v6' ]]; then
        continue
    fi
    tag="${DOCKER_TAG}-${arch}"
    echo ">>> Deleting '${repo}:${tag}'..."
    curl -sS -X DELETE \
        -H "Authorization: Bearer ${jwt}" \
        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
done
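Once pushed, the manifest list can be sanity-checked from any Docker host with experimental CLI features enabled; a sketch with a placeholder repository and tag:

    # Verify that each arch made it into the pushed manifest list (placeholders).
    export DOCKER_CLI_EXPERIMENTAL=enabled
    docker manifest inspect example/bitwarden_rs:1.0.0 | grep -E '"architecture"|"variant"'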
@@ -0,0 +1 @@
DROP TABLE org_policies;

@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
    uuid     CHAR(36) NOT NULL PRIMARY KEY,
    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
    atype    INTEGER  NOT NULL,
    enabled  BOOLEAN  NOT NULL,
    data     TEXT     NOT NULL,

    UNIQUE (org_uuid, atype)
);

@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at DATETIME;

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;

@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
DROP TABLE org_policies;

@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
    uuid     CHAR(36) NOT NULL PRIMARY KEY,
    org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
    atype    INTEGER  NOT NULL,
    enabled  BOOLEAN  NOT NULL,
    data     TEXT     NOT NULL,

    UNIQUE (org_uuid, atype)
);

@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at TIMESTAMP;

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = TRUE
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;

@@ -0,0 +1,16 @@
CREATE TABLE favorites (
    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
    cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = TRUE
  AND user_uuid IS NOT NULL;

ALTER TABLE ciphers
DROP COLUMN favorite;

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -0,0 +1 @@
DROP TABLE org_policies;

@@ -0,0 +1,9 @@
CREATE TABLE org_policies (
    uuid     TEXT    NOT NULL PRIMARY KEY,
    org_uuid TEXT    NOT NULL REFERENCES organizations (uuid),
    atype    INTEGER NOT NULL,
    enabled  BOOLEAN NOT NULL,
    data     TEXT    NOT NULL,

    UNIQUE (org_uuid, atype)
);

@@ -0,0 +1 @@

@@ -0,0 +1,3 @@
ALTER TABLE ciphers
ADD COLUMN
    deleted_at DATETIME;

@@ -0,0 +1,2 @@
ALTER TABLE users_collections
ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT 0; -- FALSE

@@ -0,0 +1,13 @@
ALTER TABLE ciphers
ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE

-- Transfer favorite status for user-owned ciphers.
UPDATE ciphers
SET favorite = 1
WHERE EXISTS (
    SELECT * FROM favorites
    WHERE favorites.user_uuid = ciphers.user_uuid
      AND favorites.cipher_uuid = ciphers.uuid
);

DROP TABLE favorites;

@@ -0,0 +1,71 @@
CREATE TABLE favorites (
    user_uuid   TEXT NOT NULL REFERENCES users(uuid),
    cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),

    PRIMARY KEY (user_uuid, cipher_uuid)
);

-- Transfer favorite status for user-owned ciphers.
INSERT INTO favorites(user_uuid, cipher_uuid)
SELECT user_uuid, uuid
FROM ciphers
WHERE favorite = 1
  AND user_uuid IS NOT NULL;

-- Drop the `favorite` column from the `ciphers` table, using the 12-step
-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
-- Note that some steps aren't applicable and are omitted.

-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
--
-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
-- is a no-op within a transaction, so this step must be done outside of this
-- file, before starting the Diesel migrations.

-- 2. Start a transaction.
--
-- Diesel already runs each migration in its own transaction.

-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
--    desired revised format of table X. Make sure that the name "new_X" does
--    not collide with any existing table name, of course.

CREATE TABLE new_ciphers(
    uuid              TEXT NOT NULL PRIMARY KEY,
    created_at        DATETIME NOT NULL,
    updated_at        DATETIME NOT NULL,
    user_uuid         TEXT REFERENCES users(uuid),
    organization_uuid TEXT REFERENCES organizations(uuid),
    atype             INTEGER NOT NULL,
    name              TEXT NOT NULL,
    notes             TEXT,
    fields            TEXT,
    data              TEXT NOT NULL,
    password_history  TEXT,
    deleted_at        DATETIME
);

-- 5. Transfer content from X into new_X using a statement like:
--    INSERT INTO new_X SELECT ... FROM X.

INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
                        name, notes, fields, data, password_history, deleted_at)
SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
       name, notes, fields, data, password_history, deleted_at
FROM ciphers;

-- 6. Drop the old table X: DROP TABLE X.

DROP TABLE ciphers;

-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.

ALTER TABLE new_ciphers RENAME TO ciphers;

-- 11. Commit the transaction started in step 2.

-- 12. If foreign keys constraints were originally enabled, reenable them now.
--
-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
-- migrations are run in a separate database connection that is closed once
-- the migrations finish.
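As the step 1/step 12 comments explain, `PRAGMA foreign_keys` must be toggled on the connection that runs the migration, outside the migration file itself. A sketch of doing this by hand with the sqlite3 CLI (the database and SQL file paths are hypothetical; all three parts must share one sqlite3 session because the pragma is per-connection):

    # Hypothetical manual run; db.sqlite3 and migration.sql stand in for real paths.
    {
        echo 'PRAGMA foreign_keys = OFF;'  # step 1: per-connection, outside any transaction
        cat migration.sql                  # the statements above, saved to a file
        echo 'PRAGMA foreign_keys = ON;'   # step 12: re-enable enforcement
    } | sqlite3 db.sqlite3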
@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

@@ -0,0 +1 @@
ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
@@ -1 +1 @@
nightly-2019-12-19
nightly-2020-11-22
src/api/admin.rs (351 changes)
@@ -1,45 +1,61 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::process::Command;

use rocket::http::{Cookie, Cookies, SameSite};
use rocket::request::{self, FlashMessage, Form, FromRequest, Request};
use rocket::response::{content::Html, Flash, Redirect};
use rocket::{Outcome, Route};
use rocket::{
    http::{Cookie, Cookies, SameSite},
    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
    response::{content::Html, Flash, Redirect},
    Route,
};
use rocket_contrib::json::Json;

use crate::api::{ApiResult, EmptyResult, JsonResult};
use crate::auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp};
use crate::config::ConfigBuilder;
use crate::db::{backup_database, models::*, DbConn};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;
use crate::{
    api::{ApiResult, EmptyResult, JsonResult},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, models::*, DbConn, DbConnType},
    error::{Error, MapResult},
    mail,
    util::{get_display_size, format_naive_datetime_local},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    if CONFIG.admin_token().is_none() && !CONFIG.disable_admin_token() {
    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
        return routes![admin_disabled];
    }

    routes![
        admin_login,
        get_users,
        get_users_json,
        post_admin_login,
        admin_page,
        invite_user,
        logout,
        delete_user,
        deauth_user,
        disable_user,
        enable_user,
        remove_2fa,
        update_revision_users,
        post_config,
        delete_config,
        backup_db,
        test_smtp,
        users_overview,
        organizations_overview,
        diagnostics,
    ]
}

lazy_static! {
    static ref CAN_BACKUP: bool = cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok();
}
static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| t == DbConnType::sqlite)
        .unwrap_or(false)
        && Command::new("sqlite3").arg("-version").status().is_ok()
});

#[get("/")]
fn admin_disabled() -> &'static str {
@@ -50,13 +66,48 @@ const COOKIE_NAME: &str = "BWRS_ADMIN";
const ADMIN_PATH: &str = "/admin";

const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("GIT_VERSION");
const VERSION: Option<&str> = option_env!("BWRS_VERSION");

fn admin_path() -> String {
    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
}

struct Referer(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for Referer {
    type Error = ();

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
    }
}

/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
    // If we get a referer, use that to make it work when DOMAIN is not set
    if let Some(mut referer) = referer.0 {
        if let Some(start_index) = referer.find(ADMIN_PATH) {
            referer.truncate(start_index + ADMIN_PATH.len());
            return referer;
        }
    }

    if CONFIG.domain_set() {
        // Don't use CONFIG.domain() directly, since the user may want to keep a
        // trailing slash there, particularly when running under a subpath.
        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
    } else {
        // Last case, when neither referer nor domain is set; technically invalid but better than nothing
        ADMIN_PATH.to_string()
    }
}

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg});
    let json = json!({"page_content": "admin/login", "version": VERSION, "error": msg, "urlpath": CONFIG.domain_path()});

    // Return the page
    let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
@@ -69,14 +120,19 @@ struct LoginForm {
}

#[post("/", data = "<data>")]
fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
fn post_admin_login(
    data: Form<LoginForm>,
    mut cookies: Cookies,
    ip: ClientIp,
    referer: Referer,
) -> Result<Redirect, Flash<Redirect>> {
    let data = data.into_inner();

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(
            Redirect::to(ADMIN_PATH),
            Redirect::to(admin_url(referer)),
            "Invalid admin token, please try again.",
        ))
    } else {
@@ -85,14 +141,14 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
        let jwt = encode_jwt(&claims);

        let cookie = Cookie::build(COOKIE_NAME, jwt)
            .path(ADMIN_PATH)
            .max_age(chrono::Duration::minutes(20))
            .path(admin_path())
            .max_age(time::Duration::minutes(20))
            .same_site(SameSite::Strict)
            .http_only(true)
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(ADMIN_PATH))
        Ok(Redirect::to(admin_url(referer)))
    }
}

@@ -107,21 +163,69 @@ fn _validate_token(token: &str) -> bool {
struct AdminTemplateData {
    page_content: String,
    version: Option<&'static str>,
    users: Vec<Value>,
    users: Option<Vec<Value>>,
    organizations: Option<Vec<Value>>,
    diagnostics: Option<Value>,
    config: Value,
    can_backup: bool,
    logged_in: bool,
    urlpath: String,
}

impl AdminTemplateData {
    fn new(users: Vec<Value>) -> Self {
    fn new() -> Self {
        Self {
            page_content: String::from("admin/page"),
            page_content: String::from("admin/settings"),
            version: VERSION,
            users,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            organizations: None,
            diagnostics: None,
        }
    }

    fn users(users: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/users"),
            version: VERSION,
            users: Some(users),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            organizations: None,
            diagnostics: None,
        }
    }

    fn organizations(organizations: Vec<Value>) -> Self {
        Self {
            page_content: String::from("admin/organizations"),
            version: VERSION,
            organizations: Some(organizations),
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: None,
        }
    }

    fn diagnostics(diagnostics: Value) -> Self {
        Self {
            page_content: String::from("admin/diagnostics"),
            version: VERSION,
            organizations: None,
            config: CONFIG.prepare_json(),
            can_backup: *CAN_BACKUP,
            logged_in: true,
            urlpath: CONFIG.domain_path(),
            users: None,
            diagnostics: Some(diagnostics),
        }
    }

@@ -131,11 +235,8 @@ impl AdminTemplateData {
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    let text = AdminTemplateData::new(users_json).render()?;
fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
    let text = AdminTemplateData::new().render()?;
    Ok(Html(text))
}

@@ -153,66 +254,101 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
        err!("User already exists")
    }

    if !CONFIG.invitations_allowed() {
        err!("Invitations are not allowed")
    }

    let mut user = User::new(email);
    user.save(&conn)?;

    if CONFIG.mail_enabled() {
        let org_name = "bitwarden_rs";
        mail::send_invite(&user.email, &user.uuid, None, None, &org_name, None)
        mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)
    } else {
        let invitation = Invitation::new(data.email);
        invitation.save(&conn)
    }
}

#[post("/test/smtp", data = "<data>")]
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
    let data: InviteData = data.into_inner();

    if CONFIG.mail_enabled() {
        mail::send_test(&data.email)
    } else {
        err!("Mail is not enabled")
    }
}

#[get("/logout")]
fn logout(mut cookies: Cookies) -> Result<Redirect, ()> {
fn logout(mut cookies: Cookies, referer: Referer) -> Result<Redirect, ()> {
    cookies.remove(Cookie::named(COOKIE_NAME));
    Ok(Redirect::to(ADMIN_PATH))
    Ok(Redirect::to(admin_url(referer)))
}

#[get("/users")]
fn get_users(_token: AdminToken, conn: DbConn) -> JsonResult {
fn get_users_json(_token: AdminToken, conn: DbConn) -> JsonResult {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();

    Ok(Json(Value::Array(users_json)))
}

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
    let users_json: Vec<Value> = users.iter()
        .map(|u| {
            let mut usr = u.to_json(&conn);
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
            usr["user_enabled"] = json!(u.enabled);
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
            usr["last_active"] = match u.last_active(&conn) {
                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
                None => json!("Never")
            };
            usr
        }).collect();

    let text = AdminTemplateData::users(users_json).render()?;
    Ok(Html(text))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    user.delete(&conn)
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();

    user.save(&conn)
}

#[post("/users/<uuid>/disable")]
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    Device::delete_all_by_user(&user.uuid, &conn)?;
    user.reset_security_stamp();
    user.enabled = false;

    user.save(&conn)
}

#[post("/users/<uuid>/enable")]
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    user.enabled = true;

    user.save(&conn)
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_uuid(&uuid, &conn) {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };

    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    user.totp_recover = None;
    user.save(&conn)
@@ -223,6 +359,109 @@ fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
    User::update_all_revisions(&conn)
}

#[get("/organizations/overview")]
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let organizations = Organization::get_all(&conn);
    let organizations_json: Vec<Value> = organizations.iter().map(|o| {
        let mut org = o.to_json();
        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
        org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
        org
    }).collect();

    let text = AdminTemplateData::organizations(organizations_json).render()?;
    Ok(Html(text))
}

#[derive(Deserialize)]
struct WebVaultVersion {
    version: String,
}

#[derive(Deserialize)]
struct GitRelease {
    tag_name: String,
}

#[derive(Deserialize)]
struct GitCommit {
    sha: String,
}

fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
    use reqwest::{blocking::Client, header::USER_AGENT};
    use std::time::Duration;
    let github_api = Client::builder().build()?;

    Ok(
        github_api.get(url)
            .timeout(Duration::from_secs(10))
            .header(USER_AGENT, "Bitwarden_RS")
            .send()?
            .error_for_status()?
            .json::<T>()?
    )
}

#[get("/diagnostics")]
fn diagnostics(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
    use std::net::ToSocketAddrs;
    use chrono::prelude::*;
    use crate::util::read_file_string;

    let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
    let vault_version_str = read_file_string(&vault_version_path)?;
    let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;

    let github_ips = ("github.com", 0).to_socket_addrs().map(|mut i| i.next());
    let (dns_resolved, dns_ok) = match github_ips {
        Ok(Some(a)) => (a.ip().to_string(), true),
        _ => ("Could not resolve domain name.".to_string(), false),
    };

    // If the DNS check failed, do not even attempt to check for new versions, since we were not able to resolve github.com
    let (latest_release, latest_commit, latest_web_build) = if dns_ok {
        (
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
                Ok(r) => r.tag_name,
                _ => "-".to_string()
            },
            match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
                Ok(mut c) => {
                    c.sha.truncate(8);
                    c.sha
                },
                _ => "-".to_string()
            },
            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
                Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
                _ => "-".to_string()
            },
        )
    } else {
        ("-".to_string(), "-".to_string(), "-".to_string())
    };

    // Run the date check as the last item right before filling the json.
    // This should keep the time difference between the browser and the server as small as possible.
    let dt = Utc::now();
    let server_time = dt.format("%Y-%m-%d %H:%M:%S UTC").to_string();

    let diagnostics_json = json!({
        "dns_resolved": dns_resolved,
        "server_time": server_time,
        "web_vault_version": web_vault_version.version,
        "latest_release": latest_release,
        "latest_commit": latest_commit,
        "latest_web_build": latest_web_build,
    });

    let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
    Ok(Html(text))
}

#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
    let data: ConfigBuilder = data.into_inner();

@@ -1,19 +1,15 @@
use chrono::Utc;
use rocket_contrib::json::Json;

use crate::db::models::*;
use crate::db::DbConn;
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
};

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType};
use crate::auth::{decode_delete, decode_invite, decode_verify_email, Headers};
use crate::crypto;
use crate::mail;

use crate::CONFIG;

use rocket::Route;

pub fn routes() -> Vec<Route> {
pub fn routes() -> Vec<rocket::Route> {
    routes![
        register,
        profile,
@@ -36,6 +32,7 @@ pub fn routes() -> Vec<Route> {
        revision_date,
        password_hint,
        prelogin,
        verify_password,
    ]
}

@@ -68,7 +65,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
    let mut user = match User::find_by_mail(&data.Email, &conn) {
        Some(user) => {
            if !user.password_hash.is_empty() {
                if CONFIG.signups_allowed() {
                if CONFIG.is_signup_allowed(&data.Email) {
                    err!("User already exists")
                } else {
                    err!("Registration not allowed or user already exists")
@@ -89,14 +86,17 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
            }

            user
        } else if CONFIG.signups_allowed() {
        } else if CONFIG.is_signup_allowed(&data.Email) {
            err!("Account with this email already exists")
        } else {
            err!("Registration not allowed or user already exists")
        }
    }
    None => {
        if CONFIG.signups_allowed() || Invitation::take(&data.Email, &conn) || CONFIG.can_signup_user(&data.Email) {
        // Order is important here; the invitation check must come first
        // because the bitwarden_rs admin can invite anyone, regardless
        // of other signup restrictions.
        if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
            User::new(data.Email.clone())
        } else {
            err!("Registration not allowed or user already exists")

@@ -115,7 +115,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
        user.client_kdf_type = client_kdf_type;
    }

    user.set_password(&data.MasterPasswordHash);
    user.set_password(&data.MasterPasswordHash, None);
    user.akey = data.Key;

    // Add extra fields if present
@@ -207,7 +207,12 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
    user.public_key = Some(data.PublicKey);

    user.save(&conn)?;
    Ok(Json(user.to_json(&conn)))

    Ok(Json(json!({
        "PrivateKey": user.private_key,
        "PublicKey": user.public_key,
        "Object": "keys"
    })))
}

#[derive(Deserialize)]

@@ -227,7 +232,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
        err!("Invalid password")
    }

    user.set_password(&data.NewMasterPasswordHash);
    user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
    user.akey = data.Key;
    user.save(&conn)
}

@@ -254,7 +259,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
    user.client_kdf_iter = data.KdfIterations;
    user.client_kdf_type = data.Kdf;
    user.set_password(&data.NewMasterPasswordHash);
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;
    user.save(&conn)
}
@@ -333,6 +338,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    user.akey = data.Key;
    user.private_key = Some(data.PrivateKey);
    user.reset_security_stamp();
    user.reset_stamp_exception();

    user.save(&conn)
}

@@ -371,8 +377,8 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
        err!("Email already in use");
    }

    if !CONFIG.signups_allowed() && !CONFIG.can_signup_user(&data.NewEmail) {
        err!("Email cannot be changed to this address");
    if !CONFIG.is_email_domain_allowed(&data.NewEmail) {
        err!("Email domain not allowed");
    }

    let token = crypto::generate_token(6)?;

@@ -440,7 +446,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
    user.email_new = None;
    user.email_new_token = None;

    user.set_password(&data.NewMasterPasswordHash);
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;

    user.save(&conn)

@@ -455,7 +461,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
    }

    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
        error!("Error sending delete account email: {:#?}", e);
        error!("Error sending verify_email email: {:#?}", e);
    }

    Ok(())
@ -619,3 +625,20 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
|
|||
"KdfIterations": kdf_iter
|
||||
})))
|
||||
}
|
||||
#[derive(Deserialize)]
|
||||
#[allow(non_snake_case)]
|
||||
struct VerifyPasswordData {
|
||||
MasterPasswordHash: String,
|
||||
}
|
||||
|
||||
#[post("/accounts/verify-password", data = "<data>")]
|
||||
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
|
||||
let data: VerifyPasswordData = data.into_inner().data;
|
||||
let user = headers.user;
|
||||
|
||||
if !user.check_valid_password(&data.MasterPasswordHash) {
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
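The extra argument that `set_password` gains above feeds a security-stamp exception: a single named route (for example "post_rotatekey") stays callable with the pre-rotation stamp, so the client can finish the operation it started. A minimal, self-contained sketch of that idea — the struct fields and stand-in values below are assumptions for illustration, not the actual model code:

struct User {
    password_hash: String,
    security_stamp: String,
    stamp_exception: Option<String>,
}

impl User {
    fn set_password(&mut self, password_hash: &str, allow_next_route: Option<&str>) {
        self.password_hash = password_hash.to_string();
        // Remember at most one route that may still be used with the old stamp,
        // e.g. Some("post_rotatekey") while the client finishes a key rotation.
        self.stamp_exception = allow_next_route.map(String::from);
        // Rotating the stamp invalidates all other existing sessions.
        self.security_stamp = "new-random-uuid".to_string(); // stand-in for a real UUID
    }
}

fn main() {
    let mut u = User {
        password_hash: String::new(),
        security_stamp: String::new(),
        stamp_exception: None,
    };
    u.set_password("hashed", Some("post_rotatekey"));
    assert_eq!(u.stamp_exception.as_deref(), Some("post_rotatekey"));
}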
@@ -1,28 +1,33 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;

use rocket::http::ContentType;
use rocket::{request::Form, Data, Route};

use chrono::{NaiveDateTime, Utc};
use rocket::{http::ContentType, request::Form, Data, Route};
use rocket_contrib::json::Json;
use serde_json::Value;

use multipart::server::save::SavedData;
use multipart::server::{Multipart, SaveResult};

use data_encoding::HEXLOWER;
use multipart::server::{save::SavedData, Multipart, SaveResult};

use crate::db::models::*;
use crate::db::DbConn;

use crate::crypto;

use crate::api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType};
use crate::auth::Headers;

use crate::CONFIG;
use crate::{
    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
    auth::Headers,
    crypto,
    db::{models::*, DbConn},
    CONFIG,
};

pub fn routes() -> Vec<Route> {
    // Note that many routes have an `admin` variant; this seems to be
    // because the stored procedure that upstream Bitwarden uses to determine
    // whether the user can edit a cipher doesn't take into account whether
    // the user is an org owner/admin. The `admin` variant first checks
    // whether the user is an owner/admin of the relevant org, and if so,
    // allows the operation unconditionally.
    //
    // bitwarden_rs factors in the org owner/admin status as part of
    // determining the write accessibility of a cipher, so most
    // admin/non-admin implementations can be shared.
    routes![
        sync,
        get_ciphers,

@@ -44,15 +49,24 @@ pub fn routes() -> Vec<Route> {
        post_cipher_admin,
        post_cipher_share,
        put_cipher_share,
        put_cipher_share_seleted,
        put_cipher_share_selected,
        post_cipher,
        put_cipher,
        delete_cipher_post,
        delete_cipher_post_admin,
        delete_cipher_put,
        delete_cipher_put_admin,
        delete_cipher,
        delete_cipher_admin,
        delete_cipher_selected,
        delete_cipher_selected_post,
        delete_cipher_selected_put,
        delete_cipher_selected_admin,
        delete_cipher_selected_post_admin,
        delete_cipher_selected_put_admin,
        restore_cipher_put,
        restore_cipher_put_admin,
        restore_cipher_selected,
        delete_all,
        move_cipher_selected,
        move_cipher_selected_put,

@@ -79,7 +93,10 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
    let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();

    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
    let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
    let ciphers_json: Vec<Value> = ciphers
        .iter()
        .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))

@@ -95,6 +112,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
        "Profile": user_json,
        "Folders": folders_json,
        "Collections": collections_json,
        "Policies": policies_json,
        "Ciphers": ciphers_json,
        "Domains": domains_json,
        "Object": "sync"

@@ -103,7 +121,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {

#[get("/ciphers")]
fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);

    let ciphers_json: Vec<Value> = ciphers
        .iter()

@@ -177,6 +195,14 @@ pub struct CipherData {
    #[serde(rename = "Attachments")]
    _Attachments: Option<Value>, // Unused, contains map of {id: filename}
    Attachments2: Option<HashMap<String, Attachments2Data>>,

    // The revision datetime (in ISO 8601 format) of the client's local copy
    // of the cipher. This is used to prevent a client from updating a cipher
    // when it doesn't have the latest version, as that can result in data
    // loss. It's not an error when no value is provided; this can happen
    // when using older client versions, or if the operation doesn't involve
    // updating an existing cipher.
    LastKnownRevisionDate: Option<String>,
}

#[derive(Deserialize, Debug)]

@@ -186,22 +212,35 @@ pub struct Attachments2Data {
    Key: String,
}

/// Called when an org admin clones an org cipher.
#[post("/ciphers/admin", data = "<data>")]
fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let data: ShareCipherData = data.into_inner().data;
    post_ciphers_create(data, headers, conn, nt)
}

/// Called when creating a new org-owned cipher, or cloning a cipher (whether
/// user- or org-owned). When cloning a cipher to a user-owned cipher,
/// `organizationId` is null.
#[post("/ciphers/create", data = "<data>")]
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let mut data: ShareCipherData = data.into_inner().data;

    let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
    cipher.user_uuid = Some(headers.user.uuid.clone());
    cipher.save(&conn)?;

    // When cloning a cipher, the Bitwarden clients seem to set this field
    // based on the cipher being cloned (when creating a new cipher, it's set
    // to null as expected). However, `cipher.created_at` is initialized to
    // the current time, so the stale data check will end up failing down the
    // line. Since this function only creates new ciphers (whether by cloning
    // or otherwise), we can just ignore this field entirely.
    data.Cipher.LastKnownRevisionDate = None;

    share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
}

#[post("/ciphers/create", data = "<data>")]
fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    post_ciphers_admin(data, headers, conn, nt)
}

/// Called when creating a new user-owned cipher.
#[post("/ciphers", data = "<data>")]
fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    let data: CipherData = data.into_inner().data;

@@ -221,6 +260,17 @@ pub fn update_cipher_from_data(
    nt: &Notify,
    ut: UpdateType,
) -> EmptyResult {
    // Check that the client isn't updating an existing cipher with stale data.
    if let Some(dt) = data.LastKnownRevisionDate {
        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
            Err(err) =>
                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
                err!("The client copy of this cipher is out of date. Resync the client and try again."),
            Ok(_) => (),
        }
    }

    if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
        err!("Organization mismatch. Please resync the client before updating the cipher")
    }
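In the stale-data check above, the `%+` format specifier asks chrono to parse an ISO 8601 / RFC 3339 timestamp, and the server copy counts as newer only when it is more than one second ahead, which absorbs minor clock jitter. A self-contained sketch of that comparison (the sample timestamps are made up):

use chrono::NaiveDateTime;

fn is_stale(server_updated_at: NaiveDateTime, client_revision: &str) -> bool {
    match NaiveDateTime::parse_from_str(client_revision, "%+") {
        // The client copy is stale if the server copy is more than a second newer.
        Ok(dt) => server_updated_at.signed_duration_since(dt).num_seconds() > 1,
        // An unparsable value is logged and tolerated in the real handler.
        Err(_) => false,
    }
}

fn main() {
    let server = NaiveDateTime::parse_from_str("2020-03-10T10:00:05Z", "%+").unwrap();
    assert!(is_stale(server, "2020-03-10T10:00:00Z"));
    assert!(!is_stale(server, "2020-03-10T10:00:05Z"));
}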
@@ -264,7 +314,10 @@ pub fn update_cipher_from_data(
    };

    if saved_att.cipher_uuid != cipher.uuid {
        err!("Attachment is not owned by the cipher")
        // Warn and break here: cloning a cipher sends attachment data, but the
        // attachments themselves are not cloned. Erroring out would abort the
        // whole clone and cause empty ciphers to appear.
        warn!("Attachment is not owned by the cipher");
        break;
    }

    saved_att.akey = Some(attachment.Key);

@@ -296,7 +349,6 @@ pub fn update_cipher_from_data(
    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
    // TODO: ******* Backwards compat end **********

    cipher.favorite = data.Favorite.unwrap_or(false);
    cipher.name = data.Name;
    cipher.notes = data.Notes;
    cipher.fields = data.Fields.map(|f| f.to_string());

@@ -305,6 +357,7 @@ pub fn update_cipher_from_data(

    cipher.save(&conn)?;
    cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;

    if ut != UpdateType::None {
        nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));

@@ -367,6 +420,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
    Ok(())
}

/// Called when an org admin modifies an existing org cipher.
#[put("/ciphers/<uuid>/admin", data = "<data>")]
fn put_cipher_admin(
    uuid: String,

@@ -403,6 +457,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
        None => err!("Cipher doesn't exist"),
    };

    // TODO: Check if only the folder ID or favorite status is being changed.
    // These are per-user properties that technically aren't part of the
    // cipher itself, so the user shouldn't need write access to change these.
    // Interestingly, upstream Bitwarden doesn't properly handle this either.

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher is not write accessible")
    }

@@ -536,7 +595,7 @@ struct ShareSelectedCipherData {
}

#[put("/ciphers/share", data = "<data>")]
fn put_cipher_share_seleted(
fn put_cipher_share_selected(
    data: JsonUpcase<ShareSelectedCipherData>,
    headers: Headers,
    conn: DbConn,

@@ -599,10 +658,13 @@ fn share_cipher_by_uuid(
        None => err!("Cipher doesn't exist"),
    };

    let mut shared_to_collection = false;

    match data.Cipher.OrganizationId.clone() {
        None => err!("Organization id not provided"),
        // If no organization ID is provided, do nothing; this is not an error,
        // because this path is taken when the Clone functionality is used.
        None => {}
        Some(organization_uuid) => {
            let mut shared_to_collection = false;
            for uuid in &data.CollectionIds {
                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, &conn) {
                    None => err!("Invalid collection ID provided"),

@@ -616,19 +678,20 @@ fn share_cipher_by_uuid(
                }
            }
        }
            update_cipher_from_data(
                &mut cipher,
                data.Cipher,
                &headers,
                shared_to_collection,
                &conn,
                &nt,
                UpdateType::CipherUpdate,
            )?;

            Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
        }
    };

    update_cipher_from_data(
        &mut cipher,
        data.Cipher,
        &headers,
        shared_to_collection,
        &conn,
        &nt,
        UpdateType::CipherUpdate,
    )?;

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
}

#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")]

@@ -642,20 +705,49 @@ fn post_attachment(
) -> JsonResult {
    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
        None => err_discard!("Cipher doesn't exist", data),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher is not write accessible")
        err_discard!("Cipher is not write accessible", data)
    }

    let mut params = content_type.params();
    let boundary_pair = params.next().expect("No boundary provided");
    let boundary = boundary_pair.1;

    let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
        match CONFIG.user_attachment_limit() {
            Some(0) => err_discard!("Attachments are disabled", data),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &conn);
                if left <= 0 {
                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
                }
                Some(left as u64)
            }
            None => None,
        }
    } else if let Some(ref org_uuid) = cipher.organization_uuid {
        match CONFIG.org_attachment_limit() {
            Some(0) => err_discard!("Attachments are disabled", data),
            Some(limit_kb) => {
                let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &conn);
                if left <= 0 {
                    err_discard!("Attachment size limit reached! Delete some files to open space", data)
                }
                Some(left as u64)
            }
            None => None,
        }
    } else {
        err_discard!("Cipher is neither owned by a user nor an organization", data);
    };

    let base_path = Path::new(&CONFIG.attachments_folder()).join(&cipher.uuid);

    let mut attachment_key = None;
    let mut error = None;

    Multipart::with_body(data.open(), boundary)
        .foreach_entry(|mut field| {

@@ -674,18 +766,21 @@ fn post_attachment(
            let file_name = HEXLOWER.encode(&crypto::get_random(vec![0; 10]));
            let path = base_path.join(&file_name);

            let size = match field.data.save().memory_threshold(0).size_limit(None).with_path(path) {
            let size = match field.data.save().memory_threshold(0).size_limit(size_limit).with_path(path.clone()) {
                SaveResult::Full(SavedData::File(_, size)) => size as i32,
                SaveResult::Full(other) => {
                    error!("Attachment is not a file: {:?}", other);
                    std::fs::remove_file(path).ok();
                    error = Some(format!("Attachment is not a file: {:?}", other));
                    return;
                }
                SaveResult::Partial(_, reason) => {
                    error!("Partial result: {:?}", reason);
                    std::fs::remove_file(path).ok();
                    error = Some(format!("Attachment size limit exceeded with this file: {:?}", reason));
                    return;
                }
                SaveResult::Error(e) => {
                    error!("Error: {:?}", e);
                    std::fs::remove_file(path).ok();
                    error = Some(format!("Error: {:?}", e));
                    return;
                }
            };

@@ -699,6 +794,10 @@ fn post_attachment(
        })
        .expect("Error processing multipart data");

    if let Some(ref e) = error {
        err!(e);
    }

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
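`err!` is replaced by `err_discard!` throughout `post_attachment` because the multipart body may not have been read yet when the handler bails out; discarding it keeps the connection in a usable state. The real macro is defined elsewhere in the crate; the following is only a sketch, under the assumption that it drains the body and then returns an error:

use std::io::{copy, sink, Read};

#[derive(Debug)]
struct Error(String);

// Drains whatever is left of the request body, then returns the error.
// `$data` stands in for rocket::Data, whose `open()` yields a reader.
macro_rules! err_discard {
    ($msg:expr, $data:expr) => {{
        copy(&mut $data, &mut sink()).ok();
        return Err(Error($msg.to_string()));
    }};
}

fn reject(mut body: impl Read) -> Result<(), Error> {
    err_discard!("Cipher doesn't exist", body)
}

fn main() {
    let body: &[u8] = b"unread multipart payload";
    assert!(reject(body).is_err());
}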
@@ -716,11 +815,7 @@ fn post_attachment_admin(
    post_attachment(uuid, data, content_type, headers, conn, nt)
}

#[post(
    "/ciphers/<uuid>/attachment/<attachment_id>/share",
    format = "multipart/form-data",
    data = "<data>"
)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
fn post_attachment_share(
    uuid: String,
    attachment_id: String,

@@ -774,50 +869,79 @@ fn delete_attachment_admin(

#[post("/ciphers/<uuid>/delete")]
fn delete_cipher_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[post("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_post_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[put("/ciphers/<uuid>/delete")]
fn delete_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
}

#[put("/ciphers/<uuid>/delete-admin")]
fn delete_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, true, &nt)
}

#[delete("/ciphers/<uuid>")]
fn delete_cipher(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[delete("/ciphers/<uuid>/admin")]
fn delete_cipher_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_cipher_by_uuid(&uuid, &headers, &conn, &nt)
    _delete_cipher_by_uuid(&uuid, &headers, &conn, false, &nt)
}

#[delete("/ciphers", data = "<data>")]
fn delete_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, &nt) {
            return error;
        };
    }

    Ok(())
    _delete_multiple_ciphers(data, headers, conn, false, nt)
}

#[post("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_multiple_ciphers(data, headers, conn, false, nt)
}

#[put("/ciphers/delete", data = "<data>")]
fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
}

#[delete("/ciphers/admin", data = "<data>")]
fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected(data, headers, conn, nt)
}

#[post("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected_post(data, headers, conn, nt)
}

#[put("/ciphers/delete-admin", data = "<data>")]
fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_cipher_selected_put(data, headers, conn, nt)
}

#[put("/ciphers/<uuid>/restore")]
fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/<uuid>/restore-admin")]
fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
}

#[put("/ciphers/restore", data = "<data>")]
fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    _restore_multiple_ciphers(data, headers, conn, nt)
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct MoveCipherData {

@@ -929,8 +1053,8 @@ fn delete_all(
    }
}

fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
    let cipher = match Cipher::find_by_uuid(&uuid, &conn) {
fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_delete: bool, nt: &Notify) -> EmptyResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

@@ -939,8 +1063,72 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Not
        err!("Cipher can't be deleted by user")
    }

    cipher.delete(&conn)?;
    nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
    if soft_delete {
        cipher.deleted_at = Some(Utc::now().naive_utc());
        cipher.save(&conn)?;
        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    } else {
        cipher.delete(&conn)?;
        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
    }

    Ok(())
}

fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, soft_delete: bool, nt: Notify) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt) {
            return error;
        };
    }

    Ok(())
}

fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
    let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
        Some(cipher) => cipher,
        None => err!("Cipher doesn't exist"),
    };

    if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
        err!("Cipher can't be restored by user")
    }

    cipher.deleted_at = None;
    cipher.save(&conn)?;

    nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
    Ok(())
}

fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let data: Value = data.into_inner().data;

    let uuids = match data.get("Ids") {
        Some(ids) => match ids.as_array() {
            Some(ids) => ids.iter().filter_map(Value::as_str),
            None => err!("Posted ids field is not an array"),
        },
        None => err!("Request missing ids field"),
    };

    for uuid in uuids {
        if let error @ Err(_) = _restore_cipher_by_uuid(uuid, &headers, &conn, &nt) {
            return error;
        };
    }

    Ok(())
}
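Both `_delete_multiple_ciphers` and `_restore_multiple_ciphers` use the `error @ Err(_)` pattern: it matches only the `Err` variant but binds the whole `Result`, so the first failure can be returned unchanged. A self-contained illustration:

fn step(n: i32) -> Result<(), String> {
    if n == 3 { Err(format!("step {} failed", n)) } else { Ok(()) }
}

fn run_all() -> Result<(), String> {
    for n in 1..=5 {
        // `e @ Err(_)` matches the Err variant but binds the entire Result,
        // so it can be returned as-is without re-wrapping the error value.
        if let e @ Err(_) = step(n) {
            return e;
        }
    }
    Ok(())
}

fn main() {
    assert_eq!(run_all(), Err("step 3 failed".to_string()));
}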
@@ -1,15 +1,13 @@
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::db::models::*;
use crate::db::DbConn;
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::Headers,
    db::{models::*, DbConn},
};

use crate::api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType};
use crate::auth::Headers;

use rocket::Route;

pub fn routes() -> Vec<Route> {
pub fn routes() -> Vec<rocket::Route> {
    routes![
        get_folders,
        get_folder,

@@ -50,7 +48,6 @@ fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {

#[derive(Deserialize)]
#[allow(non_snake_case)]

pub struct FolderData {
    pub Name: String,
}

@@ -2,7 +2,7 @@ mod accounts;
mod ciphers;
mod folders;
mod organizations;
pub(crate) mod two_factor;
pub mod two_factor;

pub fn routes() -> Vec<Route> {
    let mut mod_routes = routes![

@@ -29,14 +29,15 @@ pub fn routes() -> Vec<Route> {
// Move this somewhere else
//
use rocket::Route;

use rocket_contrib::json::Json;
use serde_json::Value;

use crate::api::{EmptyResult, JsonResult, JsonUpcase};
use crate::auth::Headers;
use crate::db::DbConn;
use crate::error::Error;
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase},
    auth::Headers,
    db::DbConn,
    error::Error,
};

#[put("/devices/identifier/<uuid>/clear-token")]
fn clear_device_token(uuid: String) -> EmptyResult {

@@ -146,10 +147,10 @@ fn hibp_breach(username: String) -> JsonResult {
        username
    );

    use reqwest::{header::USER_AGENT, Client};
    use reqwest::{blocking::Client, header::USER_AGENT};

    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let hibp_client = Client::builder().use_sys_proxy().build()?;
        let hibp_client = Client::builder().build()?;

        let res = hibp_client
            .get(&url)

@@ -172,7 +173,7 @@ fn hibp_breach(username: String) -> JsonResult {
            "BreachDate": "2019-08-18T00:00:00Z",
            "AddedDate": "2019-08-18T00:00:00Z",
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
            "LogoPath": "/bwrs_static/hibp.png",
            "LogoPath": "bwrs_static/hibp.png",
            "PwnCount": 0,
            "DataClasses": [
                "Error - No API key set!"
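The reqwest changes in `hibp_breach` track reqwest 0.10, where the synchronous API moved behind the `blocking` module and `use_sys_proxy()` was removed because system proxy settings are now honored by default. A small sketch of the new call shape (the URL and user-agent string are placeholders):

use reqwest::blocking::Client;
use reqwest::header::USER_AGENT;

fn fetch(url: &str) -> Result<String, reqwest::Error> {
    // In reqwest 0.10 the blocking Client picks up system proxies by default,
    // so no use_sys_proxy() call is needed.
    let client = Client::builder().build()?;
    client
        .get(url)
        .header(USER_AGENT, "bitwarden_rs-example")
        .send()?
        .text()
}

fn main() {
    // Hypothetical endpoint; shown only to illustrate the call shape.
    match fetch("https://example.com/") {
        Ok(body) => println!("{} bytes", body.len()),
        Err(e) => eprintln!("request failed: {}", e),
    }
}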
@@ -1,16 +1,14 @@
use rocket::request::Form;
use rocket::Route;
use num_traits::FromPrimitive;
use rocket::{request::Form, Route};
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::api::{
    EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType,
use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
    db::{models::*, DbConn},
    mail, CONFIG,
};
use crate::auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders};
use crate::db::models::*;
use crate::db::DbConn;
use crate::mail;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![

@@ -45,6 +43,11 @@ pub fn routes() -> Vec<Route> {
        delete_user,
        post_delete_user,
        post_org_import,
        list_policies,
        list_policies_token,
        get_policy,
        put_policy,
        get_plans,
    ]
}

@@ -74,6 +77,10 @@ struct NewCollectionData {

#[post("/organizations", data = "<data>")]
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
    if !CONFIG.is_org_creation_allowed(&headers.user.email) {
        err!("User not allowed to create organizations")
    }

    let data: OrgData = data.into_inner().data;

    let org = Organization::new(data.Name, data.BillingEmail);

@@ -210,7 +217,7 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
#[post("/organizations/<org_id>/collections", data = "<data>")]
fn post_organization_collections(
    org_id: String,
    _headers: AdminHeaders,
    headers: ManagerHeadersLoose,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {

@@ -221,9 +228,22 @@ fn post_organization_collections(
        None => err!("Can't find organization details"),
    };

    // Get the user_organization record so that we can check if the user has access to all collections.
    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(u) => u,
        None => err!("User is not part of organization"),
    };

    let collection = Collection::new(org.uuid, data.Name);
    collection.save(&conn)?;

    // If the creating user doesn't have access to all collections (which is the
    // case for a Manager), also save the creator's uuid to the users_collections
    // table; otherwise the Manager would have no access to the collection they
    // just created.
    if !user_org.access_all {
        CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
    }

    Ok(Json(collection.to_json()))
}

@@ -231,7 +251,7 @@ fn post_organization_collections(
fn put_organization_collection_update(
    org_id: String,
    col_id: String,
    headers: AdminHeaders,
    headers: ManagerHeaders,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {

@@ -242,7 +262,7 @@ fn put_organization_collection_update(
fn post_organization_collection_update(
    org_id: String,
    col_id: String,
    _headers: AdminHeaders,
    _headers: ManagerHeaders,
    data: JsonUpcase<NewCollectionData>,
    conn: DbConn,
) -> JsonResult {

@@ -310,7 +330,7 @@ fn post_organization_collection_delete_user(
}

#[delete("/organizations/<org_id>/collections/<col_id>")]
fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
    match Collection::find_by_uuid(&col_id, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {

@@ -334,7 +354,7 @@ struct DeleteCollectionData {
fn post_organization_collection_delete(
    org_id: String,
    col_id: String,
    headers: AdminHeaders,
    headers: ManagerHeaders,
    _data: JsonUpcase<DeleteCollectionData>,
    conn: DbConn,
) -> EmptyResult {

@@ -342,7 +362,7 @@ fn post_organization_collection_delete(
}

#[get("/organizations/<org_id>/collections/<coll_id>/details")]
fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
    match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
        None => err!("Collection not found"),
        Some(collection) => {

@@ -356,7 +376,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
}

#[get("/organizations/<org_id>/collections/<coll_id>/users")]
fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
    // Get org and collection, check that collection is from org
    let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
        None => err!("Collection not found in Organization"),

@@ -369,7 +389,7 @@ fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders,
        .map(|col_user| {
            UserOrganization::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn)
                .unwrap()
                .to_json_collection_user_details(col_user.read_only)
                .to_json_user_access_restrictions(&col_user)
        })
        .collect();

@@ -381,7 +401,7 @@ fn put_collection_users(
    org_id: String,
    coll_id: String,
    data: JsonUpcaseVec<CollectionData>,
    _headers: AdminHeaders,
    _headers: ManagerHeaders,
    conn: DbConn,
) -> EmptyResult {
    // Get org and collection, check that collection is from org

@@ -403,7 +423,9 @@ fn put_collection_users(
            continue;
        }

        CollectionUser::save(&user.user_uuid, &coll_id, d.ReadOnly, &conn)?;
        CollectionUser::save(&user.user_uuid, &coll_id,
                             d.ReadOnly, d.HidePasswords,
                             &conn)?;
    }

    Ok(())

@@ -431,7 +453,7 @@ fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Jso
}

#[get("/organizations/<org_id>/users")]
fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult {
    let users = UserOrganization::find_by_org(&org_id, &conn);
    let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();

@@ -447,6 +469,7 @@ fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonRe
struct CollectionData {
    Id: String,
    ReadOnly: bool,
    HidePasswords: bool,
}

#[derive(Deserialize)]

@@ -480,7 +503,11 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
    let user = match User::find_by_mail(&email, &conn) {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("User email does not exist: {}", email))
                err!(format!("User does not exist: {}", email))
            }

            if !CONFIG.is_email_domain_allowed(&email) {
                err!("Email domain not eligible for invitations")
            }

            if !CONFIG.mail_enabled() {

@@ -514,7 +541,9 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
                        CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, &conn)?;
                        CollectionUser::save(&user.uuid, &collection.uuid,
                                             col.ReadOnly, col.HidePasswords,
                                             &conn)?;
                    }
                }
            }

@@ -769,7 +798,9 @@ fn edit_user(
                match Collection::find_by_uuid_and_org(&col.Id, &org_id, &conn) {
                    None => err!("Collection not found in Organization"),
                    Some(collection) => {
                        CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid, col.ReadOnly, &conn)?;
                        CollectionUser::save(&user_to_edit.user_uuid, &collection.uuid,
                                             col.ReadOnly, col.HidePasswords,
                                             &conn)?;
                    }
                }
            }
@@ -830,22 +861,13 @@ struct RelationsData {
fn post_org_import(
    query: Form<OrgIdData>,
    data: JsonUpcase<ImportData>,
    headers: Headers,
    headers: AdminHeaders,
    conn: DbConn,
    nt: Notify,
) -> EmptyResult {
    let data: ImportData = data.into_inner().data;
    let org_id = query.into_inner().organization_id;

    let org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
        Some(user) => user,
        None => err!("User is not part of the organization"),
    };

    if org_user.atype < UserOrgType::Admin {
        err!("Only admins or owners can import into an organization")
    }

    // Read and create the collections
    let collections: Vec<_> = data
        .Collections

@@ -866,6 +888,8 @@ fn post_org_import(
        relations.push((relation.Key, relation.Value));
    }

    let headers: Headers = headers.into();

    // Read and create the ciphers
    let ciphers: Vec<_> = data
        .Ciphers

@@ -901,3 +925,135 @@ fn post_org_import(
    let mut user = headers.user;
    user.update_revision(&conn)
}

#[get("/organizations/<org_id>/policies")]
fn list_policies(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let policies = OrgPolicy::find_by_org(&org_id, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    Ok(Json(json!({
        "Data": policies_json,
        "Object": "list",
        "ContinuationToken": null
    })))
}

#[get("/organizations/<org_id>/policies/token?<token>")]
fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResult {
    let invite = crate::auth::decode_invite(&token)?;

    let invite_org_id = match invite.org_id {
        Some(invite_org_id) => invite_org_id,
        None => err!("Invalid token"),
    };

    if invite_org_id != org_id {
        err!("Token doesn't match request organization");
    }

    // TODO: We receive the invite token as ?token=<>, validate it contains the org id
    let policies = OrgPolicy::find_by_org(&org_id, &conn);
    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

    Ok(Json(json!({
        "Data": policies_json,
        "Object": "list",
        "ContinuationToken": null
    })))
}

#[get("/organizations/<org_id>/policies/<pol_type>")]
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
        Some(pt) => pt,
        None => err!("Invalid policy type"),
    };

    let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
        Some(p) => p,
        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
    };

    Ok(Json(policy.to_json()))
}

#[derive(Deserialize)]
struct PolicyData {
    enabled: bool,
    #[serde(rename = "type")]
    _type: i32,
    data: Value,
}

#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
    let data: PolicyData = data.into_inner();

    let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
        Some(pt) => pt,
        None => err!("Invalid policy type"),
    };

    let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
        Some(p) => p,
        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
    };

    policy.enabled = data.enabled;
    policy.data = serde_json::to_string(&data.data)?;
    policy.save(&conn)?;

    Ok(Json(policy.to_json()))
}

#[get("/plans")]
fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
    Ok(Json(json!({
        "Object": "list",
        "Data": [
            {
                "Object": "plan",
                "Type": 0,
                "Product": 0,
                "Name": "Free",
                "IsAnnual": false,
                "NameLocalizationKey": "planNameFree",
                "DescriptionLocalizationKey": "planDescFree",
                "CanBeUsedByBusiness": false,
                "BaseSeats": 2,
                "BaseStorageGb": null,
                "MaxCollections": 2,
                "MaxUsers": 2,
                "HasAdditionalSeatsOption": false,
                "MaxAdditionalSeats": null,
                "HasAdditionalStorageOption": false,
                "MaxAdditionalStorage": null,
                "HasPremiumAccessOption": false,
                "TrialPeriodDays": null,
                "HasSelfHost": false,
                "HasPolicies": false,
                "HasGroups": false,
                "HasDirectory": false,
                "HasEvents": false,
                "HasTotp": false,
                "Has2fa": false,
                "HasApi": false,
                "HasSso": false,
                "UsersGetPremium": false,
                "UpgradeSortOrder": -1,
                "DisplaySortOrder": -1,
                "LegacyYear": null,
                "Disabled": false,
                "StripePlanId": null,
                "StripeSeatPlanId": null,
                "StripeStoragePlanId": null,
                "StripePremiumAccessPlanId": null,
                "BasePrice": 0.0,
                "SeatPrice": 0.0,
                "AdditionalStoragePricePerGb": 0.0,
                "PremiumAccessOptionPrice": 0.0
            }
        ],
        "ContinuationToken": null
    })))
}
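`OrgPolicyType::from_i32` in `get_policy` and `put_policy` comes from the `num_traits::FromPrimitive` trait, which the new `use num_traits::FromPrimitive;` import brings into scope and which is typically derived on the enum. A self-contained sketch (the variants and their numbering are assumptions for illustration, not the actual model definition):

use num_derive::FromPrimitive;
use num_traits::FromPrimitive;

// Assumed variants, loosely mirroring upstream Bitwarden's policy types.
#[derive(Debug, PartialEq, FromPrimitive)]
enum OrgPolicyType {
    TwoFactorAuthentication = 0,
    MasterPassword = 1,
}

fn main() {
    assert_eq!(
        OrgPolicyType::from_i32(0),
        Some(OrgPolicyType::TwoFactorAuthentication)
    );
    // Unknown values map to None, which the handler rejects with an error.
    assert_eq!(OrgPolicyType::from_i32(99), None);
}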
@@ -2,13 +2,16 @@ use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
    models::{TwoFactor, TwoFactorType},
    DbConn,
use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
};

pub use crate::config::CONFIG;

@@ -20,6 +23,7 @@ pub fn routes() -> Vec<Route> {
        activate_authenticator_put,
    ]
}

#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;

@@ -53,7 +57,12 @@ struct EnableAuthenticatorData {
}

#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
fn activate_authenticator(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let key = data.Key;

@@ -76,7 +85,7 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
    }

    // Validate the token provided with the key, and save new twofactor
    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &conn)?;
    validate_totp_code(&user.uuid, token, &key.to_uppercase(), &ip, &conn)?;

    _generate_recover_code(&mut user, &conn);

@@ -88,20 +97,31 @@ fn activate_authenticator(data: JsonUpcase<EnableAuthenticatorData>, headers: He
}

#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(data: JsonUpcase<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_authenticator(data, headers, conn)
fn activate_authenticator_put(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    activate_authenticator(data, headers, ip, conn)
}

pub fn validate_totp_code_str(user_uuid: &str, totp_code: &str, secret: &str, conn: &DbConn) -> EmptyResult {
pub fn validate_totp_code_str(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &DbConn,
) -> EmptyResult {
    let totp_code: u64 = match totp_code.parse() {
        Ok(code) => code,
        _ => err!("TOTP code is not a number"),
    };

    validate_totp_code(user_uuid, totp_code, secret, &conn)
    validate_totp_code(user_uuid, totp_code, secret, ip, &conn)
}

pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &DbConn) -> EmptyResult {
pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
    use oath::{totp_raw_custom_time, HashType};

    let decoded_secret = match BASE32.decode(secret.as_bytes()) {

@@ -143,11 +163,22 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: u64, secret: &str, conn: &
            twofactor.save(&conn)?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
            warn!("This or a TOTP code within {} steps back and forward has already been used!", steps);
            err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC")));
            warn!(
                "This or a TOTP code within {} steps back and forward has already been used!",
                steps
            );
            err!(format!(
                "Invalid TOTP code! Server time: {} IP: {}",
                current_time.format("%F %T UTC"),
                ip.ip
            ));
        }
    }

    // Else no valid code was received; deny access
    err!(format!("Invalid TOTP code! Server time: {}", current_time.format("%F %T UTC")));
    err!(format!(
        "Invalid TOTP code! Server time: {} IP: {}",
        current_time.format("%F %T UTC"),
        ip.ip
    ));
}
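`validate_totp_code` tries a few 30-second time steps on either side of the current time and, to block replays, refuses any step at or before the one recorded in `twofactor.last_used`. The sketch below captures that window logic in a self-contained form, with a fake code generator standing in for the `oath` crate:

// Stand-in for oath::totp_raw_custom_time; the real code hashes the secret.
fn fake_totp(time_step: i64) -> u64 {
    (time_step as u64).wrapping_mul(2654435761) % 1_000_000
}

/// Accepts a code if it matches any step in [now-steps, now+steps],
/// but only if that step is newer than the last step already used.
fn validate(code: u64, now: i64, last_used: i64, steps: i64) -> Result<i64, &'static str> {
    let current_step = now / 30;
    for step in (current_step - steps)..=(current_step + steps) {
        if fake_totp(step) == code {
            if step <= last_used {
                return Err("code already used");
            }
            return Ok(step);
        }
    }
    Err("invalid code")
}

fn main() {
    let now = 1_584_000_000;
    let step = now / 30;
    let code = fake_totp(step);
    assert_eq!(validate(code, now, step - 1, 1), Ok(step));
    // Replaying the same code at the same step is rejected.
    assert_eq!(validate(code, now, step, 1), Err("code already used"));
}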
@@ -2,18 +2,18 @@ use chrono::Utc;
use data_encoding::BASE64;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
    models::{TwoFactor, TwoFactorType, User},
    DbConn,
use crate::{
    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType, User},
        DbConn,
    },
    error::MapResult,
    CONFIG,
};
use crate::error::MapResult;
use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![get_duo, activate_duo, activate_duo_put,]

@@ -21,9 +21,9 @@ pub fn routes() -> Vec<Route> {

#[derive(Serialize, Deserialize)]
struct DuoData {
    host: String,
    ik: String,
    sk: String,
    host: String, // Duo API hostname
    ik: String,   // integration key
    sk: String,   // secret key
}

impl DuoData {

@@ -187,9 +187,10 @@ fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbC
fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    const AGENT: &str = "bitwarden_rs:Duo/1.0 (Rust)";

    use reqwest::{header::*, Client, Method};
    use reqwest::{blocking::Client, header::*, Method};
    use std::str::FromStr;

    // https://duo.com/docs/authapi#api-details
    let url = format!("https://{}{}", &data.host, path);
    let date = Utc::now().to_rfc2822();
    let username = &data.ik;

@@ -268,6 +269,10 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
}

pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
    // email is as entered by the user, so it needs to be normalized before
    // comparison with auth_user below.
    let email = &email.to_lowercase();

    let split: Vec<&str> = response.split(':').collect();
    if split.len() != 2 {
        err!("Invalid response length");

@@ -1,21 +1,18 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
    models::{TwoFactor, TwoFactorType},
    DbConn,
use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
    mail, CONFIG,
};
use crate::error::Error;
use crate::mail;
use crate::CONFIG;

use chrono::{Duration, NaiveDateTime, Utc};
use std::ops::Add;

pub fn routes() -> Vec<Route> {
    routes![get_email, send_email_login, send_email, email,]

@@ -59,7 +56,7 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, &conn).map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;

@@ -68,7 +65,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    twofactor.data = twofactor_data.to_json();
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

    Ok(())
}

@@ -135,7 +132,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
    );
    twofactor.save(&conn)?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;

    Ok(())
}

@@ -159,7 +156,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
    }

    let type_ = TwoFactorType::EmailVerificationChallenge as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;

    let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -189,7 +186,7 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(&data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user_uuid, TwoFactorType::Email as i32, &conn).map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),

@@ -212,7 +209,7 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let max_time = CONFIG.email_expiration_time() as i64;
    if date.add(Duration::seconds(max_time)) < Utc::now().naive_utc() {
    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
        err!("Token has expired")
    }

@@ -271,10 +268,10 @@ impl EmailTokenData {

/// Takes an email address and obscures it by replacing all but two characters with asterisks.
pub fn obscure_email(email: &str) -> String {
    let split: Vec<&str> = email.split('@').collect();
    let split: Vec<&str> = email.rsplitn(2, '@').collect();

    let mut name = split[0].to_string();
    let domain = &split[1];
    let mut name = split[1].to_string();
    let domain = &split[0];

    let name_size = name.chars().count();
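The `obscure_email` change swaps `split('@')` for `rsplitn(2, '@')`, so only the last `@` separates the local part from the domain; the indices flip because `rsplitn` yields pieces from the right, domain first. A quick self-contained check:

fn split_email(email: &str) -> (String, String) {
    // rsplitn scans from the right, so element 0 is the domain and
    // element 1 is the full local part, even if it contains '@'.
    let split: Vec<&str> = email.rsplitn(2, '@').collect();
    (split[1].to_string(), split[0].to_string())
}

fn main() {
    assert_eq!(
        split_email("user@example.com"),
        ("user".to_string(), "example.com".to_string())
    );
    // With a plain split('@'), the domain here would wrongly be "local".
    assert_eq!(
        split_email("\"odd@local\"@example.com"),
        ("\"odd@local\"".to_string(), "example.com".to_string())
    );
}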
@@ -1,22 +1,23 @@
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;

use crate::api::{JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::crypto;
use crate::db::{
    models::{TwoFactor, User},
    DbConn,
use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    crypto,
    db::{
        models::{TwoFactor, User},
        DbConn,
    },
};

pub(crate) mod authenticator;
pub(crate) mod duo;
pub(crate) mod email;
pub(crate) mod u2f;
pub(crate) mod yubikey;
pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![

@@ -39,7 +40,7 @@ pub fn routes() -> Vec<Route> {
#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> JsonResult {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_list).collect();
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

    Ok(Json(json!({
        "Data": twofactors_json,
@@ -1,27 +1,31 @@
use once_cell::sync::Lazy;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
use u2f::messages::{RegisterResponse, SignResponse, U2fSignRequest};
use u2f::protocol::{Challenge, U2f};
use u2f::register::Registration;

use crate::api::core::two_factor::_generate_recover_code;
use crate::api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData};
use crate::auth::Headers;
use crate::db::{
    models::{TwoFactor, TwoFactorType},
    DbConn,
use u2f::{
    messages::{RegisterResponse, SignResponse, U2fSignRequest},
    protocol::{Challenge, U2f},
    register::Registration,
};

use crate::{
    api::{
        core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
        PasswordData,
    },
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
    CONFIG,
};
use crate::error::Error;
use crate::CONFIG;

const U2F_VERSION: &str = "U2F_V2";

lazy_static! {
    static ref APP_ID: String = format!("{}/app-id.json", &CONFIG.domain());
    static ref U2F: U2f = U2f::new(APP_ID.clone());
}
static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.domain()));
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
    routes![

@@ -92,6 +96,7 @@ struct RegistrationDef {
    key_handle: Vec<u8>,
    pub_key: Vec<u8>,
    attestation_cert: Option<Vec<u8>>,
    device_name: Option<String>,
}

#[derive(Serialize, Deserialize)]
@@ -1,19 +1,18 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json;
use serde_json::Value;
-use yubico::config::Config;
-use yubico::verify;
+use yubico::{config::Config, verify};

-use crate::api::core::two_factor::_generate_recover_code;
-use crate::api::{EmptyResult, JsonResult, JsonUpcase, PasswordData};
-use crate::auth::Headers;
-use crate::db::{
-    models::{TwoFactor, TwoFactorType},
-    DbConn,
+use crate::{
+    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
+    auth::Headers,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    CONFIG,
};
-use crate::error::{Error, MapResult};
-use crate::CONFIG;

pub fn routes() -> Vec<Route> {
    routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
391 src/api/icons.rs
@@ -1,50 +1,85 @@
-use std::fs::{create_dir_all, remove_file, symlink_metadata, File};
-use std::io::prelude::*;
-use std::net::ToSocketAddrs;
-use std::time::{Duration, SystemTime};
-
-use rocket::http::ContentType;
-use rocket::response::Content;
-use rocket::Route;
-
-use reqwest::{header::HeaderMap, Client, Response, Url};
-
-use rocket::http::Cookie;
+use std::{
+    collections::HashMap,
+    fs::{create_dir_all, remove_file, symlink_metadata, File},
+    io::prelude::*,
+    net::{IpAddr, ToSocketAddrs},
+    sync::RwLock,
+    time::{Duration, SystemTime},
+};
+
+use once_cell::sync::Lazy;
+use regex::Regex;
+use reqwest::{blocking::Client, blocking::Response, header, Url};
+use rocket::{http::ContentType, http::Cookie, response::Content, Route};
use soup::prelude::*;

-use crate::error::Error;
-use crate::CONFIG;
+use crate::{error::Error, util::Cached, CONFIG};

pub fn routes() -> Vec<Route> {
    routes![icon]
}

-const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
-
const ALLOWED_CHARS: &str = "_-.";

-lazy_static! {
+static CLIENT: Lazy<Client> = Lazy::new(|| {
+    // Generate the default headers
+    let mut default_headers = header::HeaderMap::new();
+    default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15"));
+    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
+    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));
+
    // Reuse the client between requests
-    static ref CLIENT: Client = Client::builder()
-        .use_sys_proxy()
-        .gzip(true)
+    Client::builder()
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
-        .default_headers(_header_map())
+        .default_headers(default_headers)
        .build()
-        .unwrap();
+        .unwrap()
+});

+// Build each Regex only once, since this takes a lot of time.
+static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
+static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
+
+// Special HashMap which holds the user-defined Regex, to speed up matching.
+static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
+
+#[get("/<domain>/icon.png")]
+fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
+    if !is_valid_domain(&domain) {
+        warn!("Invalid domain: {}", domain);
+        return None;
+    }
+
+    get_icon(&domain).map(|icon| Cached::long(Content(ContentType::new("image", "x-icon"), icon)))
+}
+
+/// Returns whether the provided domain is valid.
+///
+/// This does some manual checks and makes use of Url for some basic checking.
+/// Domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
-    // Don't allow empty or too big domains or path traversal
-    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
+    // If parsing the domain fails using Url, it will not work with reqwest.
+    if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
+        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
+        return false;
+    } else if domain.is_empty()
+        || domain.contains("..")
+        || domain.starts_with('.')
+        || domain.starts_with('-')
+        || domain.ends_with('-')
+    {
+        debug!("Domain validation error: '{}' is either empty, contains '..', starts with a '.', or starts or ends with a '-'", domain);
+        return false;
+    } else if domain.len() > 255 {
+        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
        return false;
    }

    // Only alphanumeric or specific characters
    for c in domain.chars() {
        if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
+            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
            return false;
        }
    }
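Taken together, the new validation rejects more inputs than the old length/".." check did. A hedged sketch of the resulting behavior, assuming the `is_valid_domain` above is in scope (illustrative cases, not the project's test suite):

    // Accepted: alphanumerics plus the ALLOWED_CHARS "_-." in sensible positions.
    assert!(is_valid_domain("github.com"));
    // Rejected by the explicit checks above:
    assert!(!is_valid_domain(""));               // empty
    assert!(!is_valid_domain("a..b.com"));       // ".." (also a path-traversal guard for the cache folder)
    assert!(!is_valid_domain(".example.com"));   // leading '.'
    assert!(!is_valid_domain("-example.com"));   // leading '-'
    // Rejected by the character whitelist:
    assert!(!is_valid_domain("exa mple.com"));   // ' ' is neither alphanumeric nor in "_-."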
@@ -52,25 +87,112 @@ fn is_valid_domain(domain: &str) -> bool {
    true
}

-#[get("/<domain>/icon.png")]
-fn icon(domain: String) -> Content<Vec<u8>> {
-    let icon_type = ContentType::new("image", "x-icon");
-
-    if !is_valid_domain(&domain) {
-        warn!("Invalid domain: {:#?}", domain);
-        return Content(icon_type, FALLBACK_ICON.to_vec());
-    }
+/// TODO: This is extracted from IpAddr::is_global, which is unstable:
+/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
+/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[allow(clippy::nonminimal_bool)]
+#[cfg(not(feature = "unstable"))]
+fn is_global(ip: IpAddr) -> bool {
+    match ip {
+        IpAddr::V4(ip) => {
+            // Check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
+            // globally routable addresses in the 192.0.0.0/24 range.
+            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
+                return true;
+            }
+            !ip.is_private()
+                && !ip.is_loopback()
+                && !ip.is_link_local()
+                && !ip.is_broadcast()
+                && !ip.is_documentation()
+                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
+                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
+                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
+                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
+                // Make sure the address is not in 0.0.0.0/8
+                && ip.octets()[0] != 0
+        }
+        IpAddr::V6(ip) => {
+            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
+                true
+            } else {
+                !ip.is_multicast()
+                    && !ip.is_loopback()
+                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
+                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
+                    && !ip.is_unspecified()
+                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
+            }
+        }
+    }
+}

-    Content(icon_type, get_icon(&domain))
-}
-
-fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
+#[cfg(feature = "unstable")]
+fn is_global(ip: IpAddr) -> bool {
+    ip.is_global()
+}
+
+/// These are some tests to check that the implementations match.
+/// The IPv4 addresses can all be checked in 5 minutes or so, and they are correct as of nightly 2020-07-11.
+/// The IPv6 addresses can't be checked in a reasonable time, so we check about ten billion random ones; so far all correct.
+/// Note that the is_global implementation is subject to change as new IP RFCs are created.
+///
+/// To run while showing progress output:
+/// cargo test --features sqlite,unstable -- --nocapture --ignored
+#[cfg(test)]
+#[cfg(feature = "unstable")]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[ignore]
+    fn test_ipv4_global() {
+        for a in 0..u8::MAX {
+            println!("Iter: {}/255", a);
+            for b in 0..u8::MAX {
+                for c in 0..u8::MAX {
+                    for d in 0..u8::MAX {
+                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
+                        assert_eq!(ip.is_global(), is_global(ip))
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn test_ipv6_global() {
+        use ring::rand::{SecureRandom, SystemRandom};
+        let mut v = [0u8; 16];
+        let rand = SystemRandom::new();
+        for i in 0..1_000 {
+            println!("Iter: {}/1_000", i);
+            for _ in 0..10_000_000 {
+                rand.fill(&mut v).expect("Error generating random values");
+                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
+                    (v[14] as u16) << 8 | v[15] as u16,
+                    (v[12] as u16) << 8 | v[13] as u16,
+                    (v[10] as u16) << 8 | v[11] as u16,
+                    (v[8] as u16) << 8 | v[9] as u16,
+                    (v[6] as u16) << 8 | v[7] as u16,
+                    (v[4] as u16) << 8 | v[5] as u16,
+                    (v[2] as u16) << 8 | v[3] as u16,
+                    (v[0] as u16) << 8 | v[1] as u16,
+                ));
+                assert_eq!(ip.is_global(), is_global(ip))
+            }
+        }
+    }
+}
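The stable-toolchain `is_global` above is a copy of the still-unstable `IpAddr::is_global`. A small usage sketch, assuming the function above is in scope (addresses chosen per the comments above):

    use std::net::IpAddr;

    fn main() {
        let routable: IpAddr = "192.0.0.9".parse().unwrap(); // one of the two global 192.0.0.0/24 addresses
        let private: IpAddr = "10.1.2.3".parse().unwrap();   // RFC 1918 private space
        assert!(is_global(routable));
        assert!(!is_global(private));
    }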
+fn is_domain_blacklisted(domain: &str) -> bool {
    let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
        && (domain, 0)
            .to_socket_addrs()
            .map(|x| {
                for ip_port in x {
-                    if !ip_port.ip().is_global() {
+                    if !is_global(ip_port.ip()) {
                        warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
                        return true;
                    }

@@ -82,7 +204,31 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
    // Skip the regex check if the previous one is true already
    if !is_blacklisted {
        if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-            let regex = Regex::new(&blacklist).expect("Valid Regex");
+            let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+
+            // Use the pre-generated Regex stored in the Lazy HashMap if there is one, else generate it.
+            let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
+                regex
+            } else {
+                drop(regex_hashmap);
+
+                let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
+                // Clear the current list if the previous key doesn't exist,
+                // to prevent the HashMap from growing after someone changes the regex via the admin interface.
+                if regex_hashmap_write.len() >= 1 {
+                    regex_hashmap_write.clear();
+                }
+
+                // Generate the regex to store in the Lazy static HashMap.
+                let blacklist_regex = Regex::new(&blacklist).unwrap();
+                regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
+                drop(regex_hashmap_write);
+
+                regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+                regex_hashmap.get(&blacklist).unwrap()
+            };
+
+            // Use the pre-generated Regex stored in the Lazy HashMap.
            if regex.is_match(&domain) {
                warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
                is_blacklisted = true;

@@ -93,39 +239,38 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
    is_blacklisted
}
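The branch above that drops the read guard, builds the entry under the write lock, then re-acquires the read guard is a standard RwLock cache idiom. A condensed, self-contained sketch of the same idea (simplified to a single lock round-trip; not the project's exact code):

    use std::{collections::HashMap, sync::RwLock};
    use once_cell::sync::Lazy;
    use regex::Regex;

    static CACHE: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

    fn cached_is_match(pattern: &str, haystack: &str) -> bool {
        // Fast path: the Regex was compiled on an earlier call.
        if let Some(re) = CACHE.read().unwrap().get(pattern) {
            return re.is_match(haystack);
        }
        // Slow path: compile once under the write lock, keeping only the current pattern.
        let mut write = CACHE.write().unwrap();
        write.clear(); // bound the map when the pattern changes at runtime
        let re = write.entry(pattern.to_string()).or_insert_with(|| Regex::new(pattern).unwrap());
        re.is_match(haystack)
    }

The clear-before-insert mirrors the admin-interface concern above: the map should never hold more than the one regex currently configured.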
-fn get_icon(domain: &str) -> Vec<u8> {
+fn get_icon(domain: &str) -> Option<Vec<u8>> {
    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

+    // Check for expiration of negatively cached copy
+    if icon_is_negcached(&path) {
+        return None;
+    }
+
    if let Some(icon) = get_cached_icon(&path) {
-        return icon;
+        return Some(icon);
    }

    if CONFIG.disable_icon_download() {
-        return FALLBACK_ICON.to_vec();
+        return None;
    }

-    // Get the icon, or fallback in case of error
+    // Get the icon, or None in case of error
    match download_icon(&domain) {
        Ok(icon) => {
            save_icon(&path, &icon);
-            icon
+            Some(icon)
        }
        Err(e) => {
            error!("Error downloading icon: {:?}", e);
            let miss_indicator = path + ".miss";
-            let empty_icon = Vec::new();
-            save_icon(&miss_indicator, &empty_icon);
-            FALLBACK_ICON.to_vec()
+            save_icon(&miss_indicator, &[]);
+            None
        }
    }
}
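A failed download now writes an empty `<path>.miss` marker so that the failure itself is cached and retried only after it expires. A sketch of the freshness check such a marker implies (hypothetical helper mirroring `icon_is_negcached`, which this diff does not show; `ttl` is an assumed parameter):

    use std::fs::symlink_metadata;
    use std::time::{Duration, SystemTime};

    // Returns true when a ".miss" marker exists and is still fresh.
    fn negcached(path: &str, ttl: u64) -> bool {
        let miss = format!("{}.miss", path);
        match symlink_metadata(&miss).and_then(|m| m.modified()) {
            Ok(modified) => SystemTime::now()
                .duration_since(modified)
                .map(|age| age < Duration::from_secs(ttl))
                .unwrap_or(true), // clock skew: treat as fresh rather than hammering the host
            Err(_) => false, // no marker, not negatively cached
        }
    }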
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
-    // Check for expiration of negatively cached copy
-    if icon_is_negcached(path) {
-        return Some(FALLBACK_ICON.to_vec());
-    }
-
    // Check for expiration of successfully cached copy
    if icon_is_expired(path) {
        return None;

@@ -182,11 +327,17 @@ struct Icon {
}

impl Icon {
-    fn new(priority: u8, href: String) -> Self {
+    const fn new(priority: u8, href: String) -> Self {
        Self { href, priority }
    }
}

+struct IconUrlResult {
+    iconlist: Vec<Icon>,
+    cookies: String,
+    referer: String,
+}
+
/// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
/// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
/// This does not mean that that location exists, but it is the default location browsers use.

@@ -199,24 +350,65 @@ impl Icon {
/// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
/// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
/// ```
-fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
+fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
    // Default URL with secure and insecure schemes
    let ssldomain = format!("https://{}", domain);
    let httpdomain = format!("http://{}", domain);

+    // First check the domain as given during the request, for both HTTPS and HTTP.
+    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
+        Ok(c) => Ok(c),
+        Err(e) => {
+            let mut sub_resp = Err(e);
+
+            // When the domain is not an IP and has more than one dot, remove all subdomains.
+            let is_ip = domain.parse::<IpAddr>();
+            if is_ip.is_err() && domain.matches('.').count() > 1 {
+                let mut domain_parts = domain.split('.');
+                let base_domain = format!(
+                    "{base}.{tld}",
+                    tld = domain_parts.next_back().unwrap(),
+                    base = domain_parts.next_back().unwrap()
+                );
+                if is_valid_domain(&base_domain) {
+                    let sslbase = format!("https://{}", base_domain);
+                    let httpbase = format!("http://{}", base_domain);
+                    debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
+
+                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
+                }
+
+            // When the domain is not an IP and has fewer than 2 dots, try adding www. in front of it.
+            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                let www_domain = format!("www.{}", domain);
+                if is_valid_domain(&www_domain) {
+                    let sslwww = format!("https://{}", www_domain);
+                    let httpwww = format!("http://{}", www_domain);
+                    debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
+
+                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
+                }
+            }
+
+            sub_resp
+        }
+    };
+
    // Create the iconlist
    let mut iconlist: Vec<Icon> = Vec::new();

    // Create the cookie_str to hold all the cookies from the response.
    // These cookies can be used to request/download the favicon image.
    // Some sites have extra security in place, for example XSRF tokens.
-    let mut cookie_str = String::new();
+    let mut cookie_str = "".to_string();
+    let mut referer = "".to_string();

-    let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
-    if let Ok(mut content) = resp {
+    if let Ok(content) = resp {
        // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
        let url = content.url().clone();

        // Get all the cookies and pass them on to the next function.
        // Needed for XSRF cookies, for example (like @ mijn.ing.nl)
        let raw_cookies = content.headers().get_all("set-cookie");
        cookie_str = raw_cookies
            .iter()

@@ -230,26 +422,34 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
            })
            .collect::<String>();

+        // Set the referer to be used on the final request; some sites check this.
+        // Mostly used to prevent direct linking and for other security reasons.
+        referer = url.as_str().to_string();
+
        // Add the default favicon.ico to the list, with the domain the content responded from.
        iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));

        // 512KB should be more than enough for the HTML, though as we only really need
        // the HTML header, it could potentially be reduced even further
-        let limited_reader = crate::util::LimitedReader::new(&mut content, 512 * 1024);
+        let limited_reader = content.take(512 * 1024);

        let soup = Soup::from_reader(limited_reader)?;
        // Search for and filter
        let favicons = soup
            .tag("link")
-            .attr("rel", Regex::new(r"icon$|apple.*icon")?) // Only use icon rels
-            .attr("href", Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64")?) // Only allow specific extensions
+            .attr("rel", ICON_REL_REGEX.clone()) // Only use icon rels
+            .attr_name("href") // Make sure there is a href
            .find_all();

        // Loop through all the found icons and determine their priority
        for favicon in favicons {
            let sizes = favicon.get("sizes");
-            let href = favicon.get("href").expect("Missing href");
-            let full_href = url.join(&href).unwrap().into_string();
+            let href = favicon.get("href").unwrap();
+            // Skip invalid URLs
+            let full_href = match url.join(&href) {
+                Ok(h) => h.into_string(),
+                _ => continue,
+            };

            let priority = get_icon_priority(&full_href, sizes);
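For a host like `icons.example.com` the new fallback retries as `example.com`; for `example` (fewer than two dots) it retries as `www.example`. A sketch of just the base-domain computation used above (assumes a non-IP input containing at least two dots):

    fn base_domain(domain: &str) -> String {
        let mut parts = domain.split('.');
        let tld = parts.next_back().unwrap();  // last label
        let base = parts.next_back().unwrap(); // second-to-last label
        format!("{}.{}", base, tld)
    }

    // base_domain("icons.example.com") == "example.com"

Like the code above, this naive two-label split mis-handles multi-label public suffixes such as `.co.uk`; that limitation is inherited, not introduced here.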
@@ -265,28 +465,33 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
    iconlist.sort_by_key(|x| x.priority);

    // There is always an icon in the list, so no need to check whether one exists; just return the first.
-    Ok((iconlist, cookie_str))
+    Ok(IconUrlResult {
+        iconlist,
+        cookies: cookie_str,
+        referer,
+    })
}

fn get_page(url: &str) -> Result<Response, Error> {
-    get_page_with_cookies(url, "")
+    get_page_with_cookies(url, "", "")
}

-fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
-    if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
-        err!("Favicon rel linked to a non blacklisted domain!");
+fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<Response, Error> {
+    if is_domain_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
+        err!("Favicon rel linked to a blacklisted domain!");
    }

-    if cookie_str.is_empty() {
-        CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
-    } else {
-        CLIENT
-            .get(url)
-            .header("cookie", cookie_str)
-            .send()?
-            .error_for_status()
-            .map_err(Into::into)
+    let mut client = CLIENT.get(url);
+    if !cookie_str.is_empty() {
+        client = client.header("Cookie", cookie_str)
+    }
+    if !referer.is_empty() {
+        client = client.header("Referer", referer)
+    }
+
+    client.send()?
+        .error_for_status()
+        .map_err(Into::into)
}
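The rewritten fetch composes headers conditionally instead of duplicating the whole call chain. The same pattern as a standalone sketch against reqwest's blocking API (assumes reqwest with the `blocking` feature; names are illustrative):

    use reqwest::blocking::Client;

    fn fetch(client: &Client, url: &str, cookies: &str, referer: &str) -> reqwest::Result<Vec<u8>> {
        let mut req = client.get(url);
        // Only attach headers that actually have a value.
        if !cookies.is_empty() {
            req = req.header("Cookie", cookies);
        }
        if !referer.is_empty() {
            req = req.header("Referer", referer);
        }
        let mut buf = Vec::new();
        req.send()?.error_for_status()?.copy_to(&mut buf)?;
        Ok(buf)
    }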
/// Returns an integer with the priority of the icon type to prefer.

@@ -314,7 +519,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
        1
    } else if width == 64 {
        2
-    } else if width >= 24 && width <= 128 {
+    } else if (24..=128).contains(&width) {
        3
    } else if width == 16 {
        4

@@ -354,7 +559,7 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
    let mut height: u16 = 0;

    if let Some(sizes) = sizes {
-        match Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap().captures(sizes.trim()) {
+        match ICON_SIZE_REGEX.captures(sizes.trim()) {
            None => {}
            Some(dimensions) => {
                if dimensions.len() >= 3 {
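In other words, the sizes attribute buckets widths as: 32 px is best, then 64 px, then anything from 24 to 128, then 16 px. A condensed sketch of just that bucketing (the real function also weighs the href; the final fallback value is an assumption, since the hunk cuts off):

    // Illustrative reduction of the width handling above; lower is better.
    fn width_priority(width: u16) -> u8 {
        match width {
            32 => 1,
            64 => 2,
            16 => 4, // 16 lies outside 24..=128, so checking it out of order is equivalent
            w if (24..=128).contains(&w) => 3,
            _ => 5, // assumed fallback; not shown in the hunk
        }
    }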
fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
-    if check_icon_domain_is_blacklisted(domain) {
+    if is_domain_blacklisted(domain) {
        err!("Domain is blacklisted", domain)
    }

-    let (iconlist, cookie_str) = get_icon_url(&domain)?;
+    let icon_result = get_icon_url(&domain)?;

    let mut buffer = Vec::new();

    use data_url::DataUrl;

-    for icon in iconlist.iter().take(5) {
+    for icon in icon_result.iconlist.iter().take(5) {
        if icon.href.starts_with("data:image") {
            let datauri = DataUrl::process(&icon.href).unwrap();
            // Check if we are able to decode the data uri

@@ -394,13 +599,13 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
                _ => warn!("data uri is invalid"),
            };
        } else {
-            match get_page_with_cookies(&icon.href, &cookie_str) {
+            match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
                Ok(mut res) => {
                    info!("Downloaded icon from {}", icon.href);
                    res.copy_to(&mut buffer)?;
                    break;
                }
-                Err(_) => info!("Download failed for {}", icon.href),
-            },
+                _ => warn!("Download failed for {}", icon.href),
+            };
        }
    }

@@ -425,25 +630,3 @@ fn save_icon(path: &str, icon: &[u8]) {
        }
    }
}

-fn _header_map() -> HeaderMap {
-    // Set some default headers for the request.
-    // Use a browser like user-agent to make sure most websites will return there correct website.
-    use reqwest::header::*;
-
-    macro_rules! headers {
-        ($( $name:ident : $value:literal),+ $(,)? ) => {
-            let mut headers = HeaderMap::new();
-            $( headers.insert($name, HeaderValue::from_static($value)); )+
-            headers
-        };
-    }
-
-    headers! {
-        USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
-        ACCEPT_LANGUAGE: "en-US,en;q=0.8",
-        CACHE_CONTROL: "no-cache",
-        PRAGMA: "no-cache",
-        ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
-    }
-}
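`download_icon` above also accepts inline `data:image/...;base64,` hrefs through the data_url crate; a minimal decode sketch using that same API (the payload is a tiny illustrative PNG header, not a real icon):

    use data_url::DataUrl;

    fn main() {
        let href = "data:image/png;base64,iVBORw0KGgo=";
        let url = DataUrl::process(href).unwrap();
        // decode_to_vec returns the body bytes plus an optional fragment identifier.
        let (body, _fragment) = url.decode_to_vec().unwrap();
        assert_eq!(&body[..4], b"\x89PNG"); // PNG magic bytes
    }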
@@ -1,19 +1,22 @@
-use chrono::Utc;
+use chrono::Local;
use num_traits::FromPrimitive;
-use rocket::request::{Form, FormItems, FromForm};
-use rocket::Route;
+use rocket::{
+    request::{Form, FormItems, FromForm},
+    Route,
+};
use rocket_contrib::json::Json;
use serde_json::Value;

-use crate::api::core::two_factor::email::EmailTokenData;
-use crate::api::core::two_factor::{duo, email, yubikey};
-use crate::api::{ApiResult, EmptyResult, JsonResult};
-use crate::auth::ClientIp;
-use crate::db::models::*;
-use crate::db::DbConn;
-use crate::mail;
-use crate::util;
-use crate::CONFIG;
+use crate::{
+    api::{
+        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
+        ApiResult, EmptyResult, JsonResult,
+    },
+    auth::ClientIp,
+    db::{models::*, DbConn},
+    error::MapResult,
+    mail, util, CONFIG,
+};

pub fn routes() -> Vec<Route> {
    routes![login]
@@ -38,7 +41,7 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
            _check_is_some(&data.device_name, "device_name cannot be blank")?;
            _check_is_some(&data.device_type, "device_type cannot be blank")?;

-            _password_login(data, conn, ip)
+            _password_login(data, conn, &ip)
        }
        t => err!("Invalid type", t),
    }

@@ -49,10 +52,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
    let token = data.refresh_token.unwrap();

    // Get device by refresh token
-    let mut device = match Device::find_by_refresh_token(&token, &conn) {
-        Some(device) => device,
-        None => err!("Invalid refresh token"),
-    };
+    let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;

    // COMMON
    let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();

@@ -68,10 +68,15 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
        "refresh_token": device.refresh_token,
        "Key": user.akey,
        "PrivateKey": user.private_key,
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: according to the official server this seems to be something like user.password_hash.is_empty(), but it would need testing
+        "scope": "api offline_access"
    })))
}

-fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult {
+fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
    // Validate scope
    let scope = data.scope.as_ref().unwrap();
    if scope != "api offline_access" {

@@ -97,8 +102,18 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
        )
    }

+    // Check if the user is disabled
+    if !user.enabled {
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {}.", ip.ip, username)
+        )
+    }
+
+    let now = Local::now();
+
    if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
-        let now = Utc::now().naive_utc();
+        let now = now.naive_utc();
        if user.last_verifying_at.is_none() || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() > CONFIG.signups_verify_resend_time() as i64 {
            let resend_limit = CONFIG.signups_verify_resend_limit() as i32;
            if resend_limit == 0 || user.login_verify_count < resend_limit {

@@ -127,10 +142,10 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult

    let (mut device, new_device) = get_device(&data, &conn, &user);

-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &conn)?;
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, &ip, &conn)?;

    if CONFIG.mail_enabled() && new_device {
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &device.updated_at, &device.name) {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
            error!("Error sending new device email: {:#?}", e);

            if CONFIG.require_device_email() {

@@ -140,7 +155,6 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
    }

    // Common
-    let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
    let orgs = UserOrganization::find_by_user(&user.uuid, &conn);

    let (access_token, expires_in) = device.refresh_tokens(&user, orgs);

@@ -154,6 +168,11 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: ClientIp) -> JsonResult
        "Key": user.akey,
        "PrivateKey": user.private_key,
        //"TwoFactorToken": "11122233333444555666777888999"
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: Same as above
+        "scope": "api offline_access"
    });

    if let Some(token) = twofactor_token {
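Both the refresh and password flows now return the KDF settings, so clients can derive keys without an extra round trip. A sketch of the (abridged) response shape with placeholder values; field names come from the hunks above, values are illustrative:

    use serde_json::json;

    fn main() {
        let body = json!({
            "access_token": "<jwt>",
            "expires_in": 3600,
            "token_type": "Bearer",
            "refresh_token": "<opaque token>",
            "Key": "<user.akey>",
            "PrivateKey": "<user.private_key>",
            "Kdf": 0,                  // client_kdf_type; 0 = PBKDF2-SHA256
            "KdfIterations": 100_000,  // client_kdf_iter
            "ResetMasterPassword": false,
            "scope": "api offline_access"
        });
        println!("{}", body);
    }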
@@ -197,6 +216,7 @@ fn twofactor_auth(
    user_uuid: &str,
    data: &ConnectData,
    device: &mut Device,
+    ip: &ClientIp,
    conn: &DbConn,
) -> ApiResult<Option<String>> {
    let twofactors = TwoFactor::find_by_user(user_uuid, conn);

@@ -216,8 +236,7 @@ fn twofactor_auth(
    let selected_twofactor = twofactors
        .into_iter()
-        .filter(|tf| tf.atype == selected_id && tf.enabled)
-        .nth(0);
+        .find(|tf| tf.atype == selected_id && tf.enabled);

    use crate::api::core::two_factor as _tf;
    use crate::crypto::ct_eq;

@@ -226,7 +245,7 @@ fn twofactor_auth(
    let mut remember = data.two_factor_remember.unwrap_or(0);

    match TwoFactorType::from_i32(selected_id) {
-        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, conn)?,
+        Some(TwoFactorType::Authenticator) => _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?,
        Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
        Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
        Some(TwoFactorType::Duo) => _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?,

@@ -252,10 +271,7 @@ fn twofactor_auth(
}

fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
-    match tf {
-        Some(tf) => Ok(tf.data),
-        None => err!("Two factor doesn't exist"),
-    }
+    tf.map(|t| t.data).map_res("Two factor doesn't exist")
}

fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {

@@ -347,6 +363,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
    Ok(result)
}

+// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default)]
#[allow(non_snake_case)]
struct ConnectData {

@@ -364,6 +381,7 @@ struct ConnectData {
    device_identifier: Option<String>,
    device_name: Option<String>,
    device_type: Option<String>,
+    device_push_token: Option<String>, // Unused; mobile device push not yet supported.

    // Needed for two-factor auth
    two_factor_provider: Option<i32>,

@@ -391,6 +409,7 @@ impl<'f> FromForm<'f> for ConnectData {
            "deviceidentifier" => form.device_identifier = Some(value),
            "devicename" => form.device_name = Some(value),
            "devicetype" => form.device_type = Some(value),
+            "devicepushtoken" => form.device_push_token = Some(value),
            "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
            "twofactortoken" => form.two_factor_token = Some(value),
            "twofactorremember" => form.two_factor_remember = value.parse().ok(),
@@ -1,27 +1,29 @@
mod admin;
-pub(crate) mod core;
+pub mod core;
mod icons;
mod identity;
mod notifications;
mod web;

-pub use self::admin::routes as admin_routes;
-pub use self::core::routes as core_routes;
-pub use self::icons::routes as icons_routes;
-pub use self::identity::routes as identity_routes;
-pub use self::notifications::routes as notifications_routes;
-pub use self::notifications::{start_notification_server, Notify, UpdateType};
-pub use self::web::routes as web_routes;

use rocket_contrib::json::Json;
use serde_json::Value;

+pub use crate::api::{
+    admin::routes as admin_routes,
+    core::routes as core_routes,
+    icons::routes as icons_routes,
+    identity::routes as identity_routes,
+    notifications::routes as notifications_routes,
+    notifications::{start_notification_server, Notify, UpdateType},
+    web::routes as web_routes,
+};
+use crate::util;

// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
pub type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;

-use crate::util;
type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
@@ -4,11 +4,12 @@ use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value as JsonValue;

-use crate::api::{EmptyResult, JsonResult};
-use crate::auth::Headers;
-use crate::db::DbConn;
-
-use crate::{Error, CONFIG};
+use crate::{
+    api::{EmptyResult, JsonResult},
+    auth::Headers,
+    db::DbConn,
+    Error, CONFIG,
+};

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]

@@ -19,10 +20,12 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);
#[get("/hub")]
fn websockets_err() -> EmptyResult {
    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) {
-        err!("###########################################################
+        err!(
+            "###########################################################
    '/notifications/hub' should be proxied to the websocket server or notifications won't work.
    Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-    ###########################################################################################")
+    ###########################################################################################"
+        )
    } else {
        Err(Error::empty())
    }

@@ -136,7 +139,6 @@ struct InitialMessage {
const PING_MS: u64 = 15_000;
const PING: Token = Token(1);

-const ID_KEY: &str = "id=";
const ACCESS_TOKEN_KEY: &str = "access_token=";

impl WSHandler {
|
@ -147,38 +149,50 @@ impl WSHandler {
|
|||
let io_error = io::Error::from(io::ErrorKind::InvalidData);
|
||||
Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
|
||||
}
|
||||
|
||||
fn get_request_token(&self, hs: Handshake) -> Option<String> {
|
||||
use std::str::from_utf8;
|
||||
|
||||
// Verify we have a token header
|
||||
if let Some(header_value) = hs.request.header("Authorization") {
|
||||
if let Ok(converted) = from_utf8(header_value) {
|
||||
if let Some(token_part) = converted.split("Bearer ").nth(1) {
|
||||
return Some(token_part.into());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Otherwise verify the query parameter value
|
||||
let path = hs.request.resource();
|
||||
if let Some(params) = path.split('?').nth(1) {
|
||||
let params_iter = params.split('&').take(1);
|
||||
for val in params_iter {
|
||||
if val.starts_with(ACCESS_TOKEN_KEY) {
|
||||
return Some(val[ACCESS_TOKEN_KEY.len()..].into());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Handler for WSHandler {
|
||||
fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
|
||||
// Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
|
||||
let path = hs.request.resource();
|
||||
//
|
||||
// We don't use `id`, and as of around 2020-03-25, the official clients
|
||||
// no longer seem to pass `id` (only `access_token`).
|
||||
|
||||
let (_id, access_token) = match path.split('?').nth(1) {
|
||||
Some(params) => {
|
||||
let mut params_iter = params.split('&').take(2);
|
||||
|
||||
let mut id = None;
|
||||
let mut access_token = None;
|
||||
while let Some(val) = params_iter.next() {
|
||||
if val.starts_with(ID_KEY) {
|
||||
id = Some(&val[ID_KEY.len()..]);
|
||||
} else if val.starts_with(ACCESS_TOKEN_KEY) {
|
||||
access_token = Some(&val[ACCESS_TOKEN_KEY.len()..]);
|
||||
}
|
||||
}
|
||||
|
||||
match (id, access_token) {
|
||||
(Some(a), Some(b)) => (a, b),
|
||||
_ => return self.err("Missing id or access token"),
|
||||
}
|
||||
}
|
||||
None => return self.err("Missing query path"),
|
||||
// Get user token from header or query parameter
|
||||
let access_token = match self.get_request_token(hs) {
|
||||
Some(token) => token,
|
||||
_ => return self.err("Missing access token"),
|
||||
};
|
||||
|
||||
// Validate the user
|
||||
use crate::auth;
|
||||
let claims = match auth::decode_login(access_token) {
|
||||
let claims = match auth::decode_login(access_token.as_str()) {
|
||||
Ok(claims) => claims,
|
||||
Err(_) => return self.err("Invalid access token provided"),
|
||||
};
|
||||
|
@ -256,7 +270,9 @@ impl Factory for WSFactory {
|
|||
// Remove handler
|
||||
if let Some(user_uuid) = &handler.user_uuid {
|
||||
if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
|
||||
user_conn.remove_item(&handler.out);
|
||||
if let Some(pos) = user_conn.iter().position(|x| x == &handler.out) {
|
||||
user_conn.remove(pos);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -327,7 +343,7 @@ impl WebSocketUsers {
|
|||
/* Message Structure
|
||||
[
|
||||
1, // MessageType.Invocation
|
||||
{}, // Headers
|
||||
{}, // Headers (map)
|
||||
null, // InvocationId
|
||||
"ReceiveMessage", // Target
|
||||
[ // Arguments
|
||||
|
@ -344,7 +360,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
|
|||
|
||||
let value = V::Array(vec![
|
||||
1.into(),
|
||||
V::Array(vec![]),
|
||||
V::Map(vec![]),
|
||||
V::Nil,
|
||||
"ReceiveMessage".into(),
|
||||
V::Array(vec![V::Map(vec![
|
||||
|
|
|
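The `Headers` slot is the second element of SignalR's MessagePack invocation frame, and the fix above encodes it as an empty map rather than an empty array. A sketch of the corrected frame using rmpv, as the code above does (arguments elided):

    use rmpv::Value as V;

    fn invocation_frame() -> V {
        V::Array(vec![
            1.into(),                // MessageType.Invocation
            V::Map(vec![]),          // Headers: an empty MessagePack map, not an empty array
            V::Nil,                  // InvocationId
            "ReceiveMessage".into(), // Target
            V::Array(vec![]),        // Arguments (payload omitted in this sketch)
        ])
    }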
@@ -1,15 +1,10 @@
use std::path::{Path, PathBuf};

-use rocket::http::ContentType;
-use rocket::response::content::Content;
-use rocket::response::NamedFile;
-use rocket::Route;
+use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route};
use rocket_contrib::json::Json;
use serde_json::Value;

-use crate::error::Error;
-use crate::util::Cached;
-use crate::CONFIG;
+use crate::{error::Error, util::Cached, CONFIG};

pub fn routes() -> Vec<Route> {
    // If adding more routes here, consider also adding them to
@@ -37,7 +32,17 @@ fn app_id() -> Cached<Content<Json<Value>>>
    {
        "version": { "major": 1, "minor": 0 },
        "ids": [
-            &CONFIG.domain(),
+            // Per <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-appid-and-facets-v2.0-id-20180227.html#determining-the-facetid-of-a-calling-application>:
+            //
+            // "In the Web case, the FacetID MUST be the Web Origin [RFC6454]
+            // of the web page triggering the FIDO operation, written as
+            // a URI with an empty path. Default ports are omitted and any
+            // path component is ignored."
+            //
+            // This leaves it unclear as to whether the path must be empty,
+            // or whether it can be non-empty and will be ignored. To be on
+            // the safe side, use a proper web origin (with empty path).
+            &CONFIG.domain_origin(),
            "ios:bundle-id:com.8bit.bitwarden",
            "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
    }]

@@ -68,13 +73,17 @@ fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
    match filename.as_ref() {
        "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
-        "shield-white.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/shield-white.png"))),
        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
        "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),

        "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
-        "bootstrap-native-v4.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native-v4.js"))),
+        "bootstrap-native.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "md5.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/md5.js"))),
        "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
-        _ => err!("Image not found"),
+        "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
+        "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
+        "jquery-3.5.1.slim.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js"))),
+        _ => err!(format!("Static file not found: {}", filename)),
    }
}
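With the change above, the served `app-id.json` ends up shaped like the sketch below (standard FIDO trustedFacets layout; the origin value is an assumed example):

    use serde_json::json;

    fn app_id_json() -> serde_json::Value {
        json!({
            "trustedFacets": [{
                "version": { "major": 1, "minor": 0 },
                "ids": [
                    "https://vault.example.com", // CONFIG.domain_origin(): scheme + host only, empty path
                    "ios:bundle-id:com.8bit.bitwarden",
                    "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI"
                ]
            }]
        })
    }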
246 src/auth.rs
@@ -1,38 +1,40 @@
//
// JWT Handling
//
-use crate::util::read_file;
use chrono::{Duration, Utc};
use num_traits::FromPrimitive;
+use once_cell::sync::Lazy;

-use jsonwebtoken::{self, Algorithm, Header};
+use jsonwebtoken::{self, Algorithm, DecodingKey, EncodingKey, Header};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;

-use crate::error::{Error, MapResult};
-use crate::CONFIG;
+use crate::{
+    error::{Error, MapResult},
+    util::read_file,
+    CONFIG,
+};

const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

-lazy_static! {
-    pub static ref DEFAULT_VALIDITY: Duration = Duration::hours(2);
-    static ref JWT_HEADER: Header = Header::new(JWT_ALGORITHM);
-    pub static ref JWT_LOGIN_ISSUER: String = format!("{}|login", CONFIG.domain());
-    pub static ref JWT_INVITE_ISSUER: String = format!("{}|invite", CONFIG.domain());
-    pub static ref JWT_DELETE_ISSUER: String = format!("{}|delete", CONFIG.domain());
-    pub static ref JWT_VERIFYEMAIL_ISSUER: String = format!("{}|verifyemail", CONFIG.domain());
-    pub static ref JWT_ADMIN_ISSUER: String = format!("{}|admin", CONFIG.domain());
-    static ref PRIVATE_RSA_KEY: Vec<u8> = match read_file(&CONFIG.private_rsa_key()) {
-        Ok(key) => key,
-        Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
-    };
-    static ref PUBLIC_RSA_KEY: Vec<u8> = match read_file(&CONFIG.public_rsa_key()) {
-        Ok(key) => key,
-        Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
-    };
-}
+pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
+static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
+pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
+static JWT_INVITE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|invite", CONFIG.domain_origin()));
+static JWT_DELETE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|delete", CONFIG.domain_origin()));
+static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin()));
+static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin()));
+static PRIVATE_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.private_rsa_key()) {
+    Ok(key) => key,
+    Err(e) => panic!("Error loading private RSA Key.\n Error: {}", e),
+});
+static PUBLIC_RSA_KEY: Lazy<Vec<u8>> = Lazy::new(|| match read_file(&CONFIG.public_rsa_key()) {
+    Ok(key) => key,
+    Err(e) => panic!("Error loading public RSA Key.\n Error: {}", e),
+});

pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, &EncodingKey::from_rsa_der(&PRIVATE_RSA_KEY)) {
        Ok(token) => token,
        Err(e) => panic!("Error encoding jwt {}", e),
    }

@@ -51,7 +53,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
    let token = token.replace(char::is_whitespace, "");

-    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation)
+    jsonwebtoken::decode(&token, &DecodingKey::from_rsa_der(&PUBLIC_RSA_KEY), &validation)
        .map(|d| d.claims)
        .map_res("Error decoding JWT")
}
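This tracks the jsonwebtoken 7 API, which takes typed `EncodingKey`/`DecodingKey` values instead of raw byte slices; `from_rsa_der` matches the DER-encoded RSA keys the project already stores on disk. A minimal round-trip sketch under those assumptions (claim values are illustrative):

    use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct Claims { sub: String, exp: i64 }

    fn roundtrip(private_der: &[u8], public_der: &[u8]) -> jsonwebtoken::errors::Result<Claims> {
        let claims = Claims { sub: "user".into(), exp: 2_000_000_000 };
        // Sign with the private key, verify with the public key, both in DER form.
        let token = encode(&Header::new(Algorithm::RS256), &claims, &EncodingKey::from_rsa_der(private_der))?;
        decode::<Claims>(&token, &DecodingKey::from_rsa_der(public_der), &Validation::new(Algorithm::RS256))
            .map(|data| data.claims)
    }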
@@ -213,11 +215,12 @@ pub fn generate_admin_claims() -> AdminJWTClaims {
//
// Bearer token authentication
//
-use rocket::request::{self, FromRequest, Request};
-use rocket::Outcome;
+use rocket::request::{FromRequest, Outcome, Request};

-use crate::db::models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization};
-use crate::db::DbConn;
+use crate::db::{
+    models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
+    DbConn,
+};

pub struct Headers {
    pub host: String,

@@ -228,7 +231,7 @@ pub struct Headers {
impl<'a, 'r> FromRequest<'a, 'r> for Headers {
    type Error = &'static str;

-    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        // Get host

@@ -293,7 +296,25 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        };

        if user.security_stamp != claims.sstamp {
-            err_handler!("Invalid security stamp")
+            if let Some(stamp_exception) = user
+                .stamp_exception
+                .as_deref()
+                .and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
+            {
+                let current_route = match request.route().and_then(|r| r.name) {
+                    Some(name) => name,
+                    _ => err_handler!("Error getting current route for stamp exception"),
+                };
+
+                // Check that both match; if not, this route is not allowed with the current security stamp.
+                if stamp_exception.route != current_route {
+                    err_handler!("Invalid security stamp: Current route and exception route do not match")
+                } else if stamp_exception.security_stamp != claims.sstamp {
+                    err_handler!("Invalid security stamp for matched stamp exception")
+                }
+            } else {
+                err_handler!("Invalid security stamp")
+            }
        }

        Outcome::Success(Headers { host, device, user })
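The stamp exception is stored on the user row as a JSON string. A sketch of a struct and payload that would satisfy the deserialization above (the real `UserStampException` lives in `db::models` and is not part of this diff; the route name is a made-up example):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct UserStampException {
        route: String,          // the single route still allowed with the old stamp
        security_stamp: String, // the stamp that route may still use
    }

    fn main() {
        // Hypothetical stored value; the route name is illustrative.
        let raw = r#"{"route":"post_password","security_stamp":"<old-stamp-uuid>"}"#;
        let exc: UserStampException = serde_json::from_str(raw).unwrap();
        assert_eq!(exc.route, "post_password");
    }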
|
@ -305,19 +326,39 @@ pub struct OrgHeaders {
|
|||
pub device: Device,
|
||||
pub user: User,
|
||||
pub org_user_type: UserOrgType,
|
||||
pub org_user: UserOrganization,
|
||||
pub org_id: String,
|
||||
}
|
||||
|
||||
// org_id is usually the second param ("/organizations/<org_id>")
|
||||
// But there are cases where it is located in a query value.
|
||||
// First check the param, if this is not a valid uuid, we will try the query value.
|
||||
fn get_org_id(request: &Request) -> Option<String> {
|
||||
if let Some(Ok(org_id)) = request.get_param::<String>(1) {
|
||||
if uuid::Uuid::parse_str(&org_id).is_ok() {
|
||||
return Some(org_id);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(Ok(org_id)) = request.get_query_value::<String>("organizationId") {
|
||||
if uuid::Uuid::parse_str(&org_id).is_ok() {
|
||||
return Some(org_id);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
|
||||
fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
match request.guard::<Headers>() {
|
||||
Outcome::Forward(_) => Outcome::Forward(()),
|
||||
Outcome::Failure(f) => Outcome::Failure(f),
|
||||
Outcome::Success(headers) => {
|
||||
// org_id is expected to be the second param ("/organizations/<org_id>")
|
||||
match request.get_param::<String>(1) {
|
||||
Some(Ok(org_id)) => {
|
||||
match get_org_id(request) {
|
||||
Some(org_id) => {
|
||||
let conn = match request.guard::<DbConn>() {
|
||||
Outcome::Success(conn) => conn,
|
||||
_ => err_handler!("Error getting DB"),
|
||||
|
@ -347,6 +388,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
|
|||
err_handler!("Unknown user type in the database")
|
||||
}
|
||||
},
|
||||
org_user,
|
||||
org_id,
|
||||
})
|
||||
}
|
||||
_ => err_handler!("Error getting the organization id"),
|
||||
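`get_org_id` prefers the path segment and falls back to `?organizationId=`, accepting a candidate only if it parses as a UUID. A condensed sketch of the same two-source lookup, detached from rocket's request types (hypothetical helper; inputs are illustrative):

    use uuid::Uuid;

    // Mirrors the lookup order above: path param first, then query value.
    fn pick_org_id(path_param: Option<&str>, query_param: Option<&str>) -> Option<String> {
        let valid = |s: &&str| Uuid::parse_str(s).is_ok();
        path_param
            .filter(valid)
            .or_else(|| query_param.filter(valid))
            .map(str::to_string)
    }

    // pick_org_id(Some("not-a-uuid"), Some("beaf0cf8-7d9a-4b46-9a1e-8c1446a0a0b5"))
    //     == Some("beaf0cf8-7d9a-4b46-9a1e-8c1446a0a0b5".to_string())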
|
@ -366,7 +409,7 @@ pub struct AdminHeaders {
|
|||
impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
|
||||
fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
match request.guard::<OrgHeaders>() {
|
||||
Outcome::Forward(_) => Outcome::Forward(()),
|
||||
Outcome::Failure(f) => Outcome::Failure(f),
|
||||
|
@ -386,6 +429,137 @@ impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
|
|||
}
|
||||
}
|
||||
|
||||
impl Into<Headers> for AdminHeaders {
|
||||
fn into(self) -> Headers {
|
||||
Headers {
|
||||
host: self.host,
|
||||
device: self.device,
|
||||
user: self.user,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// col_id is usually the forth param ("/organizations/<org_id>/collections/<col_id>")
|
||||
// But there cloud be cases where it is located in a query value.
|
||||
// First check the param, if this is not a valid uuid, we will try the query value.
|
||||
fn get_col_id(request: &Request) -> Option<String> {
|
||||
if let Some(Ok(col_id)) = request.get_param::<String>(3) {
|
||||
if uuid::Uuid::parse_str(&col_id).is_ok() {
|
||||
return Some(col_id);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(Ok(col_id)) = request.get_query_value::<String>("collectionId") {
|
||||
if uuid::Uuid::parse_str(&col_id).is_ok() {
|
||||
return Some(col_id);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// The ManagerHeaders are used to check if you are at least a Manager
|
||||
/// and have access to the specific collection provided via the <col_id>/collections/collectionId.
|
||||
/// This does strict checking on the collection_id, ManagerHeadersLoose does not.
|
||||
pub struct ManagerHeaders {
|
||||
pub host: String,
|
||||
pub device: Device,
|
||||
pub user: User,
|
||||
pub org_user_type: UserOrgType,
|
||||
}
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
match request.guard::<OrgHeaders>() {
|
||||
Outcome::Forward(_) => Outcome::Forward(()),
|
||||
Outcome::Failure(f) => Outcome::Failure(f),
|
||||
Outcome::Success(headers) => {
|
||||
if headers.org_user_type >= UserOrgType::Manager {
|
||||
match get_col_id(request) {
|
||||
Some(col_id) => {
|
||||
let conn = match request.guard::<DbConn>() {
|
||||
Outcome::Success(conn) => conn,
|
||||
_ => err_handler!("Error getting DB"),
|
||||
};
|
||||
|
||||
if !headers.org_user.access_all {
|
||||
match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) {
|
||||
Some(_) => (),
|
||||
None => err_handler!("The current user isn't a manager for this collection"),
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => err_handler!("Error getting the collection id"),
|
||||
}
|
||||
|
||||
Outcome::Success(Self {
|
||||
host: headers.host,
|
||||
device: headers.device,
|
||||
user: headers.user,
|
||||
org_user_type: headers.org_user_type,
|
||||
})
|
||||
} else {
|
||||
err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Headers> for ManagerHeaders {
|
||||
fn into(self) -> Headers {
|
||||
Headers {
|
||||
host: self.host,
|
||||
device: self.device,
|
||||
user: self.user,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The ManagerHeadersLoose is used when you at least need to be a Manager,
|
||||
/// but there is no collection_id sent with the request (either in the path or as form data).
|
||||
pub struct ManagerHeadersLoose {
|
||||
pub host: String,
|
||||
pub device: Device,
|
||||
pub user: User,
|
||||
pub org_user_type: UserOrgType,
|
||||
}
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
match request.guard::<OrgHeaders>() {
|
||||
Outcome::Forward(_) => Outcome::Forward(()),
|
||||
Outcome::Failure(f) => Outcome::Failure(f),
|
||||
Outcome::Success(headers) => {
|
||||
if headers.org_user_type >= UserOrgType::Manager {
|
||||
Outcome::Success(Self {
|
||||
host: headers.host,
|
||||
device: headers.device,
|
||||
user: headers.user,
|
||||
org_user_type: headers.org_user_type,
|
||||
})
|
||||
} else {
|
||||
err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Headers> for ManagerHeadersLoose {
|
||||
fn into(self) -> Headers {
|
||||
Headers {
|
||||
host: self.host,
|
||||
device: self.device,
|
||||
user: self.user,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OwnerHeaders {
|
||||
pub host: String,
|
||||
pub device: Device,
|
||||
|
@ -395,7 +569,7 @@ pub struct OwnerHeaders {
|
|||
impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
|
||||
fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
match request.guard::<OrgHeaders>() {
|
||||
Outcome::Forward(_) => Outcome::Forward(()),
|
||||
Outcome::Failure(f) => Outcome::Failure(f),
|
||||
|
@ -426,7 +600,7 @@ pub struct ClientIp {
|
|||
impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
|
||||
type Error = ();
|
||||
|
||||
fn from_request(req: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
|
||||
fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
let ip = if CONFIG._ip_header_enabled() {
|
||||
req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| {
|
||||
match ip.find(',') {
|
||||
|
|
Some files were not shown because too many files have changed in this diff.