2018-12-07 01:05:45 +00:00
#![ feature(proc_macro_hygiene, decl_macro, vec_remove_item, try_trait) ]
2019-02-02 23:22:18 +00:00
#![ recursion_limit = " 256 " ]
2018-10-10 18:40:39 +00:00
2018-12-30 22:34:31 +00:00
#[ macro_use ]
extern crate rocket ;
#[ macro_use ]
extern crate serde_derive ;
#[ macro_use ]
extern crate serde_json ;
#[ macro_use ]
extern crate log ;
#[ macro_use ]
extern crate diesel ;
#[ macro_use ]
extern crate diesel_migrations ;
#[ macro_use ]
extern crate lazy_static ;
#[ macro_use ]
extern crate derive_more ;
#[ macro_use ]
extern crate num_derive ;
use std ::{
path ::Path ,
process ::{ exit , Command } ,
} ;
2018-02-10 00:00:55 +00:00
2018-12-30 22:34:31 +00:00
#[ macro_use ]
mod error ;
2018-02-10 00:00:55 +00:00
mod api ;
mod auth ;
2019-01-25 17:23:51 +00:00
mod config ;
2018-12-30 22:34:31 +00:00
mod crypto ;
mod db ;
2018-08-15 06:32:19 +00:00
mod mail ;
2018-12-30 22:34:31 +00:00
mod util ;
2018-02-10 00:00:55 +00:00
2019-01-25 17:23:51 +00:00
pub use config ::CONFIG ;
2019-02-14 01:03:37 +00:00
pub use error ::{ Error , MapResult } ;
2019-01-25 17:23:51 +00:00
2018-02-10 00:00:55 +00:00
fn main ( ) {
2019-02-20 19:59:37 +00:00
launch_info ( ) ;
2019-01-25 17:23:51 +00:00
if CONFIG . extended_logging ( ) {
2018-12-06 19:35:25 +00:00
init_logging ( ) . ok ( ) ;
}
2019-05-27 20:58:52 +00:00
#[ cfg(all(feature = " sqlite " , feature = " mysql " )) ]
compile_error! ( " Can't enable both backends " ) ;
2018-05-12 20:55:18 +00:00
check_db ( ) ;
check_rsa_keys ( ) ;
2018-09-13 18:59:51 +00:00
check_web_vault ( ) ;
2018-08-30 15:43:46 +00:00
migrations ::run_migrations ( ) ;
2018-02-10 00:00:55 +00:00
2019-02-12 23:03:16 +00:00
launch_rocket ( ) ;
2018-02-10 00:00:55 +00:00
}
2019-02-20 19:59:37 +00:00
/// Prints the startup banner to stdout, including the build version when the
/// `GIT_VERSION` environment variable was set at compile time.
fn launch_info() {
    println!("/--------------------------------------------------------------------\\");
    println!("|                       Starting Bitwarden_RS                        |");

    // option_env! is resolved at compile time; the version line is only
    // printed for builds that embedded GIT_VERSION.
    if let Some(version) = option_env!("GIT_VERSION") {
        println!("|{:^68}|", format!("Version {}", version));
    }

    println!("|--------------------------------------------------------------------|");
    println!("| This is an *unofficial* Bitwarden implementation, DO NOT use the   |");
    println!("| official channels to report bugs/features, regardless of client.   |");
    println!("| Report URL: https://github.com/dani-garcia/bitwarden_rs/issues/new |");
    println!("\\--------------------------------------------------------------------/\n");
}
2018-12-06 19:35:25 +00:00
fn init_logging ( ) -> Result < ( ) , fern ::InitError > {
2019-03-25 13:12:41 +00:00
use std ::str ::FromStr ;
2018-12-06 19:35:25 +00:00
let mut logger = fern ::Dispatch ::new ( )
2018-12-30 22:34:31 +00:00
. format ( | out , message , record | {
out . finish ( format_args! (
" {}[{}][{}] {} " ,
chrono ::Local ::now ( ) . format ( " [%Y-%m-%d %H:%M:%S] " ) ,
record . target ( ) ,
record . level ( ) ,
message
) )
} )
2019-03-25 13:12:41 +00:00
. level ( log ::LevelFilter ::from_str ( & CONFIG . log_level ( ) ) . expect ( " Valid log level " ) )
2019-03-25 12:55:21 +00:00
// Hide unknown certificate errors if using self-signed
. level_for ( " rustls::session " , log ::LevelFilter ::Off )
// Hide failed to close stream messages
. level_for ( " hyper::server " , log ::LevelFilter ::Warn )
2018-12-30 22:34:31 +00:00
. chain ( std ::io ::stdout ( ) ) ;
2018-12-06 19:35:25 +00:00
2019-01-25 17:23:51 +00:00
if let Some ( log_file ) = CONFIG . log_file ( ) {
2018-12-06 19:35:25 +00:00
logger = logger . chain ( fern ::log_file ( log_file ) ? ) ;
}
2019-04-26 20:08:26 +00:00
#[ cfg(not(windows)) ]
{
if cfg! ( feature = " enable_syslog " ) | | CONFIG . use_syslog ( ) {
2019-03-29 19:27:20 +00:00
logger = chain_syslog ( logger ) ;
}
}
2018-12-06 19:35:25 +00:00
logger . apply ( ) ? ;
Ok ( ( ) )
}
2019-03-29 19:27:20 +00:00
#[ cfg(not(windows)) ]
2018-12-06 19:35:25 +00:00
fn chain_syslog ( logger : fern ::Dispatch ) -> fern ::Dispatch {
let syslog_fmt = syslog ::Formatter3164 {
facility : syslog ::Facility ::LOG_USER ,
hostname : None ,
process : " bitwarden_rs " . into ( ) ,
pid : 0 ,
} ;
match syslog ::unix ( syslog_fmt ) {
Ok ( sl ) = > logger . chain ( sl ) ,
Err ( e ) = > {
error! ( " Unable to connect to syslog: {:?} " , e ) ;
logger
}
}
}
2018-05-12 20:55:18 +00:00
fn check_db ( ) {
2019-01-25 17:23:51 +00:00
let url = CONFIG . database_url ( ) ;
2019-05-27 20:58:52 +00:00
if cfg! ( feature = " sqlite " ) {
let path = Path ::new ( & url ) ;
if let Some ( parent ) = path . parent ( ) {
use std ::fs ;
if fs ::create_dir_all ( parent ) . is_err ( ) {
error! ( " Error creating database directory " ) ;
exit ( 1 ) ;
}
}
// Turn on WAL in SQLite
if CONFIG . enable_db_wal ( ) {
use diesel ::RunQueryDsl ;
let connection = db ::get_connection ( ) . expect ( " Can't conect to DB " ) ;
diesel ::sql_query ( " PRAGMA journal_mode=wal " )
. execute ( & connection )
. expect ( " Failed to turn on WAL " ) ;
}
}
2019-05-20 19:12:41 +00:00
println! ( " {} " , url . to_string ( ) ) ;
2019-05-28 05:48:17 +00:00
db ::get_connection ( ) . expect ( " Can't connect to DB " ) ;
2018-05-12 20:55:18 +00:00
}
2018-02-17 00:13:02 +00:00
fn check_rsa_keys ( ) {
// If the RSA keys don't exist, try to create them
2019-01-25 17:23:51 +00:00
if ! util ::file_exists ( & CONFIG . private_rsa_key ( ) ) | | ! util ::file_exists ( & CONFIG . public_rsa_key ( ) ) {
2018-12-06 19:35:25 +00:00
info! ( " JWT keys don't exist, checking if OpenSSL is available... " ) ;
2018-02-17 00:13:02 +00:00
2019-02-20 19:59:37 +00:00
Command ::new ( " openssl " ) . arg ( " version " ) . status ( ) . unwrap_or_else ( | _ | {
2018-12-06 19:35:25 +00:00
info! ( " Can't create keys because OpenSSL is not available, make sure it's installed and available on the PATH " ) ;
2018-02-17 00:13:02 +00:00
exit ( 1 ) ;
} ) ;
2018-12-06 19:35:25 +00:00
info! ( " OpenSSL detected, creating keys... " ) ;
2018-02-17 00:13:02 +00:00
2019-02-20 19:59:37 +00:00
let key = CONFIG . rsa_key_filename ( ) ;
let pem = format! ( " {} .pem " , key ) ;
let priv_der = format! ( " {} .der " , key ) ;
let pub_der = format! ( " {} .pub.der " , key ) ;
2018-12-30 22:34:31 +00:00
let mut success = Command ::new ( " openssl " )
2019-02-20 19:59:37 +00:00
. args ( & [ " genrsa " , " -out " , & pem ] )
. status ( )
2018-12-30 22:34:31 +00:00
. expect ( " Failed to create private pem file " )
. success ( ) ;
success & = Command ::new ( " openssl " )
2019-02-20 19:59:37 +00:00
. args ( & [ " rsa " , " -in " , & pem , " -outform " , " DER " , " -out " , & priv_der ] )
. status ( )
2018-12-30 22:34:31 +00:00
. expect ( " Failed to create private der file " )
. success ( ) ;
success & = Command ::new ( " openssl " )
2019-02-20 19:59:37 +00:00
. args ( & [ " rsa " , " -in " , & priv_der , " -inform " , " DER " ] )
. args ( & [ " -RSAPublicKey_out " , " -outform " , " DER " , " -out " , & pub_der ] )
. status ( )
2018-12-30 22:34:31 +00:00
. expect ( " Failed to create public der file " )
. success ( ) ;
2018-02-17 00:13:02 +00:00
if success {
2018-12-06 19:35:25 +00:00
info! ( " Keys created correctly. " ) ;
2018-02-17 00:13:02 +00:00
} else {
2018-12-06 19:35:25 +00:00
error! ( " Error creating keys, exiting... " ) ;
2018-02-17 00:13:02 +00:00
exit ( 1 ) ;
}
}
}
2018-04-24 20:38:23 +00:00
fn check_web_vault ( ) {
2019-01-25 17:23:51 +00:00
if ! CONFIG . web_vault_enabled ( ) {
2018-06-12 19:09:42 +00:00
return ;
}
2019-01-25 17:23:51 +00:00
let index_path = Path ::new ( & CONFIG . web_vault_folder ( ) ) . join ( " index.html " ) ;
2018-04-24 20:38:23 +00:00
if ! index_path . exists ( ) {
2019-01-29 20:44:46 +00:00
error! ( " Web vault is not found. To install it, please follow the steps in https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault " ) ;
2018-04-24 20:38:23 +00:00
exit ( 1 ) ;
}
}
2019-02-20 19:59:37 +00:00
// Embed the migrations from the migrations folder into the application
// This way, the program automatically migrates the database to the latest version
// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
#[ allow(unused_imports) ]
mod migrations {
2019-05-26 21:02:41 +00:00
#[ cfg(feature = " sqlite " ) ]
embed_migrations! ( " migrations/sqlite " ) ;
#[ cfg(feature = " mysql " ) ]
embed_migrations! ( " migrations/mysql " ) ;
2019-02-12 21:47:00 +00:00
2019-02-20 19:59:37 +00:00
pub fn run_migrations ( ) {
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
2019-02-25 00:08:38 +00:00
let connection = crate ::db ::get_connection ( ) . expect ( " Can't connect to DB " ) ;
2019-02-12 21:47:00 +00:00
2019-02-20 19:59:37 +00:00
use std ::io ::stdout ;
embedded_migrations ::run_with_output ( & connection , & mut stdout ( ) ) . expect ( " Can't run migrations " ) ;
2019-02-12 21:47:00 +00:00
}
2019-02-20 19:59:37 +00:00
}
fn launch_rocket ( ) {
// Create Rocket object, this stores current log level and sets it's own
let rocket = rocket ::ignite ( ) ;
2019-02-12 21:47:00 +00:00
2019-02-20 19:59:37 +00:00
// If we aren't logging the mounts, we force the logging level down
if ! CONFIG . log_mounts ( ) {
log ::set_max_level ( log ::LevelFilter ::Warn ) ;
}
let rocket = rocket
. mount ( " / " , api ::web_routes ( ) )
. mount ( " /api " , api ::core_routes ( ) )
. mount ( " /admin " , api ::admin_routes ( ) )
. mount ( " /identity " , api ::identity_routes ( ) )
. mount ( " /icons " , api ::icons_routes ( ) )
. mount ( " /notifications " , api ::notifications_routes ( ) ) ;
// Force the level up for the fairings, managed state and lauch
if ! CONFIG . log_mounts ( ) {
log ::set_max_level ( log ::LevelFilter ::max ( ) ) ;
}
let rocket = rocket
. manage ( db ::init_pool ( ) )
. manage ( api ::start_notification_server ( ) )
. attach ( util ::AppHeaders ( ) ) ;
// Launch and print error if there is one
// The launch will restore the original logging level
error! ( " Launch error {:#?} " , rocket . launch ( ) ) ;
2019-01-11 13:18:13 +00:00
}