Compare commits: main...post-boots (1 commit)

Author | SHA1 | Date
---|---|---
 | 2466f98468 |
@@ -115,71 +115,429 @@
     }
   ],
   "results": {
-    "nomad/core/metrics/grafana/grafana.ini": [
-      {
-        "type": "Basic Auth Credentials",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
-        "is_verified": false,
-        "line_number": 78,
-        "is_secret": false
-      },
+    "nomad/backups/oneoff.nomad": [
       {
         "type": "Secret Keyword",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
+        "filename": "nomad/backups/oneoff.nomad",
+        "hashed_secret": "f2baa52d02ca888455ce47823f47bf372d5eecb3",
         "is_verified": false,
-        "line_number": 109,
-        "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
-        "is_verified": false,
-        "line_number": 151,
-        "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
-        "is_verified": false,
-        "line_number": 154,
-        "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
-        "is_verified": false,
-        "line_number": 239,
-        "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": "nomad/core/metrics/grafana/grafana.ini",
-        "hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
-        "is_verified": false,
-        "line_number": 252,
+        "line_number": 114,
         "is_secret": false
       }
     ],
-    "nomad/core/syslogng.nomad": [
+    "nomad/databases/mysql.nomad": [
+      {
+        "type": "Secret Keyword",
+        "filename": "nomad/databases/mysql.nomad",
+        "hashed_secret": "18960546905b75c869e7de63961dc185f9a0a7c9",
+        "is_verified": false,
+        "line_number": 66,
+        "is_secret": false
+      }
+    ],
+    "nomad/metrics/grafana.nomad": [
+      {
+        "type": "Secret Keyword",
+        "filename": "nomad/metrics/grafana.nomad",
+        "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8",
+        "is_verified": false,
+        "line_number": 75,
+        "is_secret": false
+      }
+    ],
+    "nomad/packer/cloud-config": [
       {
         "type": "Base64 High Entropy String",
-        "filename": "nomad/core/syslogng.nomad",
-        "hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "9ef2b7de7d9cb43de75586aa57c8325a46639ac9",
         "is_verified": false,
-        "line_number": 159,
+        "line_number": 26,
         "is_secret": false
       },
       {
         "type": "Base64 High Entropy String",
-        "filename": "nomad/core/syslogng.nomad",
-        "hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "2bb3f24183094c8ff5d5ac381a411fc4ab7a35da",
         "is_verified": false,
-        "line_number": 159,
+        "line_number": 27,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "67d96cf75c8d2edca3bdd2614003c4d1fc62055c",
+        "is_verified": false,
+        "line_number": 28,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "2f86f87d3ecf5a696afa6d8f61d0c9a13f2f6304",
+        "is_verified": false,
+        "line_number": 29,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "0462eefb3a04a6e4b97137d7682d9730d433efef",
+        "is_verified": false,
+        "line_number": 30,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "2bc96fb643b5c5149711f1a6630e92a0a40b5b52",
+        "is_verified": false,
+        "line_number": 31,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "3219ab282e5f68beb580dd3b7de2c8f171e0490d",
+        "is_verified": false,
+        "line_number": 32,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "5d167ddff0f00dce98abf89c8a924b5930d7ad83",
+        "is_verified": false,
+        "line_number": 33,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "d2a685cccdd672ec626c079d449e99cc094077b0",
+        "is_verified": false,
+        "line_number": 34,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "05a42fe5f719093045673ce08eeab08ecb019923",
+        "is_verified": false,
+        "line_number": 35,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "67cb7a776194efdd644961546be659b2c9167560",
+        "is_verified": false,
+        "line_number": 36,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "9a696a465a523fa4658747f902443af71329d5b1",
+        "is_verified": false,
+        "line_number": 37,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "1b3b4a544abe1482fb00cb1cdcd6b2a8164be8a3",
+        "is_verified": false,
+        "line_number": 38,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "d63d3ee4601ae418a9fafb284f6f57e7caa3372f",
+        "is_verified": false,
+        "line_number": 39,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "f1b6163dfe3e65a418a5d76dc2c3c730df79456d",
+        "is_verified": false,
+        "line_number": 40,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "bbc7610266af9f573207f340beaa494ea1e95ed7",
+        "is_verified": false,
+        "line_number": 41,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "e3f1c5b2b28515fd232629f226227d014a0a6870",
+        "is_verified": false,
+        "line_number": 42,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "7346f3b1b1e953966a71f35a83fab1351ca21510",
+        "is_verified": false,
+        "line_number": 43,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "c178ec42fc63c81c594d2320c01b2d618fd6256b",
+        "is_verified": false,
+        "line_number": 44,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "9a9c57ad4c90af8557c4abea07e156d288c435c8",
+        "is_verified": false,
+        "line_number": 45,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "41d969550bd78c1c4ba03eac7e7196f9507489d4",
+        "is_verified": false,
+        "line_number": 46,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "aa837393fc553576af61b2c3b00d51c356790070",
+        "is_verified": false,
+        "line_number": 47,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "b12573ed44f9ced804f4b67cb3decdaf950aa118",
+        "is_verified": false,
+        "line_number": 48,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "78663a675e5480881bf74645cd34a4a532cc6251",
+        "is_verified": false,
+        "line_number": 49,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "fccc316b54ab46ccadf00e94252e813ea59aca44",
+        "is_verified": false,
+        "line_number": 50,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "efce3378a7e2e3c4cf7e987049b89c2f90a472e8",
+        "is_verified": false,
+        "line_number": 51,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "94c80e1690072d1f88b21a0252d973fb7ee4beb7",
+        "is_verified": false,
+        "line_number": 52,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "8842e7efc9473b354d140170dbf6381208046b9c",
+        "is_verified": false,
+        "line_number": 53,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "bbeca400bf38dcf4b1a9243a6e026bdf86a1e0b4",
+        "is_verified": false,
+        "line_number": 54,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "d82b9a8fe372666d26021efd1ca9f8509d8d17ac",
+        "is_verified": false,
+        "line_number": 55,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "014dac6cb8f4a13bb0c7411261a386a95a7b693d",
+        "is_verified": false,
+        "line_number": 56,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "8645a12846d5ff41bf134336620a75fa56df87a6",
+        "is_verified": false,
+        "line_number": 57,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "e99e046a926b00dc114ae0372cfa841202d72409",
+        "is_verified": false,
+        "line_number": 58,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "587aeadfd3e6cff1e79ebd7218e7d7eb205039d2",
+        "is_verified": false,
+        "line_number": 59,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "b109b6e5c12a0801f8ee3625f83ce88d338c6bbb",
+        "is_verified": false,
+        "line_number": 60,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "fc9a86e095e968baebdc6f0f3a8c1fe7cc0680a5",
+        "is_verified": false,
+        "line_number": 61,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "fcab48515cfe5b2611fa6240d1f43bb6832734f4",
+        "is_verified": false,
+        "line_number": 62,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "16cb0e2482414d7b0dfce595ae782c437b0113ae",
+        "is_verified": false,
+        "line_number": 63,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "46b706d44f86eab95c68353b4e766afba43d3cf7",
+        "is_verified": false,
+        "line_number": 64,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "bc766ecc3c4300e5898db57ac69aa6daaf41183a",
+        "is_verified": false,
+        "line_number": 65,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "edf48876ce85b3041038d38ea21ca254826383e0",
+        "is_verified": false,
+        "line_number": 66,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "d0b110105dac510d2795c2b0d55f72e574311c5a",
+        "is_verified": false,
+        "line_number": 67,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "243997353c494328938298dd999ea751a85572a8",
+        "is_verified": false,
+        "line_number": 68,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "a00ed23fe8d7e981a4e39159cf2a9cb9d9a473f0",
+        "is_verified": false,
+        "line_number": 69,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "8d4b327f0feab6ee6088a19b44798b129f3dde27",
+        "is_verified": false,
+        "line_number": 70,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "0215562638f2418de7c39d85628f529b455fc46b",
+        "is_verified": false,
+        "line_number": 71,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "f152eebec4ed5168d64c48d34c5e574884c70992",
+        "is_verified": false,
+        "line_number": 72,
+        "is_secret": false
+      },
+      {
+        "type": "Base64 High Entropy String",
+        "filename": "nomad/packer/cloud-config",
+        "hashed_secret": "084f9e7b38bf21a62094d4eff295373125f5d1b8",
+        "is_verified": false,
+        "line_number": 73,
+        "is_secret": false
+      }
+    ],
+    "nomad/packer/ubuntu-cloud-init.pkr.hcl": [
+      {
+        "type": "Secret Keyword",
+        "filename": "nomad/packer/ubuntu-cloud-init.pkr.hcl",
+        "hashed_secret": "cbd2e782c0b1331013ac63de0b8d3b6f6a2ab5af",
+        "is_verified": false,
+        "line_number": 27,
         "is_secret": false
       }
     ],
@@ -199,16 +557,8 @@
         "is_verified": false,
         "line_number": 10,
         "is_secret": false
-      },
-      {
-        "type": "Secret Keyword",
-        "filename": "nomad/vault_hashi_vault_values.example.yml",
-        "hashed_secret": "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
-        "is_verified": false,
-        "line_number": 22,
-        "is_secret": false
       }
     ]
   },
-  "generated_at": "2022-10-27T21:28:03Z"
+  "generated_at": "2022-07-22T03:15:58Z"
 }
@@ -4,7 +4,6 @@
 provider "registry.terraform.io/hashicorp/consul" {
   version = "2.14.0"
   hashes = [
-    "h1:lJWOdlqevg6FQLFlfM3tGOsy9yPrjm9/vqkfzVrqT/A=",
     "h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
     "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
     "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
@@ -23,7 +22,7 @@ provider "registry.terraform.io/hashicorp/consul" {
 provider "registry.terraform.io/hashicorp/external" {
   version = "2.2.2"
   hashes = [
-    "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
+    "h1:BKQ5f5ijzeyBSnUr+j0wUi+bYv6KBQVQNDXNRVEcfJE=",
     "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
     "zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
     "zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
@@ -42,7 +41,6 @@ provider "registry.terraform.io/hashicorp/external" {
 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
@@ -62,7 +60,6 @@ provider "registry.terraform.io/hashicorp/vault" {
   version = "3.3.1"
   hashes = [
     "h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
-    "h1:i7EC2IF0KParI+JPA5ZtXJrAn3bAntW5gEMLvOXwpW4=",
     "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
     "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
     "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
@@ -55,35 +55,18 @@ cluster: ansible-cluster

 venv/bin/ansible:
 	python3 -m venv venv
-	./venv/bin/pip install ansible python-consul hvac
-
-.PHONY: galaxy
-galaxy: venv/bin/ansible
-	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
-	./venv/bin/ansible-galaxy collection install -r collections/requirements.yml
+	./venv/bin/pip install ansible
+	./venv/bin/pip install python-consul
+	./venv/bin/pip install hvac

 .PHONY: ansible-cluster
-ansible-cluster: venv/bin/ansible galaxy
+ansible-cluster: venv/bin/ansible
+	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
+	./venv/bin/ansible-galaxy collection install -r collections/requirements.yml
 	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
 		$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
 		-i ansible_hosts.yml -M ./roles ./setup-cluster.yml

-.PHONY: bootstrap-values
-bootstrap-values: venv/bin/ansible galaxy
-	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -vv \
-		$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-		-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml
-
-.PHONY: unseal-vault
-unseal-vault: venv/bin/ansible galaxy
-	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
-		-e "@vault-keys.json" -i ansible_hosts.yml -M ./roles ./unseal-vault.yml
-
-.PHONY: init
-init:
-	@terraform init
-
 .PHONY: plan
 plan:
 	@terraform plan \
@@ -1,59 +1,38 @@
 # This file is maintained automatically by "terraform init".
 # Manual edits may be lost in future updates.

-provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.15.1"
-  hashes = [
-    "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
-    "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
-    "zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
-    "zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
-    "zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
-    "zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
-    "zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
-    "zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
-    "zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
-    "zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
-    "zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
-    "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
-    "zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
-  ]
-}
-
 provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.17"
+  version = "1.4.16"
   hashes = [
-    "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
-    "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
-    "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
-    "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
-    "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
-    "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
-    "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
-    "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
-    "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
-    "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
-    "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
+    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
+    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
+    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
+    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
+    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
+    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
+    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
+    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
+    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
+    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
+    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
+    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
   ]
 }

 provider "registry.terraform.io/hashicorp/vault" {
-  version = "3.7.0"
+  version = "3.4.1"
   hashes = [
-    "h1:idawLPCbZgHIb+NRLJs4YdIcQgACqYiT5VwQfChkn+w=",
-    "zh:256b82692c560c76ad51414a2c003cadfa10338a9df333dbe22dd14a9ed16f95",
-    "zh:329ed8135a98bd6a000d014e40bc5981c6868cf50eedf454f1a1f72ac463bdf0",
-    "zh:3b32c18b492a6ac8e1ccac40d28cd42a88892ef8f3515291676136e3faac351c",
-    "zh:4c5ea8e80543b36b1999257a41c8b9cde852542251de82a94cff2f9d280ac2ec",
-    "zh:5d968ed305cde7aa3567a943cb2f5f8def54b40a2292b66027b1405a1cf28585",
-    "zh:60226d1a0a496a9a6c1d646800dd7e1bd1c4f5527e7307ff0bca9f4d0b5395e2",
-    "zh:71b11def501c994ee5305f24bd47ebfcca2314c5acca3efcdd209373d0068ac0",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:89be6b5db3be473bfd14422a9abf83245c4b22ce47a8fe463bbebf8e20958ab1",
-    "zh:8f91051d43ae309bb8f3f6a9659f0fd26b1b239faf671c139b4e9ad0d208db05",
-    "zh:b5114983273d3170878f657b92738b2c40953aedeef2e1840588ecaf1bc0827e",
-    "zh:fd56db01c5444dc8ca2e0ad2f13fc4c17735d0fdeb5960e23176fb3f5a5114d3",
+    "h1:oow6cAwKiFpJBBWKsDqNmwZIrFTWWvoeIbqs+vyUDE0=",
+    "zh:1eb8370a1846e34e2bcc4d11eece5733735784a8eab447bbed3cfd822101b577",
+    "zh:2df3989327cea68b2167514b7ebddc67b09340f00bbf3fa85df03c97adfb9d25",
+    "zh:3dd1e317264f574985e856296deef71a76464918bf0566eb0d7f6389ea0586bd",
+    "zh:9750861f2822482aa608ea5a52b385bc42b2e1f2511094e6a975412618c4495d",
+    "zh:9b940e7f78975d29a4d0a116cf43c0bc1cb03bec4ad8d34887d64e6e60bacb9e",
+    "zh:9cb6e7ad2a62529d35dacd20695d49c2f02230cb785d46178cc10f4ec80e5a51",
+    "zh:a12718689bbcb37bcbb9132c18bffd354fad8ab5c8cb89cec1a0ee85c65b8cb7",
+    "zh:a6e38afacca1af4fab04a9f2dc49b8295eb462db68bdc7451352d0f950f804f8",
+    "zh:d6e0e994d51b9e07d5713d4796381f9e129e9de962e79caae2b7055f6f68297e",
+    "zh:ea4bbef7a1bb2553db473fa304c93845674167b61e8c9677107a96c8c696da12",
+    "zh:f985a8b7f4ef7d1eba9cef7d99997ee9c4a54ffe76dab7fa8b1fdec2a9edca7e",
   ]
 }
nomad/acls/acls.tf | 6 (new file)
@@ -0,0 +1,6 @@
+resource "nomad_acl_policy" "create_post_bootstrap_policy" {
+  # count = can(tobool(var.nomad_secret_id)) ? 1 : 0
+  name        = "anonymous"
+  description = "Anon RW"
+  rules_hcl   = file("${path.module}/nomad-anon-bootstrap.hcl")
+}
@@ -1,23 +0,0 @@
-namespace "*" {
-  policy = "read"
-}
-
-agent {
-  policy = "read"
-}
-
-operator {
-  policy = "read"
-}
-
-quota {
-  policy = "read"
-}
-
-node {
-  policy = "read"
-}
-
-host_volume "*" {
-  policy = "read"
-}
@@ -1,4 +0,0 @@
-namespace "*" {
-  policy = "read"
-  capabilities = ["submit-job", "dispatch-job", "read-logs"]
-}
@@ -1,18 +0,0 @@
-resource "nomad_acl_policy" "anon_policy" {
-  name        = "anonymous"
-  description = "Anon RO"
-  rules_hcl   = file("${path.module}/nomad-anon-policy.hcl")
-}
-
-resource "nomad_acl_policy" "admin" {
-  name        = "admin"
-  description = "Admin RW for admins"
-  rules_hcl   = file("${path.module}/nomad-admin-policy.hcl")
-}
-
-# TODO: (security) Limit this scope
-resource "nomad_acl_policy" "deploy" {
-  name        = "deploy"
-  description = "Write for job deployments"
-  rules_hcl   = file("${path.module}/nomad-deploy-policy.hcl")
-}
@@ -8,33 +8,27 @@ resource "vault_nomad_secret_backend" "config" {
   backend     = "nomad"
   description = "Nomad ACL"
   token       = nomad_acl_token.vault.secret_id

-  default_lease_ttl_seconds = "3600"
-  max_lease_ttl_seconds     = "7200"
-
-  ttl     = "3600"
-  max_ttl = "7200"
 }

-# Vault roles generating Nomad tokens
 resource "vault_nomad_secret_role" "nomad-deploy" {
   backend = vault_nomad_secret_backend.config.backend
   role    = "nomad-deploy"
-  # Nomad policies
-  policies = ["deploy"]
+  policies = ["nomad-deploy"]
 }

-resource "vault_nomad_secret_role" "admin-management" {
+resource "vault_nomad_secret_role" "admin" {
   backend = vault_nomad_secret_backend.config.backend
   role    = "admin-management"
   type    = "management"
 }

-resource "vault_nomad_secret_role" "admin" {
-  backend = vault_nomad_secret_backend.config.backend
-  role    = "admin"
-  # Nomad policies
-  policies = ["admin"]
+resource "vault_policy" "nomad-deploy" {
+  name   = "nomad-deploy"
+  policy = <<EOH
+path "nomad/creds/nomad-deploy" {
+  capabilities = ["read"]
+}
+EOH
 }

 # Nomad Vault token access
@@ -46,3 +40,76 @@ resource "vault_token_auth_backend_role" "nomad-cluster" {
   token_period = 259200
   renewable    = true
 }
+
+# Policy for clusters
+resource "vault_policy" "nomad-task" {
+  name   = "nomad-task"
+  policy = <<EOH
+# This section grants all access on "secret/*". Further restrictions can be
+# applied to this broad policy, as shown below.
+path "kv/data/*" {
+  capabilities = ["create", "read", "update", "delete", "list"]
+}
+EOH
+}
+
+# Policy for nomad tokens
+resource "vault_policy" "nomad-token" {
+  name   = "nomad-server"
+  policy = <<EOH
+# Allow creating tokens under "nomad-cluster" token role. The token role name
+# should be updated if "nomad-cluster" is not used.
+path "auth/token/create/nomad-cluster" {
+  capabilities = ["update"]
+}
+
+# Allow looking up "nomad-cluster" token role. The token role name should be
+# updated if "nomad-cluster" is not used.
+path "auth/token/roles/nomad-cluster" {
+  capabilities = ["read"]
+}
+
+# Allow looking up the token passed to Nomad to validate # the token has the
+# proper capabilities. This is provided by the "default" policy.
+path "auth/token/lookup-self" {
+  capabilities = ["read"]
+}
+
+# Allow looking up incoming tokens to validate they have permissions to access
+# the tokens they are requesting. This is only required if
+# `allow_unauthenticated` is set to false.
+path "auth/token/lookup" {
+  capabilities = ["update"]
+}
+
+# Allow revoking tokens that should no longer exist. This allows revoking
+# tokens for dead tasks.
+path "auth/token/revoke-accessor" {
+  capabilities = ["update"]
+}
+
+# Allow checking the capabilities of our own token. This is used to validate the
+# token upon startup.
+path "sys/capabilities-self" {
+  capabilities = ["update"]
+}
+
+# Allow our own token to be renewed.
+path "auth/token/renew-self" {
+  capabilities = ["update"]
+}
+
+# This section grants all access on "secret/*". Further restrictions can be
+# applied to this broad policy, as shown below.
+path "kv/data/*" {
+  capabilities = ["create", "read", "update", "delete", "list"]
+}
+EOH
+}
+
+# Create a vault token for Nomad
+# resource "vault_token" "nomad-token" {
+#   policies  = ["nomad-server"]
+#   period    = "72h"
+#   no_parent = true
+# }

@@ -10,7 +10,7 @@
 #
 # mysql {
 #   # How to give access here?
-#   connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
+#   connection_url = "{{username}}:{{password}}@tcp(localhost:3306)"
 #   username = ""
 #   password = ""
 # }
@@ -1,38 +0,0 @@
-# Configure Consul provider
-provider "consul" {
-  address = var.consul_address
-}
-
-# Get Nomad client from Consul
-data "consul_service" "nomad" {
-  name = "nomad-client"
-}
-
-# Get Vault client from Consul
-data "consul_service" "vault" {
-  name = "vault"
-  tag  = "active"
-}
-
-locals {
-  # Get Nomad address from Consul
-  nomad_node = data.consul_service.nomad.service[0]
-  nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
-
-  # Get Vault address from Consul
-  vault_node = data.consul_service.vault.service[0]
-  vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
-}
-
-# Configure the Nomad provider
-provider "nomad" {
-  address   = local.nomad_node_address
-  secret_id = var.nomad_secret_id
-  region    = "global"
-}
-
-# Configure the Vault provider
-provider "vault" {
-  address = local.vault_node_address
-  token   = var.vault_token
-}
@@ -1,17 +0,0 @@
-variable "consul_address" {
-  type    = string
-  default = "http://n1.thefij:8500"
-}
-
-variable "nomad_secret_id" {
-  type        = string
-  description = "Secret ID for ACL bootstrapped Nomad"
-  sensitive   = true
-  default     = ""
-}
-
-variable "vault_token" {
-  type      = string
-  sensitive = true
-  default   = ""
-}
@@ -1,8 +0,0 @@
-resource "vault_auth_backend" "userpass" {
-  type = "userpass"
-
-  tune {
-    max_lease_ttl      = "1h"
-    listing_visibility = "unauth"
-  }
-}
@@ -1,83 +0,0 @@
-resource "vault_policy" "admin" {
-  name = "admin"
-
-  policy = <<EOF
-path "*" {
-  capabilities = ["create", "read", "update", "delete", "list", "sudo"]
-}
-EOF
-}
-
-resource "vault_policy" "nomad-deploy" {
-  name   = "nomad-deploy"
-  policy = <<EOH
-path "nomad/creds/nomad-deploy" {
-  capabilities = ["read"]
-}
-EOH
-}
-
-# Policy for clusters
-resource "vault_policy" "nomad-task" {
-  name   = "nomad-task"
-  policy = <<EOH
-path "kv/data/*" {
-  # Does this need create, update, delete?
-  capabilities = ["create", "read", "update", "delete", "list"]
-}
-EOH
-}
-
-# Policy for nomad tokens
-resource "vault_policy" "nomad-server" {
-  name   = "nomad-server"
-  policy = <<EOH
-# Allow creating tokens under "nomad-cluster" token role. The token role name
-# should be updated if "nomad-cluster" is not used.
-path "auth/token/create/nomad-cluster" {
-  capabilities = ["update"]
-}
-
-# Allow looking up "nomad-cluster" token role. The token role name should be
-# updated if "nomad-cluster" is not used.
-path "auth/token/roles/nomad-cluster" {
-  capabilities = ["read"]
-}
-
-# Allow looking up the token passed to Nomad to validate # the token has the
-# proper capabilities. This is provided by the "default" policy.
-path "auth/token/lookup-self" {
-  capabilities = ["read"]
-}
-
-# Allow looking up incoming tokens to validate they have permissions to access
-# the tokens they are requesting. This is only required if
-# `allow_unauthenticated` is set to false.
-path "auth/token/lookup" {
-  capabilities = ["update"]
-}
-
-# Allow revoking tokens that should no longer exist. This allows revoking
-# tokens for dead tasks.
-path "auth/token/revoke-accessor" {
-  capabilities = ["update"]
-}
-
-# Allow checking the capabilities of our own token. This is used to validate the
-# token upon startup.
-path "sys/capabilities-self" {
-  capabilities = ["update"]
-}
-
-# Allow our own token to be renewed.
-path "auth/token/renew-self" {
-  capabilities = ["update"]
-}
-
-# This section grants all access on "secret/*". Further restrictions can be
-# applied to this broad policy, as shown below.
-path "kv/data/*" {
-  capabilities = ["create", "read", "update", "delete", "list"]
-}
-EOH
-}
@@ -13,14 +13,7 @@ all:
           group: "bin"
           mode: "0755"
           read_only: false
-        - name: lldap-data
-          path: /srv/volumes/lldap
-          owner: "root"
-          group: "bin"
-          mode: "0755"
-          read_only: false
     n2.thefij:
-      nomad_node_class: ingress
       nomad_node_role: both
       nomad_unique_host_volumes:
         - name: nextcloud-data
@@ -35,25 +28,15 @@ all:
           group: "bin"
           mode: "0755"
           read_only: false
-        - name: sonarr-data
-          path: /srv/volumes/sonarr
+        - name: authentik-data
+          path: /srv/volumes/gitea
           owner: "root"
           group: "bin"
           mode: "0755"
           read_only: false
-        - name: nzbget-data
-          path: /srv/volumes/nzbget
-          owner: "root"
-          group: "bin"
-          mode: "0755"
-          read_only: false
-    # n3.thefij:
-    #   nomad_node_class: ingress
-    #   nomad_node_role: both
-    # pi3:
-    #   nomad_node_role: client
-    # pi4:
-    #   nomad_node_role: client
+    n3.thefij:
+      nomad_node_class: ingress
+      nomad_node_role: both

 consul_instances:
   children:

@@ -4,7 +4,6 @@
 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
nomad/backups/backup.nomad | 136 (new file)
@@ -0,0 +1,136 @@
+variable "nextcloud_backup" {
+  type        = string
+  description = "HCL config for Restic Scheduler jobs"
+}
+
+job "backup" {
+  datacenters = ["dc1"]
+  type        = "system"
+
+  constraint {
+    attribute = "${node.unique.name}"
+    # Only node with a backup job so far
+    # Remove when backing up all nodes
+    value = "n2"
+  }
+
+  group "backup" {
+
+    network {
+      mode = "bridge"
+
+      port "metrics" {
+        to = 8080
+      }
+    }
+
+    volume "all-volumes" {
+      type      = "host"
+      read_only = true
+      source    = "all-volumes"
+    }
+
+    service {
+      port = "metrics"
+
+      # Add connect to mysql
+      connect {
+        sidecar_service {
+          proxy {
+            local_service_port = 8080
+
+            upstreams {
+              destination_name = "mysql-server"
+              local_bind_port  = 6060
+            }
+
+            config {
+              protocol = "tcp"
+            }
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu    = 50
+            memory = 50
+          }
+        }
+      }
+
+      meta {
+        metrics_addr = "${NOMAD_ADDR_metrics}"
+      }
+    }
+
+    task "backup" {
+      driver = "docker"
+
+      volume_mount {
+        volume      = "all-volumes"
+        destination = "/data"
+        read_only   = true
+      }
+
+      config {
+        image = "iamthefij/resticscheduler"
+        ports = ["metrics"]
+        args = [
+          "/jobs/node-jobs.hcl",
+        ]
+
+        mount {
+          type   = "bind"
+          target = "/jobs"
+          source = "jobs"
+        }
+      }
+
+      vault {
+        policies = [
+          "access-tables",
+          "nomad-task",
+        ]
+      }
+
+      env = {
+        "MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
+        "MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
+      }
+
+      template {
+        # Probably want to use database credentials that have access to dump all tables
+        data = <<EOF
+{{ with secret "kv/data/nextcloud" }}
+MYSQL_DATABASE={{ .Data.data.db_name }}
+MYSQL_USER={{ .Data.data.db_user }}
+MYSQL_PASSWORD={{ .Data.data.db_pass }}
+{{ end }}
+{{ with secret "kv/data/backups" }}
+BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
+{{ end }}
+EOF
+        destination = "secrets/db.env"
+        env         = true
+      }
+
+      template {
+        # Build jobs based on node
+        data = <<EOF
+# Current node is {{ env "node.unique.name" }}
+{{ range service "nextcloud" }}
+# Nextcloud .Node {{ .Node }}
+{{ if eq .Node (env "node.unique.name") }}
+${var.nextcloud_backup}
+{{ end }}{{ end }}
+EOF
+        destination = "jobs/node-jobs.hcl"
+      }
+
+      resources {
+        cpu    = 50
+        memory = 256
+      }
+    }
+  }
+}
nomad/backups/backups.tf | 25 (new file)
@@ -0,0 +1,25 @@
+locals {
+  nextcloud_backup = file("${path.module}/jobs/nextcloud.hcl")
+}
+
+resource "nomad_job" "backups" {
+  hcl2 {
+    enabled = true
+    vars = {
+      "nextcloud_backup" = "${local.nextcloud_backup}",
+    }
+  }
+
+  jobspec = file("${path.module}/backup.nomad")
+}
+
+resource "nomad_job" "backups-oneoff" {
+  hcl2 {
+    enabled = true
+    vars = {
+      "nextcloud_backup" = "${local.nextcloud_backup}",
+    }
+  }
+
+  jobspec = file("${path.module}/oneoff.nomad")
+}
@@ -1,11 +1,23 @@
 job "Consul" {
-  schedule = "0 * * * *"
+  schedule = "0 0 * * *"

   config {
-    repo = "rclone::ftp,env_auth:/nomad/consul"
+    # TODO: Backup to a meaningful location, this is just for testing
+    repo = "/local/repo"
+    # Read from secret file
     passphrase = env("BACKUP_PASSPHRASE")
   }

+  # Remove when using a proper backup destination
+  task "Create dir for repo" {
+    pre_script {
+      on_backup = "echo 'Backing up something'"
+    }
+    pre_script {
+      on_backup = "mkdir -p /local/repo"
+    }
+  }
+
   task "Use consul snapshots" {
     pre_script {
       on_backup = "mkdir -p /local/consul"

@@ -1,11 +1,23 @@
 job "Nextcloud" {
-  schedule = "0 * * * *"
+  schedule = "* * * * *"

   config {
-    repo = "rclone::ftp,env_auth:/nomad/nextcloud"
+    # TODO: Backup to a meaningful location, this is just for testing
+    repo = "/local/repo"
+    # Read from secret file
     passphrase = env("BACKUP_PASSPHRASE")
   }

+  # Remove when using a proper backup destination
+  task "Create dir for repo" {
+    pre_script {
+      on_backup = "echo 'Backing up something'"
+    }
+    pre_script {
+      on_backup = "mkdir -p /local/repo"
+    }
+  }
+
   mysql "Backup database" {
     hostname = env("MYSQL_HOST")
     port     = env("MYSQL_PORT")
nomad/backups/oneoff.nomad | 131 (new file)
@@ -0,0 +1,131 @@
+variable "nextcloud_backup" {
+  type        = string
+  description = "HCL config for Restic Scheduler jobs"
+}
+
+job "backup-oneoff" {
+  datacenters = ["dc1"]
+  type        = "batch"
+
+  parameterized {
+    meta_required = ["job_name"]
+    meta_optional = ["task", "snapshot"]
+
+  }
+
+  meta {
+    task     = "backup"
+    snapshot = "latest"
+  }
+
+  group "nextcloud" {
+    count = 1
+
+    network {
+      mode = "bridge"
+    }
+
+    volume "nextcloud-data" {
+      type      = "host"
+      read_only = true
+      source    = "nextcloud-data"
+    }
+
+    volume "gitea-data" {
+      type      = "host"
+      read_only = true
+      source    = "gitea-data"
+    }
+
+    volume "authentik-data" {
+      type      = "host"
+      read_only = true
+      source    = "authentik-data"
+    }
+
+    service {
+      connect {
+        sidecar_service {
+          proxy {
+            upstreams {
+              destination_name = "mysql-server"
+              local_bind_port  = 6060
+            }
+
+            config {
+              protocol = "tcp"
+            }
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu    = 50
+            memory = 50
+          }
+        }
+      }
+    }
+
+    task "backup" {
+      driver = "docker"
+
+      volume_mount {
+        volume      = "nextcloud-data"
+        destination = "/data/nextcloud"
+        read_only   = false
+      }
+
+      volume_mount {
+        volume      = "gitea-data"
+        destination = "/data/gitea"
+        read_only   = false
+      }
+
+      volume_mount {
+        volume      = "authentik-data"
+        destination = "/data/authentik"
+        read_only   = false
+      }
+
+      config {
+        image = "iamthefij/resticscheduler"
+        ports = ["backup"]
+        args = [
+          "-once",
+          "-${NOMAD_META_task}",
+          "${NOMAD_META_job_name}",
+          "/jobs/nextcloud.hcl",
+        ]
+
+        mount {
+          type   = "bind"
+          target = "/jobs"
+          source = "jobs"
+        }
+      }
+
+      env = {
+        "MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
+        "MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
+        # TODO: Add user with access to all databases or variables for each user
+        "MYSQL_DATABASE" = "nextcloud"
+        "MYSQL_USER"     = "nextcloud"
+        "MYSQL_PASSWORD" = "nextcloud"
+
+        # TODO: Something from vault
+        "BACKUP_PASSPHRASE" = "secretpass"
+      }
+
+      template {
+        data        = var.nextcloud_backup
+        destination = "jobs/nextcloud.hcl"
+      }
+
+      resources {
+        cpu    = 50
+        memory = 256
+      }
+    }
+  }
+}

@@ -4,7 +4,6 @@
 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
@@ -23,7 +23,6 @@ job "blocky" {
     }

     port "api" {
-      host_network = "loopback"
       to = "4000"
     }
   }
@@ -69,8 +68,7 @@ job "blocky" {
       sidecar_task {
        resources {
           cpu        = 50
-          memory     = 20
-          memory_max = 50
+          memory     = 50
         }
       }
     }
@@ -85,7 +83,7 @@ job "blocky" {
      }
    }

-    task "blocky" {
+    task "main" {
       driver = "docker"

       config {
@@ -101,14 +99,12 @@ job "blocky" {

       resources {
         cpu        = 50
-        memory     = 50
-        memory_max = 100
+        memory     = 100
       }

       template {
         data        = var.config_data
         destination = "app/config.yml"
-        splay       = "1m"
       }
     }
   }
@@ -17,15 +17,6 @@ upstream:
     - https://dns10.quad9.net/dns-query
     - tcp-tls:dns10.quad9.net

-conditional:
-  mapping:
-    home.arpa: 192.168.2.1
-    in-addr.arpa: 192.168.2.1
-    iot: 192.168.2.1
-    local: 192.168.2.1
-    thefij: 192.168.2.1
-    .: 192.168.2.1
-
 blocking:
   blackLists:
     ads:
@@ -1,80 +0,0 @@
----
-- name: Bootstrap Consul values
-  hosts: consul_instances
-  gather_facts: false
-
-  vars_files:
-    - consul_values.yml
-
-  tasks:
-    - name: Add values
-      delegate_to: localhost
-      run_once: true
-      block:
-        - name: Install python-consul
-          pip:
-            name: python-consul
-            extra_args: --index-url https://pypi.org/simple
-
-        - name: Write values
-          consul_kv:
-            host: "{{ inventory_hostname }}"
-            key: "{{ item.key }}"
-            value: "{{ item.value }}"
-          loop: "{{ consul_values | default({}) | dict2items }}"
-
-- name: Bootstrap Vault values
-  hosts: vault_instances
-  gather_facts: false
-
-  vars_files:
-    - ./vault_hashi_vault_values.yml
-
-  tasks:
-    - name: Bootstrap Vault secrets
-      delegate_to: localhost
-      run_once: true
-      block:
-        - name: Install hvac
-          pip:
-            name: hvac
-            extra_args: --index-url https://pypi.org/simple
-
-        - name: Check mount
-          community.hashi_vault.vault_read:
-            url: "http://{{ inventory_hostname }}:8200"
-            token: "{{ root_token }}"
-            path: "/sys/mounts/kv"
-          ignore_errors: true
-          register: check_mount
-
-        - name: Create kv mount
-          community.hashi_vault.vault_write:
-            url: "http://{{ inventory_hostname }}:8200"
-            token: "{{ root_token }}"
-            path: "/sys/mounts/kv"
-            data:
-              type: kv-v2
-          when: check_mount is not succeeded
-
-        - name: Write values
-          no_log: true
-          community.hashi_vault.vault_write:
-            url: "http://{{ inventory_hostname }}:8200"
-            token: "{{ root_token }}"
-            path: "kv/data/{{ item.key }}"
-            data:
-              data:
-                "{{ item.value }}"
-          loop: "{{ hashi_vault_values | default({}) | dict2items }}"
-          retries: 2
-          delay: 10
-
-        - name: Write userpass
-          no_log: true
-          community.hashi_vault.vault_write:
-            url: "http://{{ inventory_hostname }}:8200"
-            token: "{{ root_token }}"
-            path: "auth/userpass/users/{{ item.name }}"
-            data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
-          loop: "{{ vault_userpass }}"
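Note: the vars files the playbook above loads (consul_values.yml, vault_hashi_vault_values.yml) are not included in this diff. A minimal sketch of the shape the dict2items loops expect, with purely hypothetical values (the mysql/grafana key names are taken from templates elsewhere in this diff):

# vault_hashi_vault_values.yml (sketch; every value here is a placeholder)
hashi_vault_values:
  mysql:
    root_password: not-a-real-password   # read back via kv/data/mysql
  grafana:
    admin_pw: not-a-real-password        # read back via kv/data/grafana

vault_userpass:
  - name: example-user                   # hypothetical user entry
    password: not-a-real-password
    policies: default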
@@ -1,64 +0,0 @@
----
-- name: Delete Consul data
-  hosts: consul_instances
-
-  tasks:
-    - name: Stop consul
-      systemd:
-        name: consul
-        state: stopped
-      become: true
-
-    - name: Stop vault
-      systemd:
-        name: vault
-        state: stopped
-      become: true
-
-    - name: Remove data dir
-      file:
-        path: /opt/consul
-        state: absent
-      become: true
-
-- name: Delete Nomad data
-  hosts: nomad_instances
-
-  tasks:
-    - name: Stop nomad
-      systemd:
-        name: nomad
-        state: stopped
-      become: true
-
-    - name: Kill nomad
-      shell:
-        cmd: systemctl kill nomad
-      become: true
-
-    - name: Stop all containers
-      shell:
-        cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
-      become: true
-
-    - name: Remove all containers
-      shell:
-        cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
-      become: true
-
-    - name: Unmount secrets
-      shell:
-        cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
-      become: true
-
-    - name: Remove data dir
-      file:
-        path: /var/nomad
-        state: absent
-      become: true
-
-    - name: Remove data dir
-      file:
-        path: /opt/nomad/data
-        state: absent
-      become: true
@@ -2,11 +2,49 @@ module "databases" {
   source = "./databases"
 }

-module "core" {
-  source = "./core"
+module "blocky" {
+  source = "./blocky"

   base_hostname = var.base_hostname

-  # Metrics and Blocky depend on databases
   depends_on = [module.databases]
 }
+
+module "traefik" {
+  source = "./traefik"
+
+  consul_address = var.consul_address
+  base_hostname  = var.base_hostname
+}
+
+module "metrics" {
+  source = "./metrics"
+
+  consul_address = var.consul_address
+}
+
+module "loki" {
+  source = "./levant"
+
+  template_path = "service.nomad"
+  variables = {
+    name         = "loki"
+    image        = "grafana/loki:2.2.1"
+    service_port = 3100
+    ingress      = true
+    sticky_disk  = true
+    templates = jsonencode([
+      {
+        data = file("./loki-config.yml")
+        dest = "/etc/loki/local-config.yaml"
+      }
+    ])
+  }
+}
+
+resource "nomad_job" "syslog-ng" {
+  hcl2 {
+    enabled = true
+  }
+
+  jobspec = file("${path.module}/syslogng.nomad")
+}
@@ -1,59 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.16.2"
-  hashes = [
-    "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
-    "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
-    "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
-    "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
-    "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
-    "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
-    "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
-    "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
-    "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
-    "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
-    "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
-    "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
-    "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/external" {
-  version = "2.2.2"
-  hashes = [
-    "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
-    "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
-    "zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
-    "zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
-    "zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
-    "zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
-    "zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
-    "zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
-    "zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
-    "zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
-    "zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.19"
-  hashes = [
-    "h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
-    "zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
-    "zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
-    "zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
-    "zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
-    "zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
-    "zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
-    "zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
-    "zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
-    "zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
-    "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
-  ]
-}
@@ -1,55 +0,0 @@
-job "ddclient" {
-  datacenters = ["dc1"]
-  type        = "service"
-
-  group "ddclient" {
-
-    task "ddclient" {
-      driver = "docker"
-      config {
-        image = "linuxserver/ddclient:3.9.1"
-
-        mount {
-          type   = "bind"
-          source = "secrets/ddclient.conf"
-          target = "/config/ddclient.conf"
-        }
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
-      template {
-        data = <<EOH
-daemon=900
-ssl=yes
-use=web
-
-protocol=cloudflare,
-zone={{ key "ddclient/zone" }},
-ttl=1,
-{{ with secret "kv/data/cloudflare" -}}
-login={{ .Data.data.api_user }},
-password={{ .Data.data.api_key }}
-# login=token,
-# password={{ .Data.data.api_token_dns_edit_all }}
-{{ end -}}
-
-{{ key "ddclient/domain" }}
-EOH
-        destination = "secrets/ddclient.conf"
-        change_mode = "restart"
-      }
-
-      resources {
-        cpu        = 50
-        memory     = 50
-        memory_max = 100
-      }
-    }
-  }
-}
@@ -1,140 +0,0 @@
-job "lldap" {
-  datacenters = ["dc1"]
-  type        = "service"
-
-  group "lldap" {
-
-    network {
-      mode = "bridge"
-
-      port "web" {
-        host_network = "loopback"
-        to           = 17170
-      }
-
-      port "ldap" {
-        host_network = "loopback"
-        to           = 3890
-      }
-    }
-
-    volume "lldap-data" {
-      type      = "host"
-      read_only = false
-      source    = "lldap-data"
-    }
-
-    service {
-      name = "lldap"
-      port = "ldap"
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 3890
-
-            config {
-              protocol = "tcp"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu    = 50
-            memory = 20
-          }
-        }
-      }
-    }
-
-    service {
-      name = "ldap-admin"
-      port = "web"
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 17170
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu    = 20
-            memory = 20
-          }
-        }
-      }
-
-      tags = [
-        "traefik.enable=true",
-        "traefik.http.routers.ldap-admin.entryPoints=websecure",
-      ]
-    }
-
-    task "lldap" {
-      driver = "docker"
-
-      volume_mount {
-        volume      = "lldap-data"
-        destination = "/data"
-        read_only   = false
-      }
-
-      config {
-        image = "nitnelave/lldap"
-        ports = ["ldap", "web"]
-        args  = ["run", "--config-file", "/lldap_config.toml"]
-
-        mount {
-          type   = "bind"
-          source = "secrets/lldap_config.toml"
-          target = "/lldap_config.toml"
-        }
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
-      template {
-        data = <<EOH
-database_url = "sqlite:///data/users.db?mode=rwc"
-key_file = "/data/private_key"
-ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
-{{ with secret "kv/data/lldap" -}}
-jwt_secret = "{{ .Data.data.jwt_secret }}"
-ldap_user_dn = "{{ .Data.data.admin_user }}"
-ldap_user_email = "{{ .Data.data.admin_email }}"
-ldap_user_pass = "{{ .Data.data.admin_password }}"
-{{ end -}}
-{{ with secret "kv/data/smtp" -}}
-[smtp_options]
-enable_password_reset = true
-server = "{{ .Data.data.server }}"
-port = {{ .Data.data.port }}
-tls_required = {{ .Data.data.tls }}
-user = "{{ .Data.data.user }}"
-password = "{{ .Data.data.password }}"
-{{ with secret "kv/data/lldap" -}}
-from = "{{ .Data.data.smtp_from }}"
-reply_to = "{{ .Data.data.smtp_reply_to }}"
-{{ end -}}
-{{ end -}}
-EOH
-        destination = "secrets/lldap_config.toml"
-        change_mode = "restart"
-      }
-
-      resources {
-        cpu        = 10
-        memory     = 20
-        memory_max = 100
-      }
-    }
-  }
-}
@@ -1,134 +0,0 @@
-
-module "blocky" {
-  source = "./blocky"
-
-  base_hostname = var.base_hostname
-  # Not in this module
-  # depends_on = [module.databases]
-}
-
-module "traefik" {
-  source = "./traefik"
-
-  base_hostname = var.base_hostname
-}
-
-module "nomad_login" {
-  source = "../levant"
-
-  template_path = "service.nomad"
-  variables = {
-    name         = "nomad-login"
-    image        = "iamthefij/nomad-vault-login"
-    service_port = 5000
-    ingress      = true
-    ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
-    env = jsonencode({
-      VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
-    })
-  }
-}
-
-module "metrics" {
-  source = "./metrics"
-  # Not in this module
-  # depends_on = [module.databases]
-}
-
-module "loki" {
-  source = "../levant"
-
-  template_path = "service.nomad"
-  variables = {
-    name         = "loki"
-    image        = "grafana/loki:2.2.1"
-    service_port = 3100
-    ingress      = true
-    sticky_disk  = true
-    healthcheck  = "/ready"
-    templates = jsonencode([
-      {
-        data = file("${path.module}/loki-config.yml")
-        dest = "/etc/loki/local-config.yaml"
-      }
-    ])
-  }
-}
-
-resource "consul_config_entry" "loki_intent" {
-  name = "loki"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action     = "allow"
-        Name       = "grafana"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "promtail"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "syslogng-promtail"
-        Precedence = 9
-        Type       = "consul"
-      },
-    ]
-  })
-}
-
-resource "nomad_job" "syslog-ng" {
-  jobspec = file("${path.module}/syslogng.nomad")
-}
-
-resource "nomad_job" "ddclient" {
-  jobspec = file("${path.module}/ddclient.nomad")
-}
-
-resource "nomad_job" "lldap" {
-  jobspec = file("${path.module}/lldap.nomad")
-}
-
-resource "consul_config_entry" "syslogng_promtail_intent" {
-  name = "syslogng-promtail"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action     = "allow"
-        Name       = "syslogng"
-        Precedence = 9
-        Type       = "consul"
-      },
-    ]
-  })
-}
-
-resource "consul_config_entry" "global_access" {
-  name = "*"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action     = "allow"
-        Name       = "traefik"
-        Precedence = 6
-        Type       = "consul"
-      },
-      {
-        Action     = "deny"
-        Name       = "*"
-        Precedence = 5
-        Type       = "consul"
-      },
-    ]
-  })
-}
@@ -1,150 +0,0 @@
-job "metrics" {
-  datacenters = ["dc1"]
-  type        = "system"
-
-  group "promtail" {
-
-    network {
-      mode = "bridge"
-
-      port "promtail" {
-        to = 9080
-      }
-    }
-
-    service {
-      name = "promtail"
-      port = "promtail"
-
-      meta {
-        metrics_addr    = "${NOMAD_ADDR_promtail}"
-        nomad_dc        = "${NOMAD_DC}"
-        nomad_node_name = "${node.unique.name}"
-      }
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 9080
-
-            upstreams {
-              destination_name = "loki"
-              local_bind_port  = 1000
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu    = 50
-            memory = 20
-          }
-        }
-      }
-
-      check {
-        type     = "http"
-        path     = "/metrics"
-        port     = "promtail"
-        interval = "10s"
-        timeout  = "10s"
-      }
-    }
-
-    task "promtail" {
-      driver = "docker"
-
-      config {
-        image = "grafana/promtail:2.2.1"
-        args  = ["-config.file=/etc/promtail/promtail.yml"]
-        ports = ["promtail"]
-
-        # Mount config
-        mount {
-          type   = "bind"
-          target = "/etc/promtail/promtail.yml"
-          source = "local/promtail.yml"
-        }
-
-        # Bind mount host machine-id and log directories
-
-        mount {
-          type     = "bind"
-          source   = "/etc/machine-id"
-          target   = "/etc/machine-id"
-          readonly = true
-        }
-
-        mount {
-          type     = "bind"
-          source   = "/var/log/journal/"
-          target   = "/var/log/journal/"
-          readonly = true
-        }
-
-        mount {
-          type     = "bind"
-          source   = "/run/log/journal/"
-          target   = "/run/log/journal/"
-          readonly = true
-        }
-
-        # mount {
-        #   type     = "bind"
-        #   source   = "/var/log/audit"
-        #   target   = "/var/log/audit"
-        #   readonly = true
-        # }
-      }
-
-      template {
-        data = <<EOF
----
-server:
-  http_listen_address: 0.0.0.0
-  http_listen_port: 9080
-
-clients:
-  # loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
-  - url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
-
-scrape_configs:
-
-  - job_name: journal
-    journal:
-      json: false
-      max_age: 12h
-      path: /var/log/journal
-      labels:
-        job: systemd-journal
-    relabel_configs:
-      - source_labels: ['__journal__systemd_unit']
-        target_label: unit
-      - source_labels: ['__journal__hostname']
-        target_label: hostname
-      - source_labels: ['__journal__transport']
-        target_label: journal_transport
-      # Docker log labels
-      - source_labels: ['__journal_syslog_identifier']
-        target_label: syslog_identifier
-      - source_labels: ['__journal_image_name']
-        target_label: docker_image_name
-      - source_labels: ['__journal_container_name']
-        target_label: docker_container_name
-      - source_labels: ['__journal_container_id']
-        target_label: docker_container_id
-      - source_labels: ['__journal_com_docker_compose_project']
-        target_label: docker_compose_project
-      - source_labels: ['__journal_com_docker_compose_service']
-        target_label: docker_compose_service
-EOF
-        destination = "local/promtail.yml"
-      }
-
-      resources {
-        cpu    = 50
-        memory = 50
-      }
-    }
-  }
-}
@@ -1,209 +0,0 @@
-job "grafana" {
-  datacenters = ["dc1"]
-
-  group "grafana" {
-    count = 1
-
-    # TODO: Add backup task or job
-
-    network {
-      mode = "bridge"
-
-      port "web" {
-        host_network = "loopback"
-        to           = 3000
-      }
-    }
-
-    ephemeral_disk {
-      migrate = true
-      sticky  = true
-    }
-
-    service {
-      name = "grafana"
-      port = "web"
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 3000
-
-            upstreams {
-              destination_name = "prometheus"
-              local_bind_port  = 9090
-            }
-
-            upstreams {
-              destination_name = "loki"
-              local_bind_port  = 3100
-            }
-
-            upstreams {
-              destination_name = "mysql-server"
-              local_bind_port  = 6060
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu    = 50
-            memory = 50
-          }
-        }
-      }
-
-      check {
-        type     = "http"
-        path     = "/"
-        port     = "web"
-        interval = "10s"
-        timeout  = "10s"
-      }
-
-      tags = [
-        "traefik.enable=true",
-      ]
-    }
-
-    task "grafana-bootstrap" {
-      driver = "docker"
-
-      lifecycle {
-        hook    = "prestart"
-        sidecar = false
-      }
-
-      config {
-        image = "mysql:8"
-        args = [
-          "/bin/bash",
-          "-c",
-          "/usr/bin/mysql --defaults-extra-file=/task/my.cnf < /task/bootstrap.sql",
-        ]
-
-        mount {
-          type   = "bind"
-          source = "local/"
-          target = "/task/"
-        }
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
-      template {
-        data = <<EOF
-[client]
-host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
-port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
-user=root
-{{ with secret "kv/data/mysql" }}
-password={{ .Data.data.root_password }}
-{{ end }}
-EOF
-        destination = "local/my.cnf"
-      }
-
-      template {
-        data = <<EOF
-{{ with secret "kv/data/grafana" -}}
-{{ if .Data.data.db_name -}}
-CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
-CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
-GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
-{{ else -}}
-SELECT 'NOOP';
-{{ end -}}
-{{ end -}}
-EOF
-        destination = "local/bootstrap.sql"
-      }
-
-      resources {
-        cpu    = 50
-        memory = 50
-      }
-    }
-
-    task "grafana" {
-      driver = "docker"
-
-      config {
-        image = "grafana/grafana:7.3.6"
-        ports = ["web"]
-
-        mount {
-          type   = "bind"
-          target = "/etc/grafana"
-          source = "local/config"
-        }
-      }
-
-      env = {
-        "GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel",
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
-      template {
-        data = <<EOF
-{{ with secret "kv/data/grafana" -}}
-GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }}
-GF_SMTP_USER={{ .Data.data.smtp_user }}
-GF_SMTP_PASSWORD={{ .Data.data.smtp_password }}
-GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }}
-GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }}
-GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }}
-{{ if .Data.data.db_name -}}
-# Database storage
-GF_DATABASE_TYPE=mysql
-GF_DATABASE_HOST={{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
-GF_DATABASE_NAME={{ .Data.data.db_name }}
-GF_DATABASE_USER={{ .Data.data.db_user }}
-GF_DATABASE_PASSWORD={{ .Data.data.db_pass }}
-{{ end -}}
-{{ end -}}
-{{ with secret "kv/data/slack" -}}
-SLACK_BOT_URL={{ .Data.data.bot_url }}
-SLACK_BOT_TOKEN={{ .Data.data.bot_token }}
-SLACK_HOOK_URL={{ .Data.data.hook_url }}
-{{ end -}}
-EOF
-        env         = true
-        destination = "secrets/conf.env"
-      }
-
-      %{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
-      template {
-        data = <<EOF
-${file(join("/", [module_path, "grafana", config_file]))}
-EOF
-        change_mode   = "signal"
-        change_signal = "SIGHUP"
-        destination   = "local/config/${config_file}"
-        # Change template delimiter for dashboard files that use JSON and have double curly braces and square braces
-        %{ if length(regexall("dashboard", config_file)) > 0 ~}
-        left_delimiter  = "<<<<"
-        right_delimiter = ">>>>"
-        %{ endif ~}
-      }
-      %{ endfor ~}
-
-      resources {
-        cpu    = 100
-        memory = 200
-      }
-    }
-  }
-}
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.17"
-  hashes = [
-    "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
-    "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
-    "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
-    "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
-    "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
-    "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
-    "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
-    "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
-    "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
-    "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
-    "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
-  ]
-}
@@ -1,302 +0,0 @@
-variable "base_hostname" {
-  type        = string
-  description = "Base hostname to serve content from"
-  default     = "dev.homelab"
-}
-
-job "traefik" {
-  datacenters = ["dc1"]
-  type        = "service"
-  priority    = 100
-
-  constraint {
-    attribute = "${node.class}"
-    value     = "ingress"
-  }
-
-  constraint {
-    distinct_hosts = true
-  }
-
-  update {
-    max_parallel = 1
-    # canary       = 1
-    # auto_promote = true
-    auto_revert = true
-  }
-
-  group "traefik" {
-    count = 1
-
-    network {
-      port "web" {
-        static = 80
-      }
-
-      port "websecure" {
-        static = 443
-      }
-
-      port "syslog" {
-        static = 514
-      }
-    }
-
-    ephemeral_disk {
-      migrate = true
-      sticky  = true
-    }
-
-    service {
-      name = "traefik"
-      port = "web"
-
-      check {
-        type     = "http"
-        path     = "/ping"
-        port     = "web"
-        interval = "10s"
-        timeout  = "2s"
-      }
-
-      connect {
-        native = true
-      }
-
-      tags = [
-        "traefik.enable=true",
-        "traefik.http.routers.traefik.entryPoints=websecure",
-        "traefik.http.routers.traefik.service=api@internal",
-      ]
-    }
-
-    task "traefik" {
-      driver = "docker"
-
-      config {
-        image = "traefik:2.6"
-
-        ports        = ["web", "websecure"]
-        network_mode = "host"
-
-        mount {
-          type   = "bind"
-          target = "/etc/traefik"
-          source = "local/config"
-        }
-
-        mount {
-          type   = "bind"
-          target = "/etc/traefik/usersfile"
-          source = "secrets/usersfile"
-        }
-      }
-
-      vault {
-        policies = ["access-tables", "nomad-task"]
-      }
-
-      template {
-        # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
-        left_delimiter  = "<<"
-        right_delimiter = ">>"
-        data = <<EOH
-[log]
-  level = "DEBUG"
-
-[entryPoints]
-  [entryPoints.web]
-    address = ":80"
-    [entryPoints.web.http]
-      [entryPoints.web.http.redirections]
-        [entryPoints.web.http.redirections.entrypoint]
-          to     = "websecure"
-          scheme = "https"
-
-  [entryPoints.websecure]
-    address = ":443"
-    [entryPoints.websecure.http.tls]
-      << if keyExists "traefik/acme/email" ->>
-      certResolver = "letsEncrypt"
-      [[entryPoints.websecure.http.tls.domains]]
-        main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
-      << end ->>
-
-  [entryPoints.metrics]
-    address = ":8989"
-
-  [entryPoints.syslogtcp]
-    address = ":514"
-
-  [entryPoints.syslogudp]
-    address = ":514/udp"
-
-[api]
-  dashboard = true
-
-[ping]
-  entrypoint = "web"
-
-[metrics]
-  [metrics.prometheus]
-    entrypoint = "metrics"
-    # manualRouting = true
-
-[providers.file]
-  directory = "/etc/traefik/conf"
-  watch     = true
-
-[providers.consulCatalog]
-  connectAware     = true
-  connectByDefault = true
-  exposedByDefault = false
-  defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
-  [providers.consulCatalog.endpoint]
-    address = "http://<< env "CONSUL_HTTP_ADDR" >>"
-
-<< if keyExists "traefik/acme/email" ->>
-[certificatesResolvers.letsEncrypt.acme]
-  email = "<< key "traefik/acme/email" >>"
-  # Store in /local because /secrets doesn't persist with ephemeral disk
-  storage = "/local/acme.json"
-  [certificatesResolvers.letsEncrypt.acme.dnsChallenge]
-    provider         = "cloudflare"
-    resolvers        = ["1.1.1.1:53", "8.8.8.8:53"]
-    delayBeforeCheck = 0
-<< end ->>
-EOH
-        destination = "local/config/traefik.toml"
-      }
-
-      template {
-        data = <<EOH
-{{ with secret "kv/data/cloudflare" }}
-CF_DNS_API_TOKEN={{ .Data.data.api_token_dns_edit }}
-CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
-{{ end }}
-EOH
-        destination = "secrets/cloudflare.env"
-        env         = true
-      }
-
-      template {
-        data = <<EOH
-[http]
-  [http.routers]
-    [http.routers.nomad]
-      entryPoints = ["websecure"]
-      # middlewares = []
-      service = "nomad"
-      rule = "Host(`nomad.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
-    [http.routers.consul]
-      entryPoints = ["websecure"]
-      # middlewares = []
-      service = "consul"
-      rule = "Host(`consul.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
-    [http.routers.vault]
-      entryPoints = ["websecure"]
-      # middlewares = []
-      service = "vault"
-      rule = "Host(`vault.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
-
-  [http.services]
-    {{ with service "nomad-client" -}}
-    [http.services.nomad]
-      [http.services.nomad.loadBalancer]
-      {{ range . -}}
-        [[http.services.nomad.loadBalancer.servers]]
-          url = "http://{{ .Address }}:{{ .Port }}"
-      {{ end }}
-    {{- end }}
-    {{ with service "consul" -}}
-    [http.services.consul]
-      [http.services.consul.loadBalancer]
-      {{ range . -}}
-        [[http.services.consul.loadBalancer.servers]]
-          # Not using .Port because that's an RPC port
-          url = "http://{{ .Address }}:8500"
-      {{ end }}
-    {{- end }}
-    {{ with service "vault" -}}
-    [http.services.vault]
-      [http.services.vault.loadBalancer]
-        [http.services.vault.loadBalancer.sticky.cookie]
-      {{ range . -}}
-        [[http.services.vault.loadBalancer.servers]]
-          url = "http://{{ .Address }}:{{ .Port }}"
-      {{ end }}
-    {{- end }}
-EOH
-        destination = "local/config/conf/route-hashi.toml"
-        change_mode = "noop"
-      }
-
-      template {
-        data = <<EOH
-{{ with service "syslogng" -}}
-[tcp.routers]
-  [tcp.routers.syslogtcp]
-    entryPoints = ["syslogtcp"]
-    service     = "syslogngtcp"
-    rule        = "HostSNI(`*`)"
-
-[tcp.services]
-  [tcp.services.syslogngtcp]
-    [tcp.services.syslogngtcp.loadBalancer]
-    {{ range . -}}
-      [[tcp.services.syslogngtcp.loadBalancer.servers]]
-        address = "{{ .Address }}:{{ .Port }}"
-    {{ end -}}
-{{ end }}
-
-{{ with service "syslogng" -}}
-[udp.routers]
-  [udp.routers.syslogudp]
-    entryPoints = ["syslogudp"]
-    service     = "syslogngudp"
-
-[udp.services]
-  [udp.services.syslogngudp]
-    [udp.services.syslogngudp.loadBalancer]
-    {{ range . -}}
-      [[udp.services.syslogngudp.loadBalancer.servers]]
-        address = "{{ .Address }}:{{ .Port }}"
-    {{ end -}}
-{{ end }}
-EOH
-        destination = "local/config/conf/route-syslog-ng.toml"
-        change_mode = "noop"
-      }
-
-      template {
-        data = <<EOH
-[http.middlewares]
-  {{ with secret "kv/data/traefik" }}
-  {{ if .Data.data.usersfile }}
-  [http.middlewares.basic-auth.basicAuth]
-    usersFile = "/etc/traefik/usersfile"
-  {{ end }}
-  {{ end }}
-EOH
-        destination = "local/config/conf/middlewares.toml"
-        change_mode = "noop"
-      }
-
-      template {
-        data = <<EOH
-{{ with secret "kv/data/traefik" }}
-{{ .Data.data.usersfile }}
-{{ end }}
-EOH
-        destination = "secrets/usersfile"
-        change_mode = "noop"
-      }
-
-      resources {
-        cpu        = 100
-        memory     = 100
-        memory_max = 500
-      }
-    }
-  }
-}
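Note: the traefik.toml template above gates ACME on keyExists "traefik/acme/email" and builds routing rules from keyOrDefault "global/base_hostname". A minimal sketch of seeding those Consul keys with the consul provider (key paths are from the template; the resource itself and its values are hypothetical, not part of this repo):

resource "consul_keys" "traefik_bootstrap" {
  # Hypothetical one-time bootstrap; values below are placeholders only.
  key {
    path  = "global/base_hostname"
    value = "example.com"
  }

  key {
    path  = "traefik/acme/email"
    value = "admin@example.com" # presence of this key enables the letsEncrypt resolver
  }
}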
@@ -1,16 +0,0 @@
-variable "base_hostname" {
-  type        = string
-  description = "Base hostname to serve content from"
-  default     = "dev.homelab"
-}
-
-resource "nomad_job" "traefik" {
-  hcl2 {
-    enabled = true
-    vars = {
-      "base_hostname" = "${var.base_hostname}",
-    }
-  }
-
-  jobspec = file("${path.module}/traefik.nomad")
-}
@@ -1,5 +0,0 @@
-variable "base_hostname" {
-  type        = string
-  description = "Base hostname to serve content from"
-  default     = "dev.homelab"
-}
@@ -1,40 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.15.1"
-  hashes = [
-    "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
-    "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
-    "zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
-    "zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
-    "zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
-    "zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
-    "zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
-    "zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
-    "zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
-    "zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
-    "zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
-    "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
-    "zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.17"
-  hashes = [
-    "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
-    "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
-    "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
-    "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
-    "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
-    "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
-    "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
-    "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
-    "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
-    "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
-    "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
-  ]
-}
@@ -25,6 +25,7 @@ job "adminer" {

             upstreams {
               destination_name = "mysql-server"
+              # TODO: how do I get these to not bind to the host eth0 address
               local_bind_port  = 4040
             }

@@ -1,7 +1,6 @@
 job "mysql-server" {
   datacenters = ["dc1"]
   type        = "service"
-  priority    = 80

   group "mysql-server" {
     count = 1
@@ -57,18 +56,6 @@ job "mysql-server" {
     task "mysql-server" {
       driver = "docker"

-      config {
-        image = "mysql:8"
-        ports = ["db"]
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       volume_mount {
         volume      = "mysql-data"
         destination = "/var/lib/mysql"
@@ -76,18 +63,14 @@ job "mysql-server" {
       }

       env = {
+        "MYSQL_ROOT_PASSWORD" = "supersecretpassword"
         # Allow connections from any host
         "MYSQL_ROOT_HOST" = "%"
       }

-      template {
-        data = <<EOH
-{{ with secret "kv/data/mysql" }}
-MYSQL_ROOT_PASSWORD={{ .Data.data.root_password }}
-{{ end }}
-EOH
-        destination = "secrets/db.env"
-        env         = true
-      }
+      config {
+        image = "mysql:8"
+        ports = ["db"]
+      }

       resources {
|
|||||||
Precedence = 9
|
Precedence = 9
|
||||||
Type = "consul"
|
Type = "consul"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "grafana"
|
|
||||||
Precedence = 9
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
]
|
]
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@@ -1,7 +1,6 @@
 job "redis" {
   datacenters = ["dc1"]
   type        = "service"
-  priority    = 60

   group "cache" {
     count = 1
@@ -48,19 +47,18 @@ job "redis" {
       # }
     }

-    task "redis" {
+    task "main" {
       driver = "docker"

      config {
        image = "redis:6"
-        args  = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
+        args  = ["redis-server", "--save", "60", "1", "--loglevel", "warning"]
        ports = ["main"]
      }

      resources {
        cpu        = 100
-        memory     = 512
-        memory_max = 1024
+        memory     = 1024
      }
    }
  }
@@ -4,7 +4,6 @@
 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
@@ -5,7 +5,6 @@ provider "registry.terraform.io/hashicorp/consul" {
   version = "2.15.0"
   hashes = [
     "h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
-    "h1:tAb2gwW+oZ8/t2j7lExdqpNrxmaWsHbyA2crFWClPb0=",
     "zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
     "zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
     "zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
@@ -23,7 +22,6 @@ provider "registry.terraform.io/hashicorp/consul" {
 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
nomad/metrics/exporters.nomad (new file)
@@ -0,0 +1,343 @@
+job "metrics" {
+  datacenters = ["dc1"]
+  type        = "system"
+
+  group "exporters" {
+
+    network {
+      mode = "bridge"
+
+      port "cadvisor" {
+        host_network = "nomad-bridge"
+        to           = 8080
+      }
+
+      port "node_exporter" {
+        host_network = "nomad-bridge"
+        to           = 9100
+      }
+
+      port "promtail" {
+        host_network = "nomad-bridge"
+        to           = 9080
+      }
+
+      port "expose" {
+        host_network = "nomad-bridge"
+      }
+
+      port "cadvisor_envoy_metrics" {
+        host_network = "nomad-bridge"
+        to           = 9102
+      }
+    }
+
+    service {
+      name = "cadvisor"
+      port = "cadvisor"
+
+      meta {
+        metrics_addr       = "${NOMAD_ADDR_expose}"
+        envoy_metrics_addr = "${NOMAD_ADDR_cadvisor_envoy_metrics}"
+        nomad_dc           = "${NOMAD_DC}"
+        nomad_node_name    = "${node.unique.name}"
+      }
+
+      connect {
+        sidecar_service {
+          proxy {
+            local_service_port = 8080
+
+            expose {
+              path {
+                path            = "/metrics"
+                protocol        = "http"
+                local_path_port = 8080
+                listener_port   = "expose"
+              }
+            }
+
+            config {
+              envoy_prometheus_bind_addr = "0.0.0.0:9102"
+            }
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu    = 50
+            memory = 50
+          }
+        }
+      }
+
+      check {
+        type     = "http"
+        path     = "/metrics"
+        port     = "cadvisor"
+        interval = "10s"
+        timeout  = "10s"
+      }
+    }
+
+    task "cadvisor" {
+      driver = "docker"
+
+      config {
+        # image = "iamthefij/cadvisor:0.37.5"
+        image = "gcr.io/cadvisor/cadvisor:v0.39.3"
+        args  = ["--docker_only=true"]
+
+        ports = ["cadvisor"]
+
+        # volumes = [
+        #   "/:/rootfs:ro",
+        #   "/var/run:/var/run:rw",
+        #   "/sys:/sys:ro",
+        #   "/var/lib/docker/:/var/lib/docker:ro",
+        #   "/cgroup:/cgroup:ro",
+        #   "/etc/machine-id:/etc/machine-id:ro",
+        # ]
+
+        mount {
+          type     = "bind"
+          source   = "/"
+          target   = "/rootfs"
+          readonly = true
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/var/run"
+          target   = "/var/run"
+          readonly = false
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/sys"
+          target   = "/sys"
+          readonly = true
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/var/lib/docker"
+          target   = "/var/lib/docker"
+          readonly = true
+        }
+
+        # mount {
+        #   type     = "bind"
+        #   source   = "/cgroup"
+        #   target   = "/cgroup"
+        #   readonly = true
+        # }
+
+        mount {
+          type     = "bind"
+          source   = "/etc/machine-id"
+          target   = "/etc/machine-id"
+          readonly = true
+        }
+      }
+
+      resources {
+        cpu    = 50
+        memory = 100
+      }
+    }
+
+    service {
+      name = "nodeexporter"
+      port = "node_exporter"
+
+      meta {
+        metrics_addr    = "${NOMAD_ADDR_node_exporter}"
+        nomad_dc        = "${NOMAD_DC}"
+        nomad_node_name = "${node.unique.name}"
+      }
+
+      connect {
+        sidecar_service {
+          proxy {
+            local_service_port = 9100
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu    = 50
+            memory = 50
+          }
+        }
+      }
+
+      check {
+        type     = "http"
+        path     = "/metrics"
+        port     = "node_exporter"
+        interval = "10s"
+        timeout  = "10s"
+      }
+    }
+
+    task "node_exporter" {
+      driver = "docker"
+
+      config {
+        image = "prom/node-exporter:v1.0.1"
+        args  = ["--path.rootfs", "/host"]
+
+        ports = ["node_exporter"]
+
+        mount {
+          type     = "bind"
+          source   = "/"
+          target   = "/host"
+          readonly = true
+        }
+      }
+
+      resources {
+        cpu    = 50
+        memory = 50
+      }
+    }
+
+    service {
+      name = "promtail"
+      port = "promtail"
+
+      meta {
+        metrics_addr    = "${NOMAD_ADDR_promtail}"
+        nomad_dc        = "${NOMAD_DC}"
+        nomad_node_name = "${node.unique.name}"
+      }
+
+      connect {
+        sidecar_service {
+          proxy {
+            local_service_port = 9080
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu    = 50
+            memory = 50
+          }
+        }
+      }
+
+      check {
+        type     = "http"
+        path     = "/metrics"
+        port     = "promtail"
+        interval = "10s"
+        timeout  = "10s"
+      }
+    }
+
+    task "promtail" {
+      driver = "docker"
+
+      config {
+        image = "grafana/promtail:2.2.1"
+        args  = ["-config.file=/etc/promtail/promtail.yml"]
+        ports = ["promtail"]
+
+        # Mount config
+        mount {
+          type   = "bind"
+          target = "/etc/promtail/promtail.yml"
+          source = "local/promtail.yml"
+        }
+
+        # Bind mount host machine-id and log directories
+        mount {
+          type     = "bind"
+          source   = "/etc/machine-id"
+          target   = "/etc/machine-id"
+          readonly = true
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/var/log/journal/"
+          target   = "/var/log/journal/"
+          readonly = true
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/run/log/journal/"
+          target   = "/run/log/journal/"
+          readonly = true
+        }
+
+        mount {
+          type     = "bind"
+          source   = "/var/log/audit"
+          target   = "/var/log/audit"
+          readonly = true
+        }
+      }
+
+      template {
+        data = <<EOF
+---
+server:
+  http_listen_address: 0.0.0.0
+  http_listen_port: 9080
+
+clients:
+  - url: http://${NOMAD_UPSTREAM_ADDR_loki}/loki/api/v1/push
+
+scrape_configs:
+
+  - job_name: journal
+    journal:
+      json: false
+      max_age: 12h
+      path: /var/log/journal
+      labels:
+        job: systemd-journal
+    relabel_configs:
+      - source_labels: ['__journal__systemd_unit']
+        target_label: unit
+      - source_labels: ['__journal__hostname']
+        target_label: hostname
+      - source_labels: ['__journal__transport']
+        target_label: journal_transport
+      # Docker log labels
+      - source_labels: ['__journal_syslog_identifier']
+        target_label: syslog_identifier
+      - source_labels: ['__journal_image_name']
+        target_label: docker_image_name
+      - source_labels: ['__journal_container_name']
+        target_label: docker_container_name
+      - source_labels: ['__journal_container_id']
+        target_label: docker_container_id
+      - source_labels: ['__journal_com_docker_compose_project']
+        target_label: docker_compose_project
+      - source_labels: ['__journal_com_docker_compose_service']
+        target_label: docker_compose_service
+EOF
+        destination = "local/promtail.yml"
+      }
+
+      resources {
+        cpu    = 50
+        memory = 50
+      }
+    }
+  }
+}
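Note: each service above advertises a metrics_addr meta value, and the metrics module below adds a consul_address variable "for dynamic scraping". A minimal sketch of a Prometheus consul_sd scrape job that would consume that meta field (assumed wiring, not taken from this diff):

# prometheus.yml fragment (sketch)
scrape_configs:
  - job_name: consul-services
    consul_sd_configs:
      - server: 127.0.0.1:8500 # placeholder; the module's consul_address would go here
    relabel_configs:
      # Scrape the address each service advertises in its metrics_addr meta
      - source_labels: ['__meta_consul_service_metadata_metrics_addr']
        regex: (.+)
        target_label: __address__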
nomad/metrics/grafana.nomad (new file)
@@ -0,0 +1,101 @@
|
|||||||
|
job "grafana" {
|
||||||
|
datacenters = ["dc1"]
|
||||||
|
|
||||||
|
group "grafana" {
|
||||||
|
count = 1
|
||||||
|
|
||||||
|
# TODO: Add backup task or job
|
||||||
|
|
||||||
|
network {
|
||||||
|
mode = "bridge"
|
||||||
|
|
||||||
|
port "web" {
|
||||||
|
host_network = "loopback"
|
||||||
|
to = 3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ephemeral_disk {
|
||||||
|
migrate = true
|
||||||
|
sticky = true
|
||||||
|
}
|
||||||
|
|
||||||
|
service {
|
||||||
|
name = "grafana"
|
||||||
|
port = "web"
|
||||||
|
|
||||||
|
connect {
|
||||||
|
sidecar_service {
|
||||||
|
proxy {
|
||||||
|
local_service_port = 3000
|
||||||
|
|
||||||
|
upstreams {
|
||||||
|
destination_name = "prometheus"
|
||||||
|
local_bind_port = 9090
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sidecar_task {
|
||||||
|
resources {
|
||||||
|
cpu = 50
|
||||||
|
memory = 50
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
check {
|
||||||
|
type = "http"
|
||||||
|
path = "/"
|
||||||
|
port = "web"
|
||||||
|
interval = "10s"
|
||||||
|
timeout = "10s"
|
||||||
|
}
|
||||||
|
|
||||||
|
tags = [
|
||||||
|
"traefik.enable=true",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
task "grafana" {
|
||||||
|
driver = "docker"
|
||||||
|
|
||||||
|
config {
|
||||||
|
image = "grafana/grafana:7.3.6"
|
||||||
|
ports = ["web"]
|
||||||
|
|
||||||
|
mount {
|
||||||
|
type = "bind"
|
||||||
|
target = "/etc/grafana"
|
||||||
|
source = "local/config"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
env = {
|
||||||
|
"GF_SECURITY_ADMIN_PASSWORD" = "password",
|
||||||
|
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel",
|
||||||
|
}
|
||||||
|
|
||||||
|
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
|
||||||
|
template {
|
||||||
|
data = <<EOF
|
||||||
|
${file(join("/", [module_path, "grafana", config_file]))}
|
||||||
|
EOF
|
||||||
|
change_mode = "signal"
|
||||||
|
change_signal = "SIGHUP"
|
||||||
|
destination = "local/config/${config_file}"
|
||||||
|
# Change template delimeter for dashboard files that use json and have double curly braces and square braces
|
||||||
|
%{ if length(regexall(".*/dashboard/.*", config_file)) > 0 ~}
|
||||||
|
left_delimiter = "<<<<"
|
||||||
|
right_delimiter = ">>>>"
|
||||||
|
%{ endif ~}
|
||||||
|
}
|
||||||
|
%{ endfor ~}
|
||||||
|
|
||||||
|
resources {
|
||||||
|
cpu = 100
|
||||||
|
memory = 200
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
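Note: the `%{ for … }` blocks and `${file(…)}` calls in this jobspec are Terraform template directives, so the file only becomes valid Nomad HCL after rendering. A minimal sketch of the rendering side, assuming a `nomad_job` resource and a `module_path` argument like the `templatefile()` calls used elsewhere in this diff (the resource name here is hypothetical):

resource "nomad_job" "grafana" {
  # module_path feeds the fileset() loops inside grafana.nomad
  jobspec = templatefile("${path.module}/grafana.nomad", {
    module_path = path.module,
  })
}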
@ -281,18 +281,11 @@ log_queries =

 #################################### Auth Proxy ##########################
 [auth.proxy]
-{{ with service "traefik" -}}
 enabled = true
 header_name = X-WEBAUTH-USER
 header_property = username
 auto_sign_up = true
-{{- $last := len . | subtract 1 -}}
-{{- $services := . -}}
-whitelist = {{ range $i := loop $last -}}
-{{- with index $services $i }}{{ .Address }},{{ end -}}
-{{- end -}}
-{{- with index . $last }}{{ .Address }}{{ end -}}
-{{- end }}
+whitelist = 192.168.2.20

 #################################### Basic Auth ##########################
 [auth.basic]
File diff suppressed because it is too large
@ -3,7 +3,7 @@ apiVersion: 1

 datasources:
   - name: Loki
-    url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}
+    url: http://loki:3100
     type: loki
     access: proxy
     isDefault: false
@ -3,7 +3,7 @@ apiVersion: 1

 datasources:
   - name: Prometheus
-    url: http://{{ env "NOMAD_UPSTREAM_ADDR_prometheus" }}
+    url: http://prom:9090
     type: prometheus
     access: proxy
     isDefault: true
@ -1,6 +1,15 @@
 ---
-{{ with secret "kv/data/grafana" -}}
 notifiers:
+  # - name: Telegram
+  #   type: telegram
+  #   uid: telegram-1
+  #   org_id: 1
+  #   is_default: false
+  #   settings:
+  #     chatid: ${TELEGRAM_CHATID}
+  #     bottoken: ${TELEGRAM_BOTTOKEN}
+  #     uploadImage: true
+{{ with secret "kv/data/grafana" -}}
   - name: Personal email
     type: email
     uid: email-1
@ -1,10 +1,15 @@
-resource "nomad_job" "exporters" {
-  hcl2 {
-    enabled = true
+variable "consul_address" {
+  type        = string
+  description = "address of consul server for dynamic scraping"
 }

-  jobspec = file("${path.module}/exporters.nomad")
-}
+# resource "nomad_job" "exporters" {
+#   hcl2 {
+#     enabled = true
+#   }
+#
+#   jobspec = file("${path.module}/exporters.nomad")
+# }

 data "consul_nodes" "all-nodes" {
   query_options {
@ -15,6 +20,12 @@ data "consul_nodes" "all-nodes" {
 resource "nomad_job" "prometheus" {
   hcl2 {
     enabled = true
+    vars = {
+      # TODO: May not need this because we have an env variable for that
+      # "consul_address" = "${var.consul_address}",
+      # TODO: Should this be a list?
+      "consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
+    }
   }

   jobspec = file("${path.module}/prometheus.nomad")
@ -1,3 +1,9 @@
+variable "consul_address" {
+  type        = string
+  description = "Full address of Consul instance to get catalog from"
+  default     = "http://127.0.0.1:5400"
+}
+
 job "prometheus" {
   datacenters = ["dc1"]

@ -59,7 +65,7 @@ job "prometheus" {
       ports = ["web"]
       args = [
         "--config.file=/etc/prometheus/config/prometheus.yml",
-        "--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
+        "--storage.tsdb.path=/prometheus",
         "--web.listen-address=0.0.0.0:9090",
         "--web.console.libraries=/usr/share/prometheus/console_libraries",
         "--web.console.templates=/usr/share/prometheus/consoles",
@ -85,13 +91,27 @@ scrape_configs:
       - targets:
         - 0.0.0.0:9090

+  - job_name: "nomad_server"
+    metrics_path: "/v1/metrics"
+    params:
+      format:
+        - "prometheus"
+    consul_sd_configs:
+      # - server: "${var.consul_address}"
+      - server: "${CONSUL_HTTP_ADDR}"
+        services:
+          - "nomad"
+        tags:
+          - "http"
+
   - job_name: "nomad_client"
     metrics_path: "/v1/metrics"
     params:
       format:
         - "prometheus"
     consul_sd_configs:
-      - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
+      # - server: "${var.consul_address}"
+      - server: "${CONSUL_HTTP_ADDR}"
         services:
           - "nomad-client"

@ -101,7 +121,8 @@ scrape_configs:
      params:
       format:
         - "prometheus"
     consul_sd_configs:
-      - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
+      # - server: "${var.consul_address}"
+      - server: "${CONSUL_HTTP_ADDR}"
         services:
           - "consul"
     relabel_configs:
@ -112,7 +133,8 @@ scrape_configs:
   - job_name: "exporters"
     metrics_path: "/metrics"
     consul_sd_configs:
-      - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
+      # - server: "${var.consul_address}"
+      - server: "${CONSUL_HTTP_ADDR}"
     relabel_configs:
       - source_labels: [__meta_consul_service]
         action: drop
@ -134,7 +156,8 @@ scrape_configs:
   - job_name: "envoy"
     metrics_path: "/metrics"
     consul_sd_configs:
-      - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
+      # - server: "${var.consul_address}"
+      - server: "${CONSUL_HTTP_ADDR}"
     relabel_configs:
       - source_labels: [__meta_consul_service]
         action: keep
@ -160,7 +183,7 @@ scrape_configs:

       resources {
         cpu    = 100
-        memory = 300
+        memory = 200
       }
     }
   }
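The scrape-config hunks above all swap a consul-template interpolated agent address for a `${CONSUL_HTTP_ADDR}` shell-style variable expanded at config-render time. A minimal sketch of how that variable can reach the task environment, assuming the same template-to-env pattern used by the backup job removed later in this diff:

template {
  # Renders the local Consul agent address into an env file for the task.
  data        = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
  destination = "local/consul.env"
  env         = true
}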
@ -2,28 +2,26 @@
 # Manual edits may be lost in future updates.

 provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.16.2"
+  version = "2.15.0"
   hashes = [
-    "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
-    "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
-    "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
-    "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
-    "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
-    "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
-    "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
-    "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
-    "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
-    "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
-    "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
-    "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
-    "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
+    "h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
+    "zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
+    "zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
+    "zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
+    "zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
+    "zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
+    "zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
+    "zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
+    "zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
+    "zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
+    "zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
+    "zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
   ]
 }

 provider "registry.terraform.io/hashicorp/nomad" {
   version = "1.4.16"
   hashes = [
-    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
20 nomad/nextcloud/.terraform.lock.hcl (new file)
@ -0,0 +1,20 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/nomad" {
+  version = "1.4.16"
+  hashes = [
+    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
+    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
+    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
+    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
+    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
+    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
+    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
+    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
+    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
+    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
+    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
+    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
+  ]
+}
85 nomad/packer/cloud-config (new file)
@ -0,0 +1,85 @@
+#cloud-config
+
+hostname: node1
+
+users:
+  name: iamthefij
+  gecos: Ian Fijolek
+  groups: users, sudoers
+  # sudo: ALL=(ALL) NOPASSWD:ALL
+  ssh_authorized_keys:
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqVH0C0Vf5cA+QnUlkYHnJ9hWs6hUiOuoIOS7+8fSlK05Chy8WbLijE8MEA6R4dvkWtWnmx8bJnpwJl/mMHendX86ko879EonHNGSLBvbHeMJTjvSVmH2UdLCMhG4+nhj7WgAC7z7o/EtRohD0BQYFAkGaC7PYWSJMExi0sCStPjqjFdDHXrsrLR0Xho2tcLEsW6jZboj5D0j8fcFN2Yn3c3yiHdS3UqHatP1QwaqVLZnujcJZXpOBZqON45SoWy+N4c0Xm0bNc/cZLU3+cPnHKdwBMsJ17Np0CA9PWuC+/6CR/f1de4+SjcMnYtpNOQ0PMlKo0FH1Iim6a2zu6Ia7 ianfijolek-home
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXmLEpRejmI4+dWXW4bpFBXVNsdeyMQHPh57tFNqewCyuWLs0TQYoMlCVe90GYOLz670aeWQ98otImIOmkhgmhilntTP4fSWonAjdATh9sHfPP9ZdhlxoMlr2rcdkudNY2IZh0OxfBGXiwGwMiB57ZHBntls1xTcztWm95e19Ys7UnjY/ewwhKbNHz4ibhdCR37UjWE5fl0J28Iea1FPWJhOLlO0bo2iMolHQ2r1++eA0qFT1T6irLuXWEfzK10XDcDCxtyloBYSc6s5ICDbYda68eIqIcAhrwXwr1eAGvW+0Q/C0HQvg8nICS09Mz6BSRrHsfVtxF709yW/A9i7dW5LxL2KGQfyWLQs5CiimvSRFe2d932nnl0Yi0j4z4co5nJs5U3XGdZA7b+gN9iumNY91dofhOk46OlUdLoZ3nrVpCFGHsFiOkTpArgneCjiyImjk35WETJWytp68mTwQrepVHT4WwqvLi28sGdV2m+9IeJD/w535+xJ5GcNwZ4CYdFOte8z+2k4sVIT1qmUxB2wfcDKrPRTyUrXw8f1EcdXNB02yAq8RVzetVW5XrR1rJ1Ht+YhkH+553DPOSwqBMyzH6VBf6SPsqzwo3Z0ZuBEdBmLdh1o8Rrxu3+dApETUYBbeXQGkbYwEuhsEUk8+fnl5gp19sZSXgM/yrpk170w== ansible-home-devterm
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEgJzZmKLLA1D055xAGqyzaCnKo2s9aBeSPT9OY+7bDqHxYE/HUTp5zf3uO7yQ970/fI3XmDHoUE08ZyLOBXECPL0zsSljYx2tDivhEg8VwORfbp2J2cUVcd1EciZX36OuZWmRitqnYSzVglbRoAjGbmwap20vWjvsnc2mD9wiJl4Jj7VZsS8S97Di/Zos2ksLWeh7i8jrOxVXQtb6UGu3d1CLd/XXIkVSMIBSQBHwq5yB+13p1+RdMl7mosflzX6bm2gaB+bMBIXbak1BkQRxTS2FSdMDebJ9NCrj17R0SOJjtUCRfwG5gV2CGqp1E/WcEIfKGmHlaIMYx8B9MmDnZvcmoBAuZ83+xYPdzs6/b68VRMRonw9U+xLNjJTsNvQstyFx8afINABjrPrMdvMDmS6Of7GrXycKH6bZ9H2WZzK0WGFbrjz7WCl4WUTkbhmJlGqyq7USWx33eLgpexn0N0bR4HgsoFNvwsIFLpBMPuYtdsHKJ7Md4N8OPC/5PUj7enn9zr0jJm1SB4c3kyrw3V9dtUpEkZwD5UHMX3BYgZ8Mk/EUf1avbHs+wk2D94GgKKVKphNYUFJFeQ2MmvGIHXZqT5Y+LmolaCMeiQHRN4hR8lWZ3YEW1EPfb8EuEWwDtZBdkTdq3WBD/5dxLVGxKfWqyARd6DQKbrEjyUb2/Q== ansible-host
+    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNlio04oYMNhFNp1tH142OEPX6s6Foul7xJBEJrxA23 ifij@C02FQ1A8Q05Q
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC+mAHEhjAGuSrK7otwuh0P0cnzpde78XVxy6TnrVIcAxOii4tt66f8L+AkacWxBvST+OXfBR0EPMuIr8ya8KsHVA2f7W+XTcH/s3WwHuNvSIf7M3Pt1bGOVtU53HAxeMkNjjJ0kPK9MRiYeONIcj2wuZUwIywH+zCJZiRuYjWTEcz4h6hlaBfwdBtuC5WcoQYUwyuudT0iYFMvG4VA1fk6rqEtM9DkLuCGgXZJb6kn5v4ZSHo7tTelpEAyooaPX7Z94YQ87/oNdUi5NFQRZ6tZFQPLAm3U67IwS3u+Di84vt/rkDL/nnIVbzN9w+DFl60RHG61sf4rnFj4In7e/ypD/IY3NPjFgKJa1ItzoNlJcx+W5RtvP0TjqaUjkbVeUUzE6enRxRGRnHmW5Rz2VbkeQnywEwhy5EZ8I8/exWuvgP5UrW7byUUcMMIHRYMvTf4yqS3+ycoLxQpRncTg5oAY7gMpwXBoPOGFheiP7A6AAKTbZjgT+0uWxlZhxiERyeudDQyhWIQryDBtXtr0JgK/kudn3w4aFvf9Q+mR3hLepSIprHqBzgYTdIjBcX6yyn1CDZyBpI1DOKC2pApou/Pj1iM7nPRu5vjs/C27JaEbarn3nPYNSflUmn9kwKYP3p+HsKczPHhkphcJRGn8J+8BSJPTYTBmtsIMix2Ale/ObQ== WorkingCopy@IansiPad-28012021
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCl1REER7jgH2G0uSinEA48++6F+OffkyyLuBA6KXg38+i+BmRnwETzyaBYoahD/QD9lrfGicUXUM4Vp4cX/hYvWbXc4VVwYzMBpZ6w3ZW0jzLYlILU9e1iUnMqkKkVHd+rYS/djTEBA3qev6Sn1IBg9t/LSE1+zLn2xH15RusKsCOzL0o/yCiSWtiLipGuywaNt6ZcmiJQmk87HhS68uQIVQr9EoG2gcNJt/1Nd0ykuBWmLZq8QXp40P4TBDCIOWBjHLjaknm1+yroooAHV1oNaPymSMXgXK2HqdvvEjyUc6H1euIcWuU+vORiAxdZDaIQVD8Y4+Slp1RsG1t2ICXY9/htZdulTkUQCGaCSMh3o7tq0RDA/oPiAmEpvzGe7NR5R74lkj6sVIFK/zu+3w8MgaMBQXiG8EmFj7G9UBWW5g53h6sG0nho8z7wJ4m14l2RwNT9d1PyLw6YVbmBAIFyMiI20c9ITbVECCIVrGW1S6pAC4EGvUfx5zlayW+CmZk86Ut9NXWhPGw2Whd2J/J7Q6TFXD/ASd0elTqMn6CeqrALZQnR/LpXmlqfrWI42Qiqh1Mz1IhZhNff2grVpCK2rxYpIsom3Yn+mZn8hZYSQ8BNF3VoQmNK0Og/t3iUekBvQLRk26z0bNLNdWHNz+uofBbEiOyxCwiJF0fQpxk/Yw== ansible-home-ianubuntu
+
+apt:
+  sources:
+    hashicorp:
+      source: deb https://apt.releases.hashicorp.com $RELEASE main
+      key: |
+        -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+        mQINBF60TuYBEADLS1MP7XrMlRkn1Y54cb2UclUMH8HkIRfBrhk5Leo9kNZc/2QD
+        LmdQbi3UbZkz0uVkHqbFDgV5lAnukCnxgr9BqnL0GJpO78le7gCCbM5bR4rTJ6Ar
+        OOtIKf25smGTIpbSwNdj8BOLqiExGFj/9L5X9S5kfq3vtuYt+lmxKkIrEPjSYnFR
+        TQ2mTL8RM932GJod/5VJ2+6YvrCjtPu5/rW02H1U2ZHiTtX6ZGnIvv/sprKyFRqT
+        x4Ib+o9XwXof/LuxTMpVwIHSzCYanH5hPc7yRGKzIntBS+dDom+h9smx7FTgpHwt
+        QRFGLtVoHXqON6nXTLFDkEzxr+fXq/bgB1Kc1TuzvoK601ztQGhhDaEPloKqNWM8
+        Ho7JU1RpnoWr5jOFTYiPM9uyCtFNsJmD9mt4K8sQQN7T2inR5Us0o510FqePRFeX
+        wOJUMi1CbeYqVHfKQ5cWYujcK8pv3l1a6dSBmFfcdxtwIoA16JzCrgsCeumTDvKu
+        hOiTctb28srL/9WwlijUzZy6R2BGBbhP937f2NbMS/rpby7M1WizKeo2tkKVyK+w
+        SUWSw6EtFJi7kRSkH7rvy/ysU9I2ma88TyvyOgIz1NRRXYsW7+brgwXnuJraOLaB
+        5aiuhlngKpTPvP9CFib7AW2QOXustMZ7pOUREmxgS4kqxo74CuFws163TwARAQAB
+        tFFIYXNoaUNvcnAgU2VjdXJpdHkgKEhhc2hpQ29ycCBQYWNrYWdlIFNpZ25pbmcp
+        IDxzZWN1cml0eStwYWNrYWdpbmdAaGFzaGljb3JwLmNvbT6JAk4EEwEIADgWIQTo
+        oDLglNjrTqGJ0nDaQYyIoyGfewUCXrRO5gIbAwULCQgHAgYVCgkICwIEFgIDAQIe
+        AQIXgAAKCRDaQYyIoyGfe6/WD/9dTM/1OSgbvSPpPJOOcn5L1nOKRBJpztr4V0ky
+        GoCDakIQ/sykbcuHXP79FGLzrM8zQOsbvVp/Z2lsWBnxkT8KWM+8LZxYToRGdZhr
+        huFPHV9df0vAsZGisu4ejHDneHOTO3KqVotkky34jUSjBL7Q8uwXHY9r+5hb452N
+        vafN1w0Y1QVhb6JjjwWHR8Rf9qkSIEi6m9o8a1M54yQC2y/Zrs6+4F3zZ4uYfTvz
+        MyFfj0P5VmAoaowLSRdb2/JTObu0+zpKN+PjZA8BcnOf/pvqmEz83FIfo6zJLScx
+        TVaAwj5Iz/jS04x7EvBuIP3vpgv1R6r+t0qU/7hpu7Oc0dsxhL+C8BpVY26/2hvX
+        ozN5eG0ysSwexqwls+bnRgd6KdoHlWFNfbW8RCPKyb/s+tmFqGAY/QmxMkukgnXQ
+        WvBoa0Gdv2AFVLYup9tEO1zF4zBPh5oQwAXDNudLTHJ4KmyEwWsOQJUjNB4y4a7j
+        iGgK77T4KKXpo7pVDP8Ur+tmNH/d+/YFjxrfJvWt4ypE5dZmFO/FrUMvIGglOLDt
+        A+SiQe73IpEebB8PiqNlqJ2NU7artuRxYQVColt+/1puIHwV+h0SnMoUEvYqAtxP
+        J/N3JaiytWlesPPFWvhU/JGUAld5coEU2gbYtlenV/YmdjilIBu50sMSPGF5/6gv
+        BAA/DbkCDQRetE7mARAA0OH1pn0vdEfSm1kdqIDP3BXBD0BRHNNgGpyXXRRJFaip
+        bmpu7jSv3FsvN/NmG3BcLXXLFvwY/eIOr6fxRye+a5FSQEtvBnI1GHNmD5GAVT/H
+        KiwrT5e3ReR/FQS7hCXWU4OA2bKmSEdkJ952NhyYeyAKbkOBgbnlEhtWOAdMI7ws
+        peHAlHDqfGVOKXDh+FddCUQj/yZ2rblSzFdcC9gtcJSyHWgOQdVAEesEZ16hcZoj
+        +6O+6BXOQWOo7EPD7lA9a1qesBkSRcxQn48IVVZ2Qx2P2FtCfF+SFX+HQdqJGl15
+        qxE5CXTuJCMmCVnWhvcLW405uF/HmMFXdqGobEDiQsFFQrfpPVOi4T90VkW8P81s
+        uPoAlWht1CppNnmhWlvPQsPK/oSMBBOvOEH1EnWJate8yIkveNbqzrE7Xt3sjF6k
+        yqXaF+qW8OcDvSH/fgvVd21G10Cm77Z2WaKWvfi221oWj+WrgT8cCYv0AVmaLRMe
+        dajuYlPRQ8KaZaESza2eXggOMP5LQs/mQgfHfwSRekSbKg/L6ctp+xrZ0DPj4iIl
+        8+H4DxTILopAFWXA1a+uMVp8mV77gA9PyV3nIkrwgaZQ8MdhoKwvN/+SbvhpdzyF
+        UekzMP/HOaC6JgAomluwnFCdMDFa3FMCF3QUcIyY556QdoFD7g6033xqV6vL+d8A
+        EQEAAYkCNgQYAQgAIBYhBOigMuCU2OtOoYnScNpBjIijIZ97BQJetE7mAhsMAAoJ
+        ENpBjIijIZ97lecP+wTgSqhCz3TlUshR8lVrzECueIg3jh3+lY56am9X4MoZ2DAW
+        IXKjWKVWO55WPYD15A7+TbDyb4zh55m81LxSpV0CSRN4aPuixosWP4d0l+363D2F
+        oudz+QyvoK5J2sKFPMfhdTgGsEYVO/Zbhus5oNi0kjUTD9U7jHWPS3ilvk/g2F+k
+        T68lL9+oooleeT+kcBvbKt487JUOwMrkmHqNZdh8qmvMASAuqBcEcqjz96kVEMJY
+        bhn2skexKfIncoo/btixzJUbnplpDfibFxUHhvWWdwIv4kl3YnrCKKGSDoJcG1mV
+        sQegK4jWVGrqY8MnCI48iotP18ZxyqOycsZvs2jNmFlKwD9s1mrlr97HZ1MYbLWr
+        Hq06owH0AzVRM7tzMK7EuHkFLcoa8qh3oijn8O0B7xNOKpTZ2DjajQ/1w8nqmMi5
+        Z3Wie6ivKng/7p6c6HDrKjoQYc0/fuh1YnL60JG2Arn1OwdBsLDlzPL+Ro5iNwoJ
+        hZ+stxoZT48iAIWonBsLU11Y+MSwWdN1Eh411HTTunrEs6SafMEhnPi7vvUIZhny
+        Es0qOM/IUR1I0VtsurSn8aA6Y2Bp73+HuqFLx13/tPKBIUo6D7n/ywUlDCo7wtCw
+        aSgXPw6uF+0CyLOQ0haf2j6w1OB8ayEGSkTPER5rImCJf3MGw8IECGrErAd+
+        =EMKC
+        -----END PGP PUBLIC KEY BLOCK-----
+
+packages:
+  - python3
+  - python3-pip
+  - git
+  - nomad
+  - consul
+  - vault
+
+# vim: set ft=yaml.cloudinit :
35 nomad/packer/ubuntu-cloud-init.pkr.hcl (new file)
@ -0,0 +1,35 @@
+packer {
+  required_plugins {
+    docker = {
+      version = ">= 0.0.7"
+      source  = "github.com/hashicorp/docker"
+    }
+  }
+}
+
+
+source "qemu" "focal-arm64" {
+  qemu_binary = "qemu-system-aarch64"
+  # machine_type = "raspi3b"
+  machine_type = "virt"
+  headless     = true
+  # Can't use boot command with this true
+  # disable_vnc = true
+
+  iso_url      = "https://cloud-images.ubuntu.com/releases/focal/release-20220308/ubuntu-20.04-server-cloudimg-arm64.img"
+  iso_checksum = "sha256:e905900cd0a0d716a72f83dc94a6e2260275dc0e867c84196a8d6d1bc783b304"
+
+  output_directory = "focal_arm64"
+  shutdown_command = "echo 'packer' | sudo -S shutdown -P now"
+  disk_size        = "5000M"
+  format           = "raw"
+  ssh_username     = "root"
+  ssh_password     = "s0m3password"
+  ssh_timeout      = "20m"
+  boot_wait        = "10s"
+  # boot_command = []
+}
+
+build {
+  sources = ["source.qemu.focal-arm64"]
+}
@ -30,17 +30,14 @@ provider "vault" {
   token = var.vault_token
 }

-# Something that should exist in a post bootstrap module, right now module includes bootstrapping
-# which requries Admin
-# data "vault_nomad_access_token" "deploy" {
-#   backend = "nomad"
-#   role = "deploy"
-# }
+data "vault_nomad_access_token" "admin" {
+  backend = "nomad"
+  role    = "admin"
+}

 # Configure the Nomad provider
 provider "nomad" {
   address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
-  secret_id = var.nomad_secret_id
-  # secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
+  secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id

   region = "global"
 }
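The `vault_nomad_access_token` data source above assumes Vault's Nomad secrets engine is already mounted with an "admin" role. A hypothetical sketch of that Vault-side setup using the Terraform Vault provider (resource names and arguments here are assumptions, not part of this diff; the bootstrap token variable is invented for illustration):

resource "vault_nomad_secret_backend" "nomad" {
  backend = "nomad"
  address = "http://127.0.0.1:4646"
  token   = var.nomad_bootstrap_token  # assumption: a pre-existing bootstrap token
}

resource "vault_nomad_secret_role" "admin" {
  # A management-type role lets Vault mint admin-level Nomad tokens.
  backend = vault_nomad_secret_backend.nomad.backend
  role    = "admin"
  type    = "management"
}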
@ -1,45 +0,0 @@
----
-- name: Recovery Consul
-  hosts: consul_instances
-
-  tasks:
-    - name: Stop Consul
-      systemd:
-        name: consul
-        state: stopped
-      become: true
-
-    - name: Get node-id
-      slurp:
-        src: /opt/consul/node-id
-      register: consul_node_id
-      become: true
-
-    - name: Node Id
-      debug:
-        msg: "node_id: {{ consul_node_id.content }}"
-
-    - name: Address
-      debug:
-        msg: "address: {{ ansible_default_ipv4.address }}"
-
-    - name: Save
-      copy:
-        dest: "/opt/consul/raft/peers.json"
-        content: |
-          [
-            {% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
-            {
-              "id": "{{ hostvars[host].consul_node_id.content }}",
-              "address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
-              "non_voter": false
-            }{% if not loop.last %},{% endif %}
-            {% endfor -%}
-          ]
-      become: true
-
-    - name: Restart Consul
-      systemd:
-        name: consul
-        state: restarted
-      become: true
@ -1,45 +0,0 @@
----
-- name: Recovery Nomad
-  hosts: nomad_instances
-
-  tasks:
-    - name: Stop Nomad
-      systemd:
-        name: nomad
-        state: stopped
-      become: true
-
-    - name: Get node-id
-      slurp:
-        src: /var/nomad/server/node-id
-      register: nomad_node_id
-      become: true
-
-    - name: Node Id
-      debug:
-        msg: "node_id: {{ nomad_node_id.content }}"
-
-    - name: Address
-      debug:
-        msg: "address: {{ ansible_default_ipv4.address }}"
-
-    - name: Save
-      copy:
-        dest: /var/nomad/server/raft/peers.json
-        content: |
-          [
-            {% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
-            {
-              "id": "{{ hostvars[host].nomad_node_id.content }}",
-              "address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
-              "non_voter": false
-            }{% if not loop.last %},{% endif %}
-            {% endfor -%}
-          ]
-      become: true
-
-    - name: Restart Nomad
-      systemd:
-        name: nomad
-        state: restarted
-      become: true
38 nomad/redis/.terraform.lock.hcl (new file)
@ -0,0 +1,38 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/consul" {
+  version = "2.15.0"
+  hashes = [
+    "h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
+    "zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
+    "zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
+    "zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
+    "zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
+    "zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
+    "zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
+    "zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
+    "zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
+    "zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
+    "zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
+    "zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/nomad" {
+  version = "1.4.16"
+  hashes = [
+    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
+    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
+    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
+    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
+    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
+    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
+    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
+    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
+    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
+    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
+    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
+    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
+  ]
+}
@ -7,7 +7,7 @@ roles:
 - src: https://github.com/IamTheFij/ansible-nomad.git
   name: ansible-nomad
   scm: git
-  version: my-main
+  version: install-repo
 - src: https://github.com/ansible-community/ansible-vault.git
   name: ansible-vault
   scm: git
@ -1,11 +1,6 @@
-# Can't run this as part of root and as a submodule because of tf state
-# module "acls" {
-#   source = "./acls"
-#
-#   consul_address = var.consul_address
-#   nomad_secret_id = var.nomad_secret_id
-#   vault_token = var.vault_token
-# }
+module "acls" {
+  source = "./acls"
+}

 # module "storage_plugins" {
 #   source = "./storage_plugins"
@ -6,7 +6,6 @@
 # sticky_disk = bool
 # args = json(list[str])
 # resources = dict(cpu = int, mem = int)
-# env = json(dict(str: any))
 # templates = json(list(dict(
 #   data = str,
 #   dest = str,
@ -15,19 +14,9 @@
 #   left_delimiter = str,
 #   right_delimiter = str,
 # )))
-# host_volumes = json(list(dict(
-#   name = str,
-#   dest = str,
-#   read_only = bool,
-# )))
 # healthcheck = "/"
-# upstreams = json(list(dict(
-#   destination_name = str,
-#   local_bind_port = int
-# )))
 # mysql = bool
 # redis = bool
-# vault = bool
 job "[[.name]]" {
   region = "global"
   datacenters = ["dc1"]
@ -38,14 +27,14 @@ job "[[.name]]" {
   [[ with .count ]]count = [[ . ]][[end]]
   network {
     mode = "bridge"
-    [[ if not (empty .service_port) -]]
+    [[ if not (empty .service_port) ]]
     port "main" {
-      [[ if default false .ingress -]]
+      [[ if default false .ingress ]]
       host_network = "loopback"
-      [[ end -]]
+      [[ end ]]
       to = [[.service_port]]
     }
-    [[ end -]]
+    [[ end ]]
   }

   [[ if default false .sticky_disk ]]
@ -55,16 +44,6 @@ job "[[.name]]" {
   }
   [[ end ]]

-  [[ with .host_volumes -]]
-  [[ range $v := . | parseJSON -]]
-  volume "[[ $v.name ]]" {
-    type = "host"
-    read_only = [[ $v.read_only ]]
-    source = "[[ $v.name ]]"
-  }
-  [[ end ]]
-  [[ end -]]
-
   [[ if not (empty .service_port) ]]
   service {
     name = "[[.name | replace "_" "-"]]"
@ -75,40 +54,30 @@ job "[[.name]]" {
       sidecar_service {
         proxy {
           local_service_port = [[.service_port]]
-          [[ if default false .mysql -]]
+          [[ if default false .mysql ]]
           upstreams {
             destination_name = "mysql-server"
             local_bind_port = 4040
           }
           [[ end -]]
-          [[ if default false .redis -]]
+          [[ if default false .redis ]]
           upstreams {
             destination_name = "redis"
             local_bind_port = 6379
           }
-          [[ end -]]
-          [[ with .upstreams -]]
-          [[range $u := . | parseJSON -]]
-          upstreams {
-            destination_name = "[[ $u.destination_name ]]"
-            local_bind_port = [[ $u.local_bind_port ]]
-          }
           [[ end ]]
-          [[ end -]]
         }
       }

       sidecar_task {
         resources {
           cpu = 50
-          memory = 20
-          memory_max = 50
+          memory = 50
         }
       }
     }
     [[ end ]]

-    [[ if not (eq .healthcheck "") -]]
     check {
       type = "http"
       path = "[[ or .healthcheck "/" ]]"
@ -116,19 +85,15 @@ job "[[.name]]" {
       interval = "10s"
       timeout = "10s"
     }
-    [[ end -]]

     tags = [
       [[ if default false .ingress -]]
       "traefik.enable=true",
       "traefik.http.routers.[[.name]].entryPoints=websecure",
-      [[ if not (empty .ingress_rule) -]]
-      "traefik.http.routers.[[.name]].rule=[[.ingress_rule]]",
-      [[ end -]]
       [[ end -]]
     ]
   }
-  [[ end -]]
+  [[ end ]]

   task "[[.name]]" {
     driver = "docker"
@ -137,51 +102,32 @@ job "[[.name]]" {
       image = "[[.image]]"
       [[ if not (empty .service_port) -]]
       ports = ["main"]
-      [[ end -]]
+      [[- end ]]
       [[ if not (empty .args) -]]
       args = ["[[ .args | parseJSON | join `", "` ]]"]
-      [[ end -]]
+      [[- end ]]

-      [[ with .templates -]]
-      [[ range $t := . | parseJSON -]]
+      [[ with .templates]]
+      [[ range $t := . | parseJSON ]]
       mount {
         type = "bind"
         target = "[[ $t.dest ]]"
         source = "local/[[ $t.dest ]]"
       }
       [[ end ]]
-      [[ end -]]
+      [[ end ]]
     }

-    [[ if default false .vault -]]
-    vault {
-      policies = [
-        "access-tables",
-        "nomad-task",
-      ]
-    }
-    [[ end -]]
-
     [[ with .env -]]
     env = {
-      [[ range $k, $v := . | parseJSON -]]
+      [[- range $k, $v := . ]]
      "[[$k]]" = "[[$v]]"
-      [[ end -]]
+      [[- end ]]
-    }
-    [[ end -]]
-
-    [[ with .host_volumes -]]
-    [[ range $v := . | parseJSON -]]
-    volume_mount {
-      volume = "[[ $v.name ]]"
-      destination = "[[ $v.dest ]]"
-      read_only = [[ $v.read_only ]]
     }
     [[ end ]]
-    [[ end -]]

-    [[ with .templates -]]
-    [[ range $t := . | parseJSON -]]
+    [[ with .templates ]]
+    [[ range $t := . | parseJSON ]]
     template {
       data = <<EOF
[[ $t.data ]]
@ -193,15 +139,15 @@ EOF
       [[ with $t.change_signal ]]change_signal = "[[ . ]]"[[ end -]]
       [[ with $t.env ]]env = [[ . ]][[ end ]]
     }
-    [[ end -]]
-    [[ end -]]
+    [[ end ]]
+    [[ end ]]

-    [[ with .resources -]]
+    [[ with .resources ]]
     resources {
       cpu = [[ .cpu ]]
       memory = [[ .memory ]]
     }
-    [[ end -]]
+    [[ end ]]
   }
 }
}
@ -1,5 +1,49 @@
-module "services" {
-  source = "./services"

-  depends_on = [module.databases, module.core]
+module "nextcloud" {
+  source = "./nextcloud"
+
+  depends_on = [module.databases]
+}
+
+module "backups" {
+  source = "./backups"
+
+  depends_on = [module.databases]
+}
+
+module "media" {
+  source = "./media"
+}
+
+resource "nomad_job" "whoami" {
+  hcl2 {
+    enabled = true
+    vars = {
+      "count" = "${2 * length(data.consul_service.nomad.service)}",
+    }
+  }
+
+  jobspec = file("${path.module}/whoami.nomad")
+}
+
+resource "consul_config_entry" "global_access" {
+  name = "*"
+  kind = "service-intentions"
+
+  config_json = jsonencode({
+    Sources = [
+      {
+        Action     = "allow"
+        Name       = "traefik"
+        Precedence = 6
+        Type       = "consul"
+      },
+      {
+        Action     = "deny"
+        Name       = "*"
+        Precedence = 5
+        Type       = "consul"
+      },
+    ]
+  })
 }
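The `count` expression above references `data.consul_service.nomad`, which is not declared in this hunk. A minimal sketch of the data source it assumes, mirroring the commented-out declaration in the removed media config further down in this diff:

data "consul_service" "nomad" {
  # One entry per registered Nomad client; count scales with cluster size.
  name = "nomad-client"
}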
@ -1,40 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.16.2"
-  hashes = [
-    "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
-    "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
-    "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
-    "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
-    "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
-    "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
-    "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
-    "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
-    "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
-    "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
-    "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
-    "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
-    "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.19"
-  hashes = [
-    "h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
-    "zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
-    "zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
-    "zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
-    "zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
-    "zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
-    "zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
-    "zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
-    "zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
-    "zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
-    "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
-  ]
-}
@ -1,200 +0,0 @@
-job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
-  datacenters = ["dc1"]
-  %{ if batch_node == null ~}
-  type = "system"
-  %{ else ~}
-  type = "batch"
-
-  parameterized {
-    meta_required = ["job_name"]
-    meta_optional = ["task", "snapshot"]
-
-  }
-
-  meta {
-    task = "backup"
-    snapshot = "latest"
-  }
-  %{ endif ~}
-
-  %{ if batch_node == null ~}
-  constraint {
-    attribute = "$${node.unique.name}"
-    operator = "set_contains_any"
-    # Only deploy to nodes running tasks to backup
-    value = "n1,n2"
-  }
-  %{ else ~}
-  constraint {
-    attribute = "$${node.unique.name}"
-    value = "${batch_node}"
-  }
-  %{ endif ~}
-
-  group "backup" {
-
-    network {
-      mode = "bridge"
-
-      port "metrics" {
-        to = 8080
-      }
-    }
-
-    volume "all-volumes" {
-      type = "host"
-      read_only = false
-      source = "all-volumes"
-    }
-
-    service {
-      name = "backups"
-      port = "metrics"
-
-      # Add connect to mysql
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 8080
-
-            upstreams {
-              destination_name = "mysql-server"
-              local_bind_port = 6060
-            }
-
-            config {
-              protocol = "tcp"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 50
-          }
-        }
-      }
-
-      meta {
-        metrics_addr = "$${NOMAD_ADDR_metrics}"
-      }
-    }
-
-    task "backup" {
-      driver = "docker"
-
-      volume_mount {
-        volume = "all-volumes"
-        destination = "/data"
-        read_only = false
-      }
-
-      config {
-        image = "iamthefij/resticscheduler"
-        ports = ["metrics"]
-        args = [
-          %{ if batch_node != null ~}
-          "-once",
-          "-$${NOMAD_META_task}",
-          "$${NOMAD_META_job_name}",
-          %{ endif ~}
-          "/jobs/node-jobs.hcl",
-        ]
-
-        mount {
-          type = "bind"
-          target = "/jobs"
-          source = "jobs"
-        }
-      }
-
-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
-      env = {
-        "MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
-        "MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
-      }
-
-      template {
-        # Probably want to use database credentials that have access to dump all tables
-        data = <<EOF
-{{ with secret "kv/data/nextcloud" -}}
-MYSQL_DATABASE={{ .Data.data.db_name }}
-MYSQL_USER={{ .Data.data.db_user }}
-MYSQL_PASSWORD={{ .Data.data.db_pass }}
-{{ end -}}
-{{ with secret "kv/data/backups" -}}
-BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
-RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
-RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
-RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
-RCLONE_FTP_EXPLICIT_TLS=true
-RCLONE_FTP_NO_CHECK_CERTIFICATE=true
-{{ end -}}
-EOF
-        destination = "secrets/db.env"
-        env = true
-      }
-
-      template {
-        data = <<EOH
-CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
-EOH
-        destination = "local/consul.env"
-        env = true
-      }
-
-
-      template {
-        # Build jobs based on node
-        data = <<EOF
-# Current node is {{ env "node.unique.name" }}
-{{ if eq (env "node.unique.name") "n2" -}}
-# Consul backup
-${file("${module_path}/jobs/consul.hcl")}
-{{ end -}}
-
-{{ range service "nextcloud" -}}
-# Nextcloud .Node {{ .Node }}
-{{ if eq .Node (env "node.unique.name") -}}
-${file("${module_path}/jobs/nextcloud.hcl")}
-{{ end -}}
-{{ end -}}
-
-{{ range service "lldap" -}}
-# Lldap .Node {{ .Node }}
-{{ if eq .Node (env "node.unique.name") -}}
-${file("${module_path}/jobs/lldap.hcl")}
-{{ end -}}
-{{ end -}}
-
-{{ range service "sonarr" -}}
-# Lldap .Node {{ .Node }}
-{{ if eq .Node (env "node.unique.name") -}}
-${file("${module_path}/jobs/sonarr.hcl")}
-{{ end -}}
-{{ end -}}
-
-{{ range service "nzbget" -}}
-# Lldap .Node {{ .Node }}
-{{ if eq .Node (env "node.unique.name") -}}
-${file("${module_path}/jobs/nzbget.hcl")}
-{{ end -}}
-{{ end -}}
-EOF
-        destination = "jobs/node-jobs.hcl"
-      }
-
-      resources {
-        cpu = 50
-        memory = 256
-      }
-    }
-  }
-}
@ -1,24 +0,0 @@
-resource "nomad_job" "backups" {
-  jobspec = templatefile("${path.module}/backup.nomad", {
-    module_path = "${path.module}",
-    batch_node = null,
-  })
-}
-
-# Get Nomad clients from Consul
-# data "consul_service" "nomad" {
-#   name = "nomad-client"
-# }
-
-resource "nomad_job" "backups-oneoff" {
-  # TODO: Get list of nomad hosts dynamically
-  for_each = toset(["n1", "n2"])
-  # for_each = toset([
-  #   for node in data.consul_service.nomad.service :
-  #   node.node_name
-  # ])
-  jobspec = templatefile("${path.module}/backup.nomad", {
-    module_path = "${path.module}",
-    batch_node = each.key,
-  })
-}
@ -1,27 +0,0 @@
-job "lldap" {
-  schedule = "@daily"
-
-  config {
-    repo = "rclone::ftp,env_auth:/nomad/lldap"
-    passphrase = env("BACKUP_PASSPHRASE")
-  }
-
-  # sqlite "Backup database" {
-  #   path = "/data/lldap/users.db"
-  #   # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
-  #   dump_to = "/data/lldap/users.db.bak"
-  # }
-
-  backup {
-    paths = ["/data/lldap"]
-    # Because path is absolute
-    restore_opts {
-      Target = "/"
-    }
-  }
-
-  forget {
-    KeepLast = 2
-    Prune = true
-  }
-}
@ -1,21 +0,0 @@
-job "nzbget" {
-  schedule = "@daily"
-
-  config {
-    repo = "rclone::ftp,env_auth:/nomad/nzbget"
-    passphrase = env("BACKUP_PASSPHRASE")
-  }
-
-  backup {
-    paths = ["/data/nzbget"]
-    # Because path is absolute
-    restore_opts {
-      Target = "/"
-    }
-  }
-
-  forget {
-    KeepLast = 2
-    Prune = true
-  }
-}
@ -1,27 +0,0 @@
-job "sonarr" {
-  schedule = "@daily"
-
-  config {
-    repo = "rclone::ftp,env_auth:/nomad/sonarr"
-    passphrase = env("BACKUP_PASSPHRASE")
-  }
-
-  # sqlite "Backup database" {
-  #   path = "/data/lldap/users.db"
-  #   # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
-  #   dump_to = "/data/lldap/users.db.bak"
-  # }
-
-  backup {
-    paths = ["/data/sonarr"]
-    # Because path is absolute
-    restore_opts {
-      Target = "/"
-    }
-  }
-
-  forget {
-    KeepLast = 2
-    Prune = true
-  }
-}
@ -1,29 +0,0 @@
-resource "consul_service" "homeassistant" {
-  name = "hass"
-  node = consul_node.homeassistant.name
-  port = 8123
-  tags = [
-    "traefik.enable=true",
-    "traefik.consulcatalog.connect=false",
-    "traefik.http.routers.hass.entryPoints=websecure",
-  ]
-
-  check {
-    check_id = "homeassistant:hass"
-    status = "passing"
-    name = "Home Assistant Health Check"
-    http = "192.168.3.65:8123"
-    interval = "30s"
-    timeout = "10s"
-  }
-}
-
-resource "consul_node" "homeassistant" {
-  name = "homeassistant"
-  address = "192.168.3.65"
-
-  meta = {
-    "external-node" = "true"
-    "external-probe" = "true"
-  }
-}
@ -1,201 +0,0 @@
-job "ipdvr" {
-  region = "global"
-  datacenters = ["dc1"]
-
-  type = "service"
-
-  group "nzbget" {
-    network {
-      mode = "bridge"
-      port "main" {
-        host_network = "loopback"
-        to = 6789
-      }
-    }
-
-    volume "nzbget-data" {
-      type = "host"
-      read_only = false
-      source = "nzbget-data"
-    }
-
-    volume "download" {
-      type = "host"
-      read_only = false
-      source = "download"
-    }
-
-    service {
-      name = "nzbget"
-      port = "main"
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 6789
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 20
-            memory_max = 50
-          }
-        }
-      }
-
-      # check {
-      #   type     = "http"
-      #   path     = "/"
-      #   port     = "main"
-      #   interval = "10s"
-      #   timeout  = "10s"
-      # }
-
-      tags = [
-        "traefik.enable=true",
-        "traefik.http.routers.nzbget.entryPoints=websecure",
-      ]
-    }
-
-    task "nzbget" {
-      driver = "docker"
-
-      config {
-        image = "linuxserver/nzbget"
-        ports = ["main"]
-      }
-
-      env = {
-        "PGID" = 100
-        "PUID" = 1001
-        "TZ" = "America/Los_Angeles"
-      }
-
-      volume_mount {
-        volume = "nzbget-data"
-        destination = "/config"
-        read_only = false
-      }
-
-      volume_mount {
-        volume = "download"
-        destination = "/downloads"
-        read_only = false
-      }
-
-      resources {
-        cpu = 200
-        memory = 200
-        memory_max = 500
-      }
-    }
-  }
-
-
-  group "sonarr" {
-    network {
-      mode = "bridge"
-      port "main" {
-        host_network = "loopback"
-        to = 8989
-      }
-    }
-
-    volume "sonarr-data" {
-      type = "host"
-      read_only = false
-      source = "sonarr-data"
-    }
-
-    volume "tv-sonarr" {
-      type = "host"
-      read_only = false
-      source = "tv-sonarr"
-    }
-
-    volume "download" {
-      type = "host"
-      read_only = false
-      source = "download"
-    }
-
-    service {
-      name = "sonarr"
-      port = "main"
-
-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 8989
-            upstreams {
-              destination_name = "nzbget"
-              local_bind_port = 6789
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 20
-            memory_max = 50
-          }
-        }
-      }
-
-      # check {
-      #   type     = "http"
-      #   path     = "/"
-      #   port     = "main"
-      #   interval = "10s"
-      #   timeout  = "10s"
-      # }
-
-      tags = [
-        "traefik.enable=true",
-        "traefik.http.routers.sonarr.entryPoints=websecure",
-      ]
-    }
-
-    task "sonarr" {
-      driver = "docker"
-
-      config {
-        image = "linuxserver/sonarr"
-        ports = ["main"]
-      }
-
-      env = {
-        "PGID" = 100
-        "PUID" = 1001
-        "TZ" = "America/Los_Angeles"
-      }
-
-      volume_mount {
-        volume = "sonarr-data"
-        destination = "/config"
-        read_only = false
-      }
-
-      volume_mount {
-        volume = "tv-sonarr"
-        destination = "/tv"
-        read_only = false
-      }
-
-      volume_mount {
-        volume = "download"
-        destination = "/downloads"
-        read_only = false
-      }
-
-      resources {
-        cpu = 100
-        memory = 300
-        memory_max = 500
-      }
-    }
-  }
-}
@ -1,109 +0,0 @@
-# module "nextcloud" {
-#   source = "./nextcloud"
-#
-#   depends_on = [module.databases]
-# }
-
-module "backups" {
-  source = "./backups"
-
-  # In parent module
-  # depends_on = [module.databases]
-}
-
-module "media" {
-  source = "./media"
-}
-
-resource "nomad_job" "whoami" {
-  hcl2 {
-    enabled = true
-    vars = {
-      "count" = 1,
-      # "count" = "${2 * length(data.consul_service.nomad.service)}",
-    }
-  }
-
-  jobspec = file("${path.module}/whoami.nomad")
-}
-
-resource "nomad_job" "ipdvr" {
-  jobspec = file("${path.module}/ip-dvr.nomad")
-}
-
-resource "consul_config_entry" "nzbget_intents" {
-  depends_on = [nomad_job.ipdvr]
-
-  name = "nzbget"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action = "allow"
-        Name = "sonarr"
-        Precedence = 9
-        Type = "consul"
-      },
-    ]
-  })
-}
-
-# module "nzbget" {
-#   source "./levant"
-#
-#   template_path = "service.nomad"
-#   variables = {
-#     name = "nzbget"
-#     image = "linuxserver/nzbget"
-#     service_port = 6789
-#     ingress = true
-#     env = jsonencode({
-#       PGID = 100
-#       PUID = 1001
-#       TZ = "America/Los_Angeles"
-#     })
-#     host_volumes = jsonencode([
-#       {
-#         name = "download"
-#         dest = "/srv/volumes/download"
-#         read_only = false
-#       },
-#     ])
-#   }
-# }
-#
-# module "sonarr" {
-#   source = "./levant"
-#
-#   template_path = "service.nomad"
-#   variables = {
-#     name = "sonarr"
-#     image = "linuxserver/sonarr"
-#     service_port = 8989
-#     ingress = true
-#     env = jsonencode({
-#       PGID = 100
-#       PUID = 1001
-#       TZ = "America/Los_Angeles"
-#
-#     })
-#     host_volumes = jsonencode([
-#       {
-#         name = "sonarr-data"
-#         dest = "/config"
-#         read_only = false
-#       },
-#       {
-#         name = "tv-sonarr"
-#         dest = "/srv/volumes/media-write/TV Shows"
-#         read_only = false
-#       },
-#       {
-#         name = "download"
-#         dest = "/srv/volumes/download"
-#         read_only = false
-#       },
-#     ])
-#   }
-# }
@ -9,29 +9,29 @@
 roles:
   - role: ansible-consul
     vars:
-      consul_version: "1.13.3-1"
+      consul_version: "1.12.3-1"
       consul_install_upgrade: true
       consul_install_from_repo: true
      consul_os_repo_prerequisites: []

       consul_node_role: server
       consul_bootstrap_expect: true
-      consul_bootstrap_expect_value: "{{ [(play_hosts | length), 3] | min }}"

       consul_user: consul
       consul_manage_user: true
       consul_group: bin
       consul_manage_group: true

+      consul_architecture_map:
+        x86_64: amd64
+        armhfv6: arm
+        armv7l: arm
+
       # consul_tls_enable: true
       consul_connect_enabled: true
       consul_ports_grpc: 8502
       consul_client_address: "0.0.0.0"

-      # Autopilot
-      consul_autopilot_enable: true
-      consul_autopilot_cleanup_dead_Servers: true

       # Enable metrics
       consul_config_custom:
         telemetry:
@ -60,12 +60,29 @@
 
 # If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something
 # pointing to 127.0.0.1 and possibly restart Docker and Nomad
-    - name: Update resolv.conf
-      lineinfile:
-        dest: /etc/resolv.conf
-        create: true
-        line: "nameserver 127.0.0.1"
-      become: true
+    - name: Add values
+      delegate_to: localhost
+      run_once: true
+      block:
+        - name: Install python-consul
+          pip:
+            name: python-consul
+            extra_args: --index-url https://pypi.org/simple
+
+        - name: Set hostname
+          consul_kv:
+            host: "{{ inventory_hostname }}"
+            key: global/base_hostname
+            # TODO: propagate this through via Consul and Nomad templates rather than Terraform
+            value: dev.homelab
+
+        - name: Write values
+          consul_kv:
+            host: "{{ inventory_hostname }}"
+            key: "{{ item.key }}"
+            value: "{{ item.value }}"
+          loop: "{{ consul_values | default({}) | dict2items }}"
 
 - name: Setup Vault cluster
   hosts: vault_instances
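
For context on how these KV writes are consumed downstream: a value like global/base_hostname can be read back inside a Nomad job's template block with consul-template's key function. A minimal sketch, not from this repo (the job, task, and image names are illustrative):

job "kv-example" {
  datacenters = ["dc1"]

  group "app" {
    task "app" {
      driver = "docker"

      # Render the value written by the "Set hostname" task above.
      # {{ key ... }} is consul-template syntax evaluated on the Nomad client.
      template {
        data        = <<-EOF
          BASE_HOSTNAME={{ key "global/base_hostname" }}
        EOF
        destination = "local/app.env"
        env         = true
      }

      config {
        image = "busybox:1.34"
        args  = ["sh", "-c", "echo $BASE_HOSTNAME && sleep 3600"]
      }
    }
  }
}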
@ -76,7 +93,7 @@
   roles:
     - name: ansible-vault
       vars:
-        vault_version: 1.12.0-1
+        vault_version: 1.10.0
         vault_install_hashi_repo: true
         vault_harden_file_perms: true
         vault_bin_path: /usr/bin
@ -93,6 +110,7 @@
         status_code: 200, 429, 472, 473, 501, 503
         body_format: json
         return_content: true
+      run_once: true
       register: vault_status
 
     - name: Initialize Vault
@ -145,24 +163,24 @@
           - unseal_keys_hex is defined
           - vault_status.json["sealed"]
 
-- name: Install Docker
-  hosts: nomad_instances
-  become: true
-  vars:
-    docker_architecture_map:
-      x86_64: amd64
-      armv7l: armhf
-      aarch64: arm64
-    docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
-    docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
-  roles:
-    - geerlingguy.docker
-
-  tasks:
-    - name: Remove snapd
-      package:
-        name: snapd
-        state: absent
+    - name: Bootstrap Vault secrets
+      delegate_to: localhost
+      run_once: true
+      block:
+        - name: Install hvac
+          pip:
+            name: hvac
+            extra_args: --index-url https://pypi.org/simple
+
+        - name: Write values
+          community.hashi_vault.vault_write:
+            url: "http://{{ inventory_hostname }}:8200"
+            token: "{{ root_token }}"
+            path: "kv/data/{{ item.key }}"
+            data:
+              data:
+                "{{ item.value }}"
+          loop: "{{ hashi_vault_values | default({}) | dict2items }}"
 
 # Not on Ubuntu 20.04
 # - name: Install Podman
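
Once the values land under kv/data/... (KV v2), a task with the Vault integration enabled can template them back out. A hedged sketch, assuming a backups entry like the hashi_vault_values shown further down in this diff, and a Vault policy (here called backups-read, an assumed name) that grants read on the path:

task "backup" {
  driver = "docker"

  vault {
    policies = ["backups-read"] # assumed policy name
  }

  # With KV v2, the payload written above comes back nested under .Data.data
  template {
    data        = <<-EOF
      {{ with secret "kv/data/backups" -}}
      BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
      {{- end }}
    EOF
    destination = "secrets/backup.env"
    env         = true
  }

  config {
    image = "busybox:1.34" # illustrative
  }
}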
@ -201,29 +219,15 @@
         state: mounted
         fstype: nfs4
 
-    - name: Create Media Library RW NFS mount
-      ansible.posix.mount:
-        src: 192.168.2.10:/Multimedia
-        path: /srv/volumes/media-write
-        opts: proto=tcp,port=2049,rw
-        state: mounted
-        fstype: nfs4
-
-    - name: Create Download RW NFS mount
-      ansible.posix.mount:
-        src: 192.168.2.10:/Download
-        path: /srv/volumes/download
-        opts: proto=tcp,port=2049,rw
-        state: mounted
-        fstype: nfs4
-
-    - name: Create Container NAS RW NFS mount
-      ansible.posix.mount:
-        src: 192.168.2.10:/Container
-        path: /srv/volumes/container
-        opts: proto=tcp,port=2049,rw
-        state: mounted
-        fstype: nfs4
+- name: Install Docker
+  hosts: nomad_instances
+  become: true
+  vars:
+    deb_arch: "{% if ansible_architecture == 'x86_64' %}amd64{% elif ansible_architecture == 'armv7l' %}armhf{% endif %}"
+    docker_apt_arch: "{{ deb_arch }}"
+    docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
+  roles:
+    - geerlingguy.docker
 
 - name: Build Nomad cluster
   hosts: nomad_instances
@ -235,60 +239,45 @@
       - name: motioneye-recordings
         path: /srv/volumes/motioneye-recordings
         owner: "root"
-        group: "root"
+        group: "bin"
         mode: "0755"
         read_only: false
       - name: media-read
-        path: /srv/volumes/media-write
-        read_only: true
-      - name: media-write
-        path: /srv/volumes/media-write
+        path: /srv/volumes/media-read
         owner: "root"
         group: "root"
-        mode: "0755"
-        read_only: false
+        mode: "0777"
+        read_only: true
-      - name: tv-sonarr
-        path: "/srv/volumes/media-write/TV Shows"
-        owner: 1001
-        group: 100
-        mode: "0755"
-        read_only: false
-      - name: download
-        path: /srv/volumes/download
-        owner: 1001
-        group: 100
-        mode: "0755"
-        read_only: false
-      - name: nzbget-data
-        path: /srv/volumes/container/nzbget/config
-        read_only: false
-      - name: gitea-data
-        path: /srv/volumes/container/gitea
-        read_only: false
       - name: all-volumes
         path: /srv/volumes
         owner: "root"
         group: "root"
-        mode: "0755"
+        mode: "0777"
         read_only: false
 
   roles:
     - name: ansible-nomad
       vars:
-        nomad_version: "1.4.1-1"
+        nomad_version: "1.3.2-1"
+        nomad_install_remotely: true
         nomad_install_upgrade: true
         nomad_allow_purge_config: true
 
-        nomad_meta:
-          # There are issues with v1.23.0 on arm64
-          connect.sidecar_image: envoyproxy/envoy:v1.23.1
-
         # Where nomad gets installed to
         nomad_bin_dir: /usr/bin
         nomad_install_from_repo: true
 
-        nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
-        nomad_raft_protocol: 3
+        # nomad_user: root
+        # nomad_manage_user: true
+        # nomad_group: bin
+        # nomad_manage_group: true
 
+        # Properly map install arch
+        nomad_architecture_map:
+          x86_64: amd64
+          armhfv6: arm
+          armv7l: arm
 
         nomad_autopilot: true
         nomad_encrypt_enable: true
         # nomad_use_consul: true
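
For reference, a host volume registered here is claimed by a job through a group-level volume block plus a task-level volume_mount, as in the download mount at the top of this diff. A minimal sketch (the group, task, and image names are illustrative):

group "downloader" {
  # Claim the "download" host volume registered on the Nomad client above
  volume "download" {
    type      = "host"
    source    = "download"
    read_only = false
  }

  task "app" {
    driver = "docker"

    volume_mount {
      volume      = "download"
      destination = "/downloads"
      read_only   = false
    }

    config {
      image = "busybox:1.34" # illustrative
    }
  }
}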
@ -334,6 +323,9 @@
 
         # Create networks for binding task ports
         nomad_host_networks:
+          # - name: public
+          #   interface: eth0
+          #   reserved_ports: "22"
           - name: nomad-bridge
             interface: nomad
             reserved_ports: "22"
@ -345,19 +337,8 @@
         nomad_acl_enabled: true
 
         # Enable vault integration
-        # HACK: Only talk to local Vault for now because it doesn't have HTTPS
-        # TODO: Would be really great to have this over https and point to vault.consul.service
-        # nomad_vault_address: "https://vault.service.consul:8200"
-        # Right now, each node only talks to it's local Vault, so if that node is rebooted and
-        # that vault is sealed, it will not have access to vault. This is a problem if a node
-        # must reboot.
-        nomad_vault_address: "http://127.0.0.1:8200"
-        # TODO: This fails on first run because the Nomad-Vault integration can't be set up
-        # until Nomad has started. Could maybe figure out if ACLs have been set up and leave
-        # these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
-        # befor Nomad is set up
+        nomad_vault_address: "http://vault.service.consul:8200"
         nomad_vault_create_from_role: "nomad-cluster"
-        # TODO: (security) Probably want to restict this to a narrower scoped token
         nomad_vault_enabled: "{{ root_token is defined }}"
         nomad_vault_token: "{{ root_token | default('') }}"
@ -365,36 +346,25 @@
         ui:
           enabled: true
           consul:
-            ui_url: "https://{{ ansible_hostname }}:8500/ui"
+            ui_url: "http://{{ ansible_hostname }}:8500/ui"
           vault:
-            ui_url: "https://{{ ansible_hostname }}:8200/ui"
+            ui_url: "http://{{ ansible_hostname }}:8200/ui"
 
         consul:
           tags:
             - "traefik.enable=true"
             - "traefik.consulcatalog.connect=true"
             - "traefik.http.routers.nomadclient.entrypoints=websecure"
 
-- name: Bootstrap Nomad ACLs and scheduler
-  hosts: nomad_instances
-
   tasks:
     - name: Start Nomad
       systemd:
         state: started
         name: nomad
 
-    - name: Nomad API reachable?
-      uri:
-        url: "http://127.0.0.1:4646/v1/status/leader"
-        method: GET
-        status_code: 200
-      register: nomad_check_result
-      retries: 6
-      until: nomad_check_result is succeeded
-      delay: 10
-      changed_when: false
-      run_once: true
+- name: Bootstrap Nomad ACLs
+  hosts: nomad_instances
 
+  tasks:
     - name: Bootstrap ACLs
       command:
         argv:
@ -414,6 +384,16 @@
       delegate_to: localhost
       run_once: true
 
+    - name: Look for policy
+      command:
+        argv:
+          - nomad
+          - acl
+          - policy
+          - list
+      run_once: true
+      register: policies
+
     - name: Read secret
       command:
         argv:
@ -427,35 +407,9 @@
       changed_when: false
       register: read_secretid
 
-    - name: Enable service scheduler preemption
-      command:
-        argv:
-          - nomad
-          - operator
-          - scheduler
-          - set-config
-          - -preempt-system-scheduler=true
-          - -preempt-service-scheduler=true
-      environment:
-        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      delegate_to: "{{ play_hosts[0] }}"
-      run_once: true
-
-    - name: Look for policy
-      command:
-        argv:
-          - nomad
-          - acl
-          - policy
-          - list
-      environment:
-        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      run_once: true
-      register: policies
-
     - name: Copy policy
       copy:
-        src: ./acls/nomad-anon-policy.hcl
+        src: ./acls/nomad-anon-bootstrap.hcl
         dest: /tmp/anonymous.policy.hcl
       delegate_to: "{{ play_hosts[0] }}"
       register: anon_policy
@ -468,7 +422,7 @@
           - acl
           - policy
           - apply
-          - -description="Anon read only"
+          - -description="Anon RW"
           - anonymous
           - /tmp/anonymous.policy.hcl
       environment:
@ -476,24 +430,3 @@
       when: policies.stdout == "No policies found" or anon_policy.changed
       delegate_to: "{{ play_hosts[0] }}"
       run_once: true
 
-    - name: Set up Nomad backend and roles in Vault
-      community.general.terraform:
-        project_path: ./acls
-        force_init: true
-        variables:
-          consul_address: "{{ play_hosts[0] }}:8500"
-          vault_token: "{{ root_token }}"
-          nomad_secret_id: "{{ read_secretid.stdout }}"
-      delegate_to: localhost
-      run_once: true
-      notify:
-        - Restart Nomad
-
-  handlers:
-    - name: Restart Nomad
-      systemd:
-        state: restarted
-        name: nomad
-      retries: 6
-      delay: 5
@ -23,9 +23,8 @@ job "syslogng" {
 
     connect {
       sidecar_service {
-        proxy {
         local_service_port = 1514
+        proxy {
           upstreams {
             destination_name = "loki"
             local_bind_port  = 1000
@ -36,8 +35,7 @@ job "syslogng" {
       sidecar_task {
         resources {
           cpu        = 50
-          memory     = 20
-          memory_max = 50
+          memory     = 50
         }
       }
     }
@ -66,13 +64,13 @@ server:
 http_listen_port: 9080
 
 clients:
-  - url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
+  - url: http://${NOMAD_UPSTREAM_ADDR_loki}/loki/api/v1/push
 
 scrape_configs:
   # TCP syslog receiver
   - job_name: syslog
     syslog:
-      listen_address: 0.0.0.0:{{ env "NOMAD_PORT_main" }}
+      listen_address: 0.0.0.0:${NOMAD_PORT_main}
       labels:
         job: syslog
     relabel_configs:
@ -84,7 +82,7 @@ EOF
 
       resources {
         cpu    = 50
-        memory = 20
+        memory = 50
       }
     }
   }
@ -92,10 +90,17 @@ EOF
   group "syslogng" {
     count = 1
 
+    constraint {
+      attribute = "${node.unique.name}"
+      # Needs to be on a predictable node for routing
+      # Maybe a loadbalancer could be used for routing from any node
+      value = "n2"
+    }
+
     network {
       mode = "bridge"
       port "main" {
-        to = 514
+        static = 1514
       }
     }
@ -106,8 +111,6 @@ EOF
     connect {
       sidecar_service {
         proxy {
-          local_service_port = 514
-
           upstreams {
             destination_name = "syslogng-promtail"
             local_bind_port  = 1000
@ -118,18 +121,25 @@ EOF
       sidecar_task {
         resources {
           cpu        = 50
-          memory     = 20
-          memory_max = 50
+          memory     = 50
         }
       }
     }
 
+    check {
+      type     = "tcp"
+      port     = "main"
+      interval = "10s"
+      timeout  = "10s"
+    }
+
   }
 
   task "syslogng" {
     driver = "docker"
 
     config {
-      image = "balabit/syslog-ng:3.37.1"
+      image = "balabit/syslog-ng:latest"
       ports = ["main"]
       args  = ["--no-caps"]
@ -142,12 +152,11 @@ EOF
 
       template {
         data = <<EOF
-@version: 3.37
-@include "scl.conf"
+@version: 3.22
 
-source s_network {
-  default-network-drivers(
-  );
+source s_external {
+  syslog(ip(0.0.0.0) port(1514) transport("tcp"));
+  syslog(ip(0.0.0.0) port(1514) transport("udp"));
 };
 
 source s_internal {
@ -156,18 +165,18 @@ source s_internal {
 
 destination d_loki {
   # Forward to Connect proxy to Promtail
-  syslog("{{ env "NOMAD_UPSTREAM_IP_syslogng-promtail" }}" transport("tcp") port({{ env "NOMAD_UPSTREAM_PORT_syslogng-promtail" }}));
+  syslog("${NOMAD_UPSTREAM_IP_syslogngpromtail}" transport("tcp") port(${NOMAD_UPSTREAM_PORT_syslogngpromtail}));
 };
 
 log { source(s_internal); destination(d_loki); };
-log { source(s_network); destination(d_loki); };
+log { source(s_external); destination(d_loki); };
 EOF
         destination = "local/syslog-ng.conf"
       }
 
       resources {
         cpu    = 50
-        memory = 10
+        memory = 50
       }
     }
   }
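
A note on the interpolation change running through this file: NOMAD_UPSTREAM_ADDR_<service> (and the _IP/_PORT variants) are environment variables that Nomad injects for Connect upstreams, so inside a template block they are normally read with consul-template's env function. A minimal sketch, assuming an upstream named loki:

template {
  # NOMAD_UPSTREAM_ADDR_loki resolves to host:port of the local Connect
  # proxy listener for the "loki" upstream declared in the group network.
  data        = <<-EOF
    clients:
      - url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
  EOF
  destination = "local/promtail.yml"
}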
nomad/traefik/.terraform.lock.hcl (new file, 38 lines)
@ -0,0 +1,38 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/consul" {
  version = "2.15.0"
  hashes = [
    "h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
    "zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
    "zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
    "zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
    "zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
    "zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
    "zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
    "zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
    "zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
    "zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
    "zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
    "zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
  ]
}

provider "registry.terraform.io/hashicorp/nomad" {
  version = "1.4.16"
  hashes = [
    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
  ]
}
nomad/traefik/traefik.nomad (new file, 191 lines)
@ -0,0 +1,191 @@
variable "consul_address" {
  type        = string
  description = "Full address of Consul instance to get catalog from"
  default     = "http://127.0.0.1:5400"
}

variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}

job "traefik" {
  datacenters = ["dc1"]
  type        = "system"
  priority    = 100

  constraint {
    attribute = "${node.class}"
    value     = "ingress"
  }

  update {
    max_parallel = 1
    auto_revert  = true
  }

  group "traefik" {

    network {
      port "web" {
        static = 80
      }
      port "websecure" {
        static = 443
      }
    }

    service {
      name = "traefik"
      port = "web"

      check {
        type     = "http"
        path     = "/ping"
        port     = "web"
        interval = "10s"
        timeout  = "2s"
      }

      connect {
        native = true
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.traefik_dashboard.entryPoints=websecure",
        "traefik.http.routers.traefik_dashboard.rule=Host(`traefik.${var.base_hostname}`)",
        "traefik.http.routers.traefik_dashboard.service=api@internal",
        "traefik.http.routers.traefik_dashboard.tls=true",
      ]
    }

    task "traefik" {
      driver = "docker"

      config {
        image = "traefik:2.6"

        ports        = ["web", "websecure"]
        network_mode = "host"

        mount {
          type   = "bind"
          target = "/etc/traefik"
          source = "config"
        }
      }

      template {
        # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
        left_delimiter  = "<<"
        right_delimiter = ">>"
        data = <<EOH
[log]
  level = "DEBUG"

[entryPoints]
  [entryPoints.web]
    address = ":80"
    [entryPoints.web.http]
      [entryPoints.web.http.redirections]
        [entryPoints.web.http.redirections.entrypoint]
          to = "websecure"
          scheme = "https"

  [entryPoints.websecure]
    address = ":443"
    [entryPoints.websecure.http.tls]
      # certResolver = "letsEncrypt"

  [entryPoints.metrics]
    address = ":8989"

[api]
  dashboard = true

[ping]
  entrypoint = "web"

[metrics]
  [metrics.prometheus]
    entrypoint = "metrics"
    # manualRouting = true

[providers.file]
  directory = "/etc/traefik/conf"
  watch = true

[providers.consulCatalog]
  connectAware = true
  connectByDefault = true
  exposedByDefault = false
  defaultRule = "Host(`{{normalize .Name}}.${var.base_hostname}`)"
  [providers.consulCatalog.endpoint]
    address = "http://<< env "CONSUL_HTTP_ADDR" >>"
EOH
        destination = "/config/traefik.toml"
      }

      template {
        # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
        left_delimiter  = "<<"
        right_delimiter = ">>"
        data = <<EOH
[http]
  [http.routers]
    [http.routers.nomad]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "nomad"
      rule = "Host(`nomad.${var.base_hostname}`)"
    [http.routers.consul]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "consul"
      rule = "Host(`consul.${var.base_hostname}`)"
    [http.routers.vault]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "vault"
      rule = "Host(`vault.${var.base_hostname}`)"

  [http.services]
    << with service "nomad-client" ->>
    [http.services.nomad]
      [http.services.nomad.loadBalancer]
      << range . ->>
        [[http.services.nomad.loadBalancer.servers]]
          url = "http://<< .Address >>:<< .Port >>"
      << end >>
    <<- end >>
    << with service "consul" ->>
    [http.services.consul]
      [http.services.consul.loadBalancer]
      << range . ->>
        [[http.services.consul.loadBalancer.servers]]
          # Not using .Port because that's an RPC port
          url = "http://<< .Address >>:8500"
      << end >>
    <<- end >>
    << with service "vault" ->>
    [http.services.vault]
      [http.services.vault.loadBalancer]
      << range . ->>
        [[http.services.vault.loadBalancer.servers]]
          url = "http://<< .Address >>:<< .Port >>"
      << end >>
    <<- end >>
EOH
        destination = "/config/conf/route-hashi.toml"
        change_mode = "noop"
      }

      resources {
        cpu    = 50
        memory = 50
      }
    }
  }
}
nomad/traefik/traefik.tf (new file, 29 lines)
@ -0,0 +1,29 @@
variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}

variable "consul_address" {
  type        = string
  description = "address of consul server for dynamic routes"
}

data "consul_nodes" "all-nodes" {
  query_options {
    datacenter = "dc1"
  }
}

resource "nomad_job" "traefik" {
  hcl2 {
    enabled = true
    vars = {
      # "consul_address" = "${var.consul_address}",
      "consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
      "base_hostname"  = "${var.base_hostname}",
    }
  }

  jobspec = file("${path.module}/traefik.nomad")
}
@ -1,27 +0,0 @@
---
- name: Unseal Vault
  hosts: vault_instances

  tasks:
    - name: Get Vault status
      uri:
        url: http://127.0.0.1:8200/v1/sys/health
        method: GET
        status_code: 200, 429, 472, 473, 501, 503
        body_format: json
        return_content: true
      register: vault_status

    - name: Unseal Vault
      no_log: true
      command:
        argv:
          - "vault"
          - "operator"
          - "unseal"
          - "-address=http://127.0.0.1:8200/"
          - "{{ item }}"
      loop: "{{ unseal_keys_hex }}"
      when:
        - unseal_keys_hex is defined
        - vault_status.json["sealed"]
nomad/vault-kv.tf (new file, 5 lines)
@ -0,0 +1,5 @@
resource "vault_mount" "kv" {
  path        = "kv"
  type        = "kv-v2"
  description = "Catch all kv mount"
}
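
To seed this mount from Terraform instead of the Ansible play above, something like the Vault provider's KV v2 secret resource could sit alongside it. A sketch, assuming a recent hashicorp/vault provider that ships vault_kv_secret_v2 (the secret name and payload are illustrative):

resource "vault_kv_secret_v2" "backups" {
  mount = vault_mount.kv.path
  name  = "backups"

  data_json = jsonencode({
    backup_passphrase = "tellnoone" # placeholder value, as in group_vars below
  })
}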
@ -16,8 +16,3 @@ hashi_vault_values:
   alert_email_addresses: email@example.com
   backups:
     backup_passphrase: tellnoone
-
-vault_userpass:
-  - name: admin
-    password: foo
-    policies: default
@ -34,7 +34,7 @@ job "whoami" {
       sidecar_task {
         resources {
           cpu    = 50
-          memory = 20
+          memory = 50
         }
       }
     }
@ -50,7 +50,6 @@ job "whoami" {
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.whoami.entryPoints=websecure",
-        "traefik.http.routers.whoami.middlewares=basic-auth@file",
       ]
     }
 
@ -65,7 +64,7 @@ job "whoami" {
 
       resources {
         cpu    = 50
-        memory = 20
+        memory = 50
       }
     }
   }