Browse source

rebase changes

orbitzs 4 years ago
parent commit 58748d346c
61 changed files with 1858 additions and 0 deletions
  1. .gitignore (+7 -0)
  2. docker-registry2/docker-compose.yml (+33 -0)
  3. docker-registry2/registry-config/credentials.yml (+25 -0)
  4. docker-registry2/registry-config/domain.csr (+15 -0)
  5. docker-registry2/registry-config/domain.key (+27 -0)
  6. docker-registry2/registry-config/htpasswd (+2 -0)
  7. docker-registry2/registry-config/signed.crt (+93 -0)
  8. openvpn/.env (+5 -0)
  9. openvpn/.gitignore (+1 -0)
  10. openvpn/docker-compose.yml (+34 -0)
  11. openvpn/gen_profile.sh (+11 -0)
  12. openvpn/hamster.ovpn (+110 -0)
  13. openvpn/harehole.ovpn (+112 -0)
  14. openvpn/rabbit2.ovpn (+112 -0)
  15. stackedit/.env (+1 -0)
  16. stackedit/.gitignore (+1 -0)
  17. stackedit/config/default.d/10-bitnami.ini (+24 -0)
  18. stackedit/config/default.d/README (+11 -0)
  19. stackedit/config/default.ini (+720 -0)
  20. stackedit/config/local.d/README (+8 -0)
  21. stackedit/config/local.ini (+94 -0)
  22. stackedit/config/vm.args (+99 -0)
  23. stackedit/docker-compose.yml (+49 -0)
  24. syncthing/.gitignore (+1 -0)
  25. wireguard/.env (+5 -0)
  26. wireguard/data/config/.donoteditthisfile (+6 -0)
  27. wireguard/data/config/coredns/Corefile (+4 -0)
  28. wireguard/data/config/peer1/peer1.conf (+10 -0)
  29. wireguard/data/config/peer1/peer1.png (BIN)
  30. wireguard/data/config/peer1/privatekey-peer1 (+1 -0)
  31. wireguard/data/config/peer1/publickey-peer1 (+1 -0)
  32. wireguard/data/config/peer2/peer2.conf (+10 -0)
  33. wireguard/data/config/peer2/peer2.png (BIN)
  34. wireguard/data/config/peer2/privatekey-peer2 (+1 -0)
  35. wireguard/data/config/peer2/publickey-peer2 (+1 -0)
  36. wireguard/data/config/peer3/peer3.conf (+10 -0)
  37. wireguard/data/config/peer3/peer3.png (BIN)
  38. wireguard/data/config/peer3/privatekey-peer3 (+1 -0)
  39. wireguard/data/config/peer3/publickey-peer3 (+1 -0)
  40. wireguard/data/config/peer4/peer4.conf (+10 -0)
  41. wireguard/data/config/peer4/peer4.png (BIN)
  42. wireguard/data/config/peer4/privatekey-peer4 (+1 -0)
  43. wireguard/data/config/peer4/publickey-peer4 (+1 -0)
  44. wireguard/data/config/privatekey (+1 -0)
  45. wireguard/data/config/publickey (+1 -0)
  46. wireguard/data/config/server/privatekey-server (+1 -0)
  47. wireguard/data/config/server/publickey-server (+1 -0)
  48. wireguard/data/config/server_privatekey (+1 -0)
  49. wireguard/data/config/templates/peer.conf (+10 -0)
  50. wireguard/data/config/templates/server.conf (+6 -0)
  51. wireguard/data/config/wg0.conf (+27 -0)
  52. wireguard/data/config/wg0.conf.bak (+12 -0)
  53. wireguard/data/wireguard-ui/config.json (+5 -0)
  54. wireguard/db/server/interfaces.json (+9 -0)
  55. wireguard/db/server/keypair.json (+5 -0)
  56. wireguard/docker-compose.yml (+24 -0)
  57. wireguard/docker-compose.yml.bak (+45 -0)
  58. wireguard/docker-compose.yml.bak2 (+51 -0)
  59. wireguard/privatekey (+1 -0)
  60. wireguard/publickey (+0 -0)
  61. wireguard/wg-manager/database.db (BIN)

+ 7 - 0
.gitignore

@@ -0,0 +1,7 @@
+docker-registry/
+#docker-registry2/
+openvpn-as/
+#openvpn/
+syncthing/data1/
+#wireguard/
+ubuntu/

+ 33 - 0
docker-registry2/docker-compose.yml

@@ -0,0 +1,33 @@
+version: '2.0'
+services:
+  registry:
+    image: registry:2.7
+    ports:
+      - 5000:5000
+    environment:
+      REGISTRY_HTTP_ADDR: 0.0.0.0:5000
+      REGISTRY_HTTP_TLS_CERTIFICATE: /var/docker-registry/registry-config/signed.crt
+      REGISTRY_HTTP_TLS_KEY: /var/docker-registry/registry-config/domain.key
+      REGISTRY_AUTH: htpasswd
+      REGISTRY_AUTH_HTPASSWD_REALM: Registry
+      REGISTRY_AUTH_HTPASSWD_PATH: /var/docker-registry/registry-config/htpasswd
+      
+ 
+    volumes:
+      - ./registry-data:/var/lib/registry
+      - ./registry-config/credentials.yml:/etc/docker/registry/config.yml
+      - ./registry-config/htpasswd:/var/docker-registry/registry-config/htpasswd
+      - ./registry-config/signed.crt:/var/docker-registry/registry-config/signed.crt
+      - ./registry-config/domain.key:/var/docker-registry/registry-config/domain.key
+
+  ui:
+    image: joxit/docker-registry-ui:latest
+    ports:
+      - 8099:80
+    environment:
+      - REGISTRY_TITLE=ALGOMETIC Docker  # registry name shown on the UI home page
+      - REGISTRY_URL=https://192.168.1.118:5000 # set to your own host/IP; localhost is not recommended
+      - SINGLE_REGISTRY=true
+    depends_on:
+      - registry
+
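Note on the htpasswd mount above: the registry only accepts bcrypt entries, which is what the $2y$ hashes in docker-registry2/registry-config/htpasswd are. A minimal sketch for adding a user, assuming the htpasswd tool from apache2-utils is available and using placeholder credentials (someuser/somepass are not the real ones):

    # append a bcrypt (-B) entry for a new user; -n prints to stdout instead of editing a file
    htpasswd -Bbn someuser somepass >> registry-config/htpasswd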

+ 25 - 0
docker-registry2/registry-config/credentials.yml

@@ -0,0 +1,25 @@
+version: 0.1
+log:
+  fields:
+    service: registry
+storage:
+  delete:
+    enabled: true
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /var/lib/registry
+http:
+  addr: :5000
+  headers:
+    X-Content-Type-Options: [nosniff]
+    Access-Control-Allow-Origin: ['https://hub.algometic.com']  # set to your own host/IP; localhost is not recommended
+    Access-Control-Allow-Methods: ['HEAD', 'GET', 'OPTIONS', 'DELETE']
+    Access-Control-Allow-Headers: ['Authorization', 'Accept']
+    Access-Control-Max-Age: [1728000]
+    Access-Control-Allow-Credentials: [true]
+    Access-Control-Expose-Headers: ['Docker-Content-Digest']
+auth:
+  htpasswd:
+    realm: basic-realm
+    path: /var/docker-registry/registry-config/htpasswd  # location of the password file
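A quick way to confirm that the TLS certificate and basic auth wired up above work is to hit the v2 catalog endpoint. This is only a sketch: it assumes hub.algometic.com resolves to the host running this stack, and the username is a placeholder:

    # curl prompts for the password; expect a JSON list of repositories on success
    curl -u someuser https://hub.algometic.com:5000/v2/_catalog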

+ 15 - 0
docker-registry2/registry-config/domain.csr

@@ -0,0 +1,15 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICYTCCAUkCAQAwHDEaMBgGA1UEAwwRaHViLmFsZ29tZXRpYy5jb20wggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDWFOTi4ul1yYXyetds+IhtQRbvYHsu
+BdEvnPTE9w0FdOW7Br0VftPUUKk4seh4AnNwIzSZ//UTRvmkdQTbM0sPInn1nK6k
+w3CGdbrprwPtCgp5hyjO0aML4in+EdQJQnqGczZKG10JPY91RxdYoaE8nzGdPctK
+m4N1LJGXY2t3Iw+fZXJwV2LU7JJP6vHxJ7hITw1EBqKr4arp9uwbIKtLAyd9SPMr
+H8itgkzq8v5a0JgZ7UyIaRMdn3M9EjzUMMjo0vRq7ZX1f+JBNFqai0kpsvMAudnV
+Ji1lZavNKw+bUaOKo6zDF14Y/DpE5soMCLNSBZjIs8LEHcIecCEV5YD3AgMBAAGg
+ADANBgkqhkiG9w0BAQsFAAOCAQEAfShRIZgqk49tbp+RTsD2CTxMpnwLeSxappt9
+52w9fM0KFMS7LxbNjzXTFRxBoOu4VzQFMh0RasXNAs04S9QColmgplFcCUkVbQJc
+TJlFGNc7/0iled76qfScd9G5BoeyJjVfyBa4JUgoL7nxBKJhAD4hbsJgXRYY5U/J
+GFvMdSJdnB5QwcKTi+FZphCh5L5oSQjGIM4FJMLDwxyy6Q3hyszIIhuA0w6lwPkH
+x46rjzd9eGqIIT+YWU6Czqe5SR3yHRSkSN3G/Y/PCLy/lrMTJE3wBcN371ppwLN8
+PBOrBepgYY7QxHBMO3cxdWxziFpdB9deVFwwlnGgc5/gFtwoxQ==
+-----END CERTIFICATE REQUEST-----

+ 27 - 0
docker-registry2/registry-config/domain.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA1hTk4uLpdcmF8nrXbPiIbUEW72B7LgXRL5z0xPcNBXTluwa9
+FX7T1FCpOLHoeAJzcCM0mf/1E0b5pHUE2zNLDyJ59ZyupMNwhnW66a8D7QoKeYco
+ztGjC+Ip/hHUCUJ6hnM2ShtdCT2PdUcXWKGhPJ8xnT3LSpuDdSyRl2NrdyMPn2Vy
+cFdi1OyST+rx8Se4SE8NRAaiq+Gq6fbsGyCrSwMnfUjzKx/IrYJM6vL+WtCYGe1M
+iGkTHZ9zPRI81DDI6NL0au2V9X/iQTRamotJKbLzALnZ1SYtZWWrzSsPm1GjiqOs
+wxdeGPw6RObKDAizUgWYyLPCxB3CHnAhFeWA9wIDAQABAoIBACO4UsSEpf2QRXhD
+BOTaxzCrlp8UUxlHwTKXlsOykQqjCZ3eLviCM+gOGV4rWSWFCyvfw+waKBMyWX8W
+Q1wrv89jqkPQA9fcAE9/H1PUbuxDK6JBag6wLetSrj0FToLJA9ahLKLK1AgxrFke
+JogUV+ncihO5ds0ZC6Mmb5h57zluYuh4I2rYjX7V5hISBg2T62mFW+QceUQi3zn4
+pV3GEGdDieddH///wK3qW6wn1ml+AzcAoWMf9BzmZScn6lXcTnHvEqj/eXbp2Vqk
+HM29KdOpt3GDmQY8hj2f9sEUVOsShe6yLkMAhfR3DH3DgKgZRJWhDk6DsRtZLA28
+dUVVHfECgYEA+cZLi0FFUcCK7Q8uQIlzgxcvF+UrX4PQPlt9iYYaQOZMgs7bxrhR
+YZ3zgg3G8lWpmtwcbh0D/1RvUATJWMFRg8PRRP/+bmt1tuUZ7G37Hwm29FYcnDq6
+ee0lapLAarQx49Dqe+pncPuupQlaLlVGFQGERa4v0rO+qeu3GLbxI3kCgYEA22rb
+feeY6A3pJqWE/sO7xRDkpgr3UsfkZWV7/80Poqp60veh1wq/9ynk9G4TGdMu4rdk
+p1zHQfq2xfvt7WuDy8CnleXizlV/edh5rfkDQ3Wz9NoRRtS/jF6obNeMtRIGsvAa
+yuMQN4ncLkZvf5tHA/G+37J0Bu+E6tphf6G8u+8CgYAnvzt0TI24Zbh14u2MTZM5
+MC5JUimlyHcSBUMj6FgzLbLNOhfVNq2UqCsA7ipTk/gqcY6Ao1NohUo1ZJ2Z5yGO
+o1Gg9k/JyIXx9eE7M4E7vjMEDNP+tIXKM0e5/uJp2IT76Nn3oCSX4SU4lBVqvB0r
+6JLg98gacooLxjvbzfjvmQKBgAMcO6pIOymm8NQkaoM2ARZlC+iypRjeToWFb8GL
+hZo3snAFJJ0owNnfFSjF2q12Ceca1Glu7kAoLqkVziScbozX2UhNru5f1ePDRs5J
+hltRctCYur/3ShPST67jS1JRxBiLQMX5fARdhup9Ax2a9OHAboU7QVvLz2WI3PLz
+NvqvAoGAPZYjOlzW+uy9l01FSyYGZFxl6uHYvNz3VyoVP2/tY9H9VEXmaIVocfh3
+9M22jLK4j8FV6jk5+ShH0mSYBkFmf9g/NJUfNXXyWzRvNlPBTHFDD1AHNUyA0sk6
+2ZF4BI5XAh9zqBKDG8YqUuGK+vf5zjCJ1p19+oqKduatzawIsTw=
+-----END RSA PRIVATE KEY-----

+ 2 - 0
docker-registry2/registry-config/htpasswd

@@ -0,0 +1,2 @@
+laxaurus:$2y$05$bvR1I7F1xJxfA36/R6Upiu/sDfcc5v.x.i1H.tAQKSpXM4M6SjHfG
+admin:$2y$05$Ro9QP5BJc.NBsIcODNJ1qOfNtQ6Pq2mHgYy646Pb5FV4K0F7sTZb.

+ 93 - 0
docker-registry2/registry-config/signed.crt

@@ -0,0 +1,93 @@
+-----BEGIN CERTIFICATE-----
+MIIFJjCCBA6gAwIBAgISAyL3MVD+kRIeJyDbFO6bGilxMA0GCSqGSIb3DQEBCwUA
+MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
+EwJSMzAeFw0yMTEyMDExMjMwMjlaFw0yMjAzMDExMjMwMjhaMBwxGjAYBgNVBAMT
+EWh1Yi5hbGdvbWV0aWMuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA1hTk4uLpdcmF8nrXbPiIbUEW72B7LgXRL5z0xPcNBXTluwa9FX7T1FCpOLHo
+eAJzcCM0mf/1E0b5pHUE2zNLDyJ59ZyupMNwhnW66a8D7QoKeYcoztGjC+Ip/hHU
+CUJ6hnM2ShtdCT2PdUcXWKGhPJ8xnT3LSpuDdSyRl2NrdyMPn2VycFdi1OyST+rx
+8Se4SE8NRAaiq+Gq6fbsGyCrSwMnfUjzKx/IrYJM6vL+WtCYGe1MiGkTHZ9zPRI8
+1DDI6NL0au2V9X/iQTRamotJKbLzALnZ1SYtZWWrzSsPm1GjiqOswxdeGPw6RObK
+DAizUgWYyLPCxB3CHnAhFeWA9wIDAQABo4ICSjCCAkYwDgYDVR0PAQH/BAQDAgWg
+MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G
+A1UdDgQWBBQRI2PNbvfxE40Dwxg0yGtKbOIXrjAfBgNVHSMEGDAWgBQULrMXt1hW
+y65QCUDmH6+dixTCxjBVBggrBgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6
+Ly9yMy5vLmxlbmNyLm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL3IzLmkubGVuY3Iu
+b3JnLzAcBgNVHREEFTATghFodWIuYWxnb21ldGljLmNvbTBMBgNVHSAERTBDMAgG
+BmeBDAECATA3BgsrBgEEAYLfEwEBATAoMCYGCCsGAQUFBwIBFhpodHRwOi8vY3Bz
+LmxldHNlbmNyeXB0Lm9yZzCCAQIGCisGAQQB1nkCBAIEgfMEgfAA7gB1ACl5vvCe
+OTkh8FZzn2Old+W+V32cYAr4+U1dJlwlXceEAAABfXYwf90AAAQDAEYwRAIgQtkE
+vH/Mn/dDDI6Dx4x9PcGQagaKKqrhPd6ROPM9BdICIFtAWf8Mf7WTCB6gMrEQQy36
+ZqBGNxoXLrOvOfiu3I+mAHUAQcjKsd8iRkoQxqE6CUKHXk4xixsD6+tLx2jwkGKW
+BvYAAAF9djCB5wAABAMARjBEAiACKKZgVhSe8OarcrS+EdmbJ5e1+VDGDd5g7IzQ
+Q06nOwIgVbR46UYFxSqUzXRXTN2KsnjSV/MEu5op7gOOfYkbklAwDQYJKoZIhvcN
+AQELBQADggEBACXKdnzy0Su5cqmckGZc2RJfNfpvCzl5TCTZLML1309h6xO2W1bk
+zWLGf4Wj5CE3P+Y/8mGU51cCJ22HwitMWaqd++Ds6pt4hwl+TC061h3kLJcaKKrQ
+UrLGyUExkFR7CQ5fE1Ai+YlmrN56Yfyh2mibCoHG/4H3xtLShKMowuhYUvA6Fnga
+1I8XgN9Tf9rJOiJOnjjQ5reQBewq36aKx/INXiiGsm/+XjuKU/HApxLUKOaDdaS0
+FI5UqyWFPHc0WXlf/PrfOxv5ZlJNMpPcWgo0p9hHxzb01MkfHHvS2wZPgkmPj8WC
+CBc2J4jN+aqw6wsoSdxH55P1HeFnRBjS/fA=
+-----END CERTIFICATE-----
+
+-----BEGIN CERTIFICATE-----
+MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
+WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
+RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
+R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
+sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
+NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
+Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
+/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
+Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
+FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
+AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
+Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
+gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
+PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
+ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
+CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
+lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
+avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
+yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
+yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
+hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
+HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
+MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
+nLRbwHOoq7hHwg==
+-----END CERTIFICATE-----
+
+-----BEGIN CERTIFICATE-----
+MIIFYDCCBEigAwIBAgIQQAF3ITfU6UK47naqPGQKtzANBgkqhkiG9w0BAQsFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTIxMDEyMDE5MTQwM1oXDTI0MDkzMDE4MTQwM1ow
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCt6CRz9BQ385ueK1coHIe+3LffOJCMbjzmV6B493XC
+ov71am72AE8o295ohmxEk7axY/0UEmu/H9LqMZshftEzPLpI9d1537O4/xLxIZpL
+wYqGcWlKZmZsj348cL+tKSIG8+TA5oCu4kuPt5l+lAOf00eXfJlII1PoOK5PCm+D
+LtFJV4yAdLbaL9A4jXsDcCEbdfIwPPqPrt3aY6vrFk/CjhFLfs8L6P+1dy70sntK
+4EwSJQxwjQMpoOFTJOwT2e4ZvxCzSow/iaNhUd6shweU9GNx7C7ib1uYgeGJXDR5
+bHbvO5BieebbpJovJsXQEOEO3tkQjhb7t/eo98flAgeYjzYIlefiN5YNNnWe+w5y
+sR2bvAP5SQXYgd0FtCrWQemsAXaVCg/Y39W9Eh81LygXbNKYwagJZHduRze6zqxZ
+Xmidf3LWicUGQSk+WT7dJvUkyRGnWqNMQB9GoZm1pzpRboY7nn1ypxIFeFntPlF4
+FQsDj43QLwWyPntKHEtzBRL8xurgUBN8Q5N0s8p0544fAQjQMNRbcTa0B7rBMDBc
+SLeCO5imfWCKoqMpgsy6vYMEG6KDA0Gh1gXxG8K28Kh8hjtGqEgqiNx2mna/H2ql
+PRmP6zjzZN7IKw0KKP/32+IVQtQi0Cdd4Xn+GOdwiK1O5tmLOsbdJ1Fu/7xk9TND
+TwIDAQABo4IBRjCCAUIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+SwYIKwYBBQUHAQEEPzA9MDsGCCsGAQUFBzAChi9odHRwOi8vYXBwcy5pZGVudHJ1
+c3QuY29tL3Jvb3RzL2RzdHJvb3RjYXgzLnA3YzAfBgNVHSMEGDAWgBTEp7Gkeyxx
++tvhS5B1/8QVYIWJEDBUBgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEB
+ATAwMC4GCCsGAQUFBwIBFiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQu
+b3JnMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9jcmwuaWRlbnRydXN0LmNvbS9E
+U1RST09UQ0FYM0NSTC5jcmwwHQYDVR0OBBYEFHm0WeZ7tuXkAXOACIjIGlj26Ztu
+MA0GCSqGSIb3DQEBCwUAA4IBAQAKcwBslm7/DlLQrt2M51oGrS+o44+/yQoDFVDC
+5WxCu2+b9LRPwkSICHXM6webFGJueN7sJ7o5XPWioW5WlHAQU7G75K/QosMrAdSW
+9MUgNTP52GE24HGNtLi1qoJFlcDyqSMo59ahy2cI2qBDLKobkx/J3vWraV0T9VuG
+WCLKTVXkcGdtwlfFRjlBz4pYg1htmf5X6DYO8A4jqv2Il9DjXA6USbW1FzXSLr9O
+he8Y4IWS6wY7bCkjCWDcRQJMEhg76fsO3txE+FiYruq9RUWhiF1myv4Q6W+CyBFC
+Dfvp7OOGAN6dEOM4+qR9sdjoSYKEBpsr6GtPAQw4dy753ec5
+-----END CERTIFICATE-----

+ 5 - 0
openvpn/.env

@@ -0,0 +1,5 @@
+CONTAINER_NAME=openvpn
+DATA_ROOT=./openvpn-data
+
+
+

+ 1 - 0
openvpn/.gitignore

@@ -0,0 +1 @@
+openvpn-data/conf/

+ 34 - 0
openvpn/docker-compose.yml

@@ -0,0 +1,34 @@
+version: "2.1"
+services:
+  openvpn:
+    cap_add:
+     - NET_ADMIN
+    image: kylemanna/openvpn:2.4
+    container_name: ${CONTAINER_NAME} 
+    ports:
+     - "1194:1194/udp"
+     - "8050:8080"
+     - "8989:8989"
+    restart: "no" 
+    volumes:
+     - ${DATA_ROOT}/conf:/etc/openvpn
+     - ${DATA_ROOT}/ccd_master:/etc/openvpn/ccd
+    
+  ovpn-admin:
+    image: flant/ovpn-admin:1.7.5 
+    container_name: ${CONTAINER_NAME}-admin
+    command: /app/ovpn-admin
+    environment:
+      OVPN_DEBUG: "True"
+      OVPN_VERBOSE: "True"
+      OVPN_NETWORK: "192.168.255.0/24"
+      EASYRSA_PATH: "/mnt/easyrsa"
+      OVPN_SERVER: "127.0.0.1:1194:tcp"
+      OVPN_INDEX_PATH: "/mnt/easyrsa/pki/index.txt"
+    network_mode: service:openvpn
+    volumes:
+      - ${DATA_ROOT}/conf:/mnt/easyrsa
+      - ${DATA_ROOT}/ccd_master:/mnt/ccd
+
+
+    
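The kylemanna/openvpn image expects its config and PKI to live in the volume mounted at /etc/openvpn (here ${DATA_ROOT}/conf, which openvpn/.gitignore keeps out of the repo). A sketch of the one-time initialization using the helpers documented for that image, with vpn.algometic.com taken from the client profiles below:

    # generate the server config, then build the CA/PKI (prompts for a CA passphrase)
    docker-compose run --rm openvpn ovpn_genconfig -u udp://vpn.algometic.com
    docker-compose run --rm openvpn ovpn_initpki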

+ 11 - 0
openvpn/gen_profile.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [[ -z $1 ]];
+then
+	echo "Usage: gen_profile <client_name>"
+else
+
+	export CLIENTNAME=$1
+	docker-compose run --rm openvpn easyrsa build-client-full $CLIENTNAME
+	docker-compose run --rm openvpn ovpn_getclient $CLIENTNAME > $CLIENTNAME.ovpn
+fi
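Usage sketch: the hamster.ovpn profile committed below would be produced by a run like this (easyrsa prompts for the CA passphrase while signing the client certificate):

    ./gen_profile.sh hamster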

+ 110 - 0
openvpn/hamster.ovpn

@@ -0,0 +1,110 @@
+
+client
+nobind
+dev tun
+remote-cert-tls server
+
+remote vpn.algometic.com 1194 udp
+
+<key>
+-----BEGIN PRIVATE KEY-----
+MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCsJHGGB4JJfsFO
+FpC/iGgpPNCS3wY/g7sOgzo2p/JKA/gKWHqqVmhvxggPvdQJSAwYcuLndOovNWY0
+LvS8xBWRwu9nj0s/r3/GTVLpHIssa63Nj6q8+Sl4lSLkl76QDwLiKC8Ssky66Hh4
+JHsOxSu9APK8jLAajIrcdA8OX2mRBE+y67/jDx9lKSEZkxlEXJgHAy3MRsXkWTN0
+FEfQbcEMo0qPr9ep0JxJ1YrcRxYgOZ45Fm+u2/X+0z+zZ90/bJr4tX5y6peqkXBC
+VcIyedUCgKM6spNLpYARbkWr88xPpDvvwlmoDhNSHeHKbq/dVwvfRgti70rMGCeu
+KE1riAMZAgMBAAECggEBAJNgMAD7dMPPTVe5YftYoTSM6FSVtBe0g/UMzE32wywy
+8ayJy5sBly/6bvRrJJk9oX1c/n4CTmxUX0fV25mwlnbQWX5j45yGd/IiSRViZpSF
+K6uNkDNxbdvacqEaeP+Emodct9Bd7esnE9xn/yLKE4Qgx7A9EpfoMwpCIpG/hfWs
+P8P6OzCHZau7K4FHUBSY8a4pPwfW2gHXQMvHgpfMG8mWwBWjDnvXLxN47nObqeIH
+gtEfZLmIYsoGaJMlVWmUXYTJt4eey7cGI3F6F9SabxPSNi5xzKV54ww+ewhGvxXQ
+2id9rkP0M8/JtcozmJYBJ9KXgFxhKcJmntLpEF7XCjECgYEA1MLRI0PnioRCShaN
+8kEj377g5MPD27KFJTsy+vGvsH2FcuT/79zPzAp7eOs56lzmVrXxB1JjjWHiZZS0
+Ma5mSfLi+F82HNt8j1bxxkk2Mef+siGFj+zob46+Z8gyvGwSIvj6BX425qVxvxfd
+IBw71krWOFyYSELqlBt89QGONHsCgYEAzyBivdyOSRHh3Glyii+xgncPl2C1Ib79
+huYmKrn96Cm0gtYp7YlXRWdWtAWg3Zaa7VC6ZX3dHRArS+xNrwn/zh/xPOeQjCgn
+ZECBA78htfuH09unRwvZ8+JLYfltekTF8obfAZKIUKog7oBflDM8sM74JGeEVZ07
+zmfV+1ripHsCgYAGn2nTJzH15dgKZllUySJMvKxTcqSOGih62DTmCs/lNoMI6Ifu
+93igaVMemHPdD7c7y74M9EuVAw8BCnfxc/RLKPxXrt8IVuyxqYstGOpO82HCObjm
+mgFCN7dFk73Aj7ygyg8+TId49n1r7Vo8vaQ/L2Urq/YxiaOzqIdjOQEsMQJ/Ubd1
+PXLGsA1eDpeLmPk4D0Dp5c9Bpw6y94XViQXYJsBNd1EHOa/1vmwGyP9vd/zzIq22
+3FPL6Kt4aXvRKj3Hrorrohu565Hr+KF6RQAjTKjv7aW7WUxTsOSW2RCPIK4Baq3L
+TdOB9FEqrO34x39vclI1lCdDDoP6FHTi90fkIwKBgHasfwL6H/V+OxJHvFzyFN4V
+P61soUM1p0caApuQ/1DZ6cUX0r1rgcysyEer5Wv5HYfv92IHyG4gw3H5/DM999zp
+aFCwOCw+yJkZLj2NYsCPOnyNIJA7v5ikX19YN6o6MFeJ3OCQlc4IZdFfVsi91FOO
+WQBdzJMH+gHzXBUsnHlZ
+-----END PRIVATE KEY-----
+</key>
+<cert>
+-----BEGIN CERTIFICATE-----
+MIIDYjCCAkqgAwIBAgIRAJx/Vtnswf/lR9SESVhcZ34wDQYJKoZIhvcNAQELBQAw
+HDEaMBgGA1UEAwwRdnBuLmFsZ29tZXRpYy5jb20wHhcNMjExMjI3MTE0NjQ0WhcN
+MjQwMzMxMTE0NjQ0WjASMRAwDgYDVQQDDAdoYW1zdGVyMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEArCRxhgeCSX7BThaQv4hoKTzQkt8GP4O7DoM6Nqfy
+SgP4Clh6qlZob8YID73UCUgMGHLi53TqLzVmNC70vMQVkcLvZ49LP69/xk1S6RyL
+LGutzY+qvPkpeJUi5Je+kA8C4igvErJMuuh4eCR7DsUrvQDyvIywGoyK3HQPDl9p
+kQRPsuu/4w8fZSkhGZMZRFyYBwMtzEbF5FkzdBRH0G3BDKNKj6/XqdCcSdWK3EcW
+IDmeORZvrtv1/tM/s2fdP2ya+LV+cuqXqpFwQlXCMnnVAoCjOrKTS6WAEW5Fq/PM
+T6Q778JZqA4TUh3hym6v3VcL30YLYu9KzBgnrihNa4gDGQIDAQABo4GoMIGlMAkG
+A1UdEwQCMAAwHQYDVR0OBBYEFBi599JFNWu/7qksEjOBANT+y3GBMFcGA1UdIwRQ
+ME6AFLzutrumE7nkfqCQuyx20cj+l933oSCkHjAcMRowGAYDVQQDDBF2cG4uYWxn
+b21ldGljLmNvbYIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQCD5MKifRGKG/6v
+bnvnQMDl9Q9hl19tEpcaaTylyeiLzaVs+LjZo0WqEg1nYfihtYZ5fZW8nGUaJ355
+1I24TRj/OmFlxMGG+aCTKQIV7uRNqZB1kK2/a0/Mzn7EEyOVJhH+ZqORkxvgAQwK
+4kOMkgk1IZfv9/28GLMUCfDqMvBLjOxQLq5seZES87L49uWCghNNUoEkc/kzqGPE
+HtVJKRirWRs6jqu5GgfZ55JyI0i3UCftKfatTQR4Yn64Sk1FvAm6dMaXb1K2oyXK
+n9kD8qgftwj2qtrGVwuF0f3i+yKjoopAGslQP6qWqX3aom4/khyQsXl14o4t9s1a
+ZrwiFCwE
+-----END CERTIFICATE-----
+</cert>
+<ca>
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wDQYJKoZIhvcNAQEL
+BQAwHDEaMBgGA1UEAwwRdnBuLmFsZ29tZXRpYy5jb20wHhcNMjExMjIzMTcwMzQ0
+WhcNMzExMjIxMTcwMzQ0WjAcMRowGAYDVQQDDBF2cG4uYWxnb21ldGljLmNvbTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPcQ5URlwTarZvgUEWSKjcWM
+BuFs8CEJ0fn+4jSA04l/KXzlaoiIY3zUL7imG6udRRSEKOWm9x5RTT+HmSQOSlRQ
+YN5JsCDn8hqrqqO+nYMQ860Q4QBTCzNfUjzBt6mjo8bHYZSFSMc9oG4vpitxDZ4i
+9kpxAi4lo6jz+4KSx5Vbtb7Fae+MicYnDm2ae9O08yaOiiTze6QytIfuP9lxaypl
+JGtvJ/rj/H7FpgCHa3ZYuRTHbIt5kmXoAHrt1grwHrunQ/joymOZXy/GhV2IE1PA
+1rc/3iiNg+eh99xC2V3HIkxXfHcooMt9peIt2i7do28IRUE3HAWiLQnI1LcJ8MEC
+AwEAAaOBljCBkzAdBgNVHQ4EFgQUvO62u6YTueR+oJC7LHbRyP6X3fcwVwYDVR0j
+BFAwToAUvO62u6YTueR+oJC7LHbRyP6X3fehIKQeMBwxGjAYBgNVBAMMEXZwbi5h
+bGdvbWV0aWMuY29tghQVmlePib4wFTQli+ioIu1uD0E/7jAMBgNVHRMEBTADAQH/
+MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAhOBWEIYM2wAPmRK8sZ5X
+ln4jz5EDymTdpHZSu+1DSLDexPRPvntz4Py6DTVtJhJFfl5x6BLwLijnMP8FntLY
+08iNMavfgaLuiRZjePojjsA+2Pyb83xS3Jm3kLn8xSkDGQKhDTW84eGt/LYzEPuz
+uYQgMCupVuo99fCLzxZh2zV0sPiM759hjSl0IPulX3o7HQ/vEsq9dCzXKLg2CYLj
+5MjqSuD3mqBg+m1ifIhkoiE/bHw+k1O3inp5JumVdcbXAIZkgNQlEqzr+4meIyD3
+c1gfEVx20DgSieeMPttxXxB19saljOtpLvY9JfbzhJ5SAJjyLsWIUabShyXrJwWv
+yQ==
+-----END CERTIFICATE-----
+</ca>
+key-direction 1
+<tls-auth>
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+91a3938f0e980ba5776328c65524fdad
+8debb91e248152a0bff30bd17fbd2f4f
+e69e95a4db9691a82655907959ef693b
+5dbb430527bf10b731d802050ae2258e
+217887cae173a8e00420c4b7cbbfa33d
+0cf0c0c469647ca6a07b15f2fac43716
+b4568d8dcc977ae7d2d8525efbc52fb1
+e5e300b0ac94e862d09e82b79318ae00
+aa8c63d684ceb52fe1860c5a472bbb40
+c7e265cd63509d899acf2ddbcb79adfb
+5534a08f7937515569d85080d2f1c830
+9a679335facdb4081669c1b152f9c445
+1c783f04610b4f629c06b5752a0bdde1
+7e50cef65fa040e19dd3f76b3494311e
+fc332cee2ae3835806e6c7647fda4ed5
+527aac5b009c00bacdaa405b57b2f3ab
+-----END OpenVPN Static key V1-----
+</tls-auth>
+
+redirect-gateway def1
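The generated profile bundles the client key, certificate, CA and tls-auth key inline, so a client only needs this one file. A sketch of connecting with the stock OpenVPN client (root is needed for the tun device):

    sudo openvpn --config hamster.ovpn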

+ 112 - 0
openvpn/harehole.ovpn

@@ -0,0 +1,112 @@
+
+client
+nobind
+dev tun
+remote-cert-tls server
+
+remote vpn.algometic.com 1194 udp
+
+<key>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQIAZBLhl+A/wQCAggA
+MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECKWput8IDC8BBIIEyGqNd3s3/lBO
+PwfdP3qD+P6Jy7PzMdFMoBbjER2cr12BI9WGyv16NbDAmtK4MX55OzssUma1tBqP
+serlfTEG6OxbwfQNiUKnscteqLzDXi4gN1JQtV2CkBfCU0PxzrTRlbx7Cjw6AGgq
+PRGgqGtlhrxZS5HGHy6so28JXa28qx/6yV/36msz59brrQh00qSgGjo7k4LhdXZ6
+pfKtTc7R49E2NuJd6tldWI3VfEJq/CEORTgLv3P6Jn/cXQhZ8trimUDf09kWTxYy
+zErBFY3iTAqzaKvsFB5AuSxhI3BSacT28ZfbgMcMetoZP/z8wKXFscEVw/3AaSv3
+m1nckACoHykg7TMF4oZ1emY9rFqnlhrYmOU3ByNoGHl/OBM1x9hZPONzT9xJ5KBr
+hrCnD9xVgMloZnlpRzKVBxdhuliLpyFRyK/YGbbfmeEpF5mGSQsbzORAUymMmzfP
+2v7jZPEwbLoFDPVymBsy3anFqyfWmjXxJnWUHvwRFJnNt+SDh/6Yeup7E3eeNade
+UrQXVc1e09N5GbCa3WcRH02oRbMd4XizbVK7pkHnZZXGODKQkP4eWKDOH1Pz21bW
+2VyyluJ+U7qYNM+o2DLXxCTNU0/qjgmYoX5Si3gnDUat7whpvsmr6h4WQ8jrA+Gg
+ZEdbPmlupD9IxuSeKPoA3FVm9tg4kTrwM2xuWmb8jICOM3FRvlB3JMPNZEMBmiN4
+Eu7dYIF0f2Q5bQFsiZd3V5HM9QUFgCFcJA432EdTFOyi6mRy/74r8qrRf3tXF5O+
+xcIEzF5GLQykBsEv4diWxgABr2vd1geWsCK8/s1lwjDRzUXb4lX7t8L6q4tQ+MNN
+g0+NXL58Ve8RV9X4bLhGzEyZtIeJF35kW+p7g5DRmlmNNaICxzRzBuz2aGhsn9IN
+YdEK1/80/GFYiKgGr8O++CIPUICFZcbIoE+jCadtaNqx9qFZfzgordH8TFY8XvCK
+7mtfH5TJLdX22te6viFMGv13YHH1eRE75ISQIuXDyE2Z9Wb9j8zWCoTFuohmGYjy
+UrR5Q2piOb5BRFJGZSIjX2kYRwM989BWYy+1NTezEZijJBLWQbkm0Y0uDOqoZy5c
+skJ01nkjXoXdJzdWJe1pzTxZbTHJfGLYd5ZaXy7isGseRd1QFHTbEGgOMO2bhpML
+5Zg3ae48lIl4EusJAhB18HNZszSQwmuLhc2qefQ81Awdns2sxjqbxDouVQTJoEtr
+xgKkEmQfpcb0+EjoAichuBhbWl9g6kP/R/ZOybS6CE9m8RymeURX6KOlXeFVomTJ
+B6DZVgMVzbWeCAi+F0wdl4kaQfEkSeC5yVuBVk1yOg58FOT25SlCt4t4XOpbUXmo
+unQqPaC/zbL0IP0SxqBF3t2OLKL972M2xds4h/GmRm3gI6RME8uzA0x7cWSvkmtN
+7DQeAlvzD+CSuQMQDGW1YlgofyXBKxGKr1qHzGJwNSBtyTRAE3f5F34U5mkFC6Br
+ol/Cu0ACjm8JegNMVJzq/Q+cy+Go00bKMLmcFKDaQHJZnXfMk3h9fHupC2lSzsRh
+pOxeq9CPMWdO/hst5FlB81YCe//qjkt592JJA4fcbf1Ei+sO2Xlf/HtrNvkxwVX7
+hx6vPN6LnDa3mszphNSBYg==
+-----END ENCRYPTED PRIVATE KEY-----
+</key>
+<cert>
+-----BEGIN CERTIFICATE-----
+MIIDYjCCAkqgAwIBAgIQHw4J4GNPfpnsgi5W3sUZVDANBgkqhkiG9w0BAQsFADAc
+MRowGAYDVQQDDBF2cG4uYWxnb21ldGljLmNvbTAeFw0yMTEyMjQwMDU1MDFaFw0y
+NDAzMjgwMDU1MDFaMBMxETAPBgNVBAMMCGhhcmVob2xlMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAyTJz7J9RLEjldDFpYJCPnU44HzyBNbJWqwV77Q1C
+nCVSKXQe1OF5+z+HRN8m6yyL2PJs47yCuiodgGIHSPw++4rjteRf15g8fEJjC4jl
+B2I1fq/mnmXEL822QtU5qKAEnlyu9G1tudBsAw2oo1IPRw1aTBBBS6faf0AmZ2aH
+H+l7O3kXj1tg7wqdT7VNjb/q6aLMDSRC4FD1tmU2cnH6VkUodScu5awyLRNJiuIM
+2d6cEnCEvgwJksamnh0KfQK/P3gBZXXvZ7GXVI+V0qAKQDeVwlP3iSFAEJYsaS+0
+/nxJz7/3cR9TBcvlb3PPpyoQBfVvUvF4yEuGfbrZLlTyfwIDAQABo4GoMIGlMAkG
+A1UdEwQCMAAwHQYDVR0OBBYEFMUchwZeP0O8XCr1vDV2bHQSloF+MFcGA1UdIwRQ
+ME6AFLzutrumE7nkfqCQuyx20cj+l933oSCkHjAcMRowGAYDVQQDDBF2cG4uYWxn
+b21ldGljLmNvbYIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQC1pyavCaLa0G7N
+pNW5SNaT/g2/EWa052MF3hYdpNzPdKZf2FEqeGlGDbtRolfJ5VFlGvujdFD6xb7X
+KbcEiCX1Hqy/TXODFUw6YXZzgtqRCyEFy+JS4qfRp5GTdG5hjnzb9oRjcmJ3NMq1
+SLGLNzHp+he3nFB9J+jNvdvxk3/hueVwnAWMtqBH05/HlzUzRrlNi0mleVpq357A
+iK64yCkHv9ISAHXaNHa4uU2UbVkFvBEPAEsZIB6nEPD6Vp/j0Y+oGgiRgWlGRSoJ
+qzyOCXBr9G15rFErAO1EtOX2PRDLWjqDYL/iR3MOGRaxc/vFbqruEpdO9AHase9S
+cp6DdnSF
+-----END CERTIFICATE-----
+</cert>
+<ca>
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wDQYJKoZIhvcNAQEL
+BQAwHDEaMBgGA1UEAwwRdnBuLmFsZ29tZXRpYy5jb20wHhcNMjExMjIzMTcwMzQ0
+WhcNMzExMjIxMTcwMzQ0WjAcMRowGAYDVQQDDBF2cG4uYWxnb21ldGljLmNvbTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPcQ5URlwTarZvgUEWSKjcWM
+BuFs8CEJ0fn+4jSA04l/KXzlaoiIY3zUL7imG6udRRSEKOWm9x5RTT+HmSQOSlRQ
+YN5JsCDn8hqrqqO+nYMQ860Q4QBTCzNfUjzBt6mjo8bHYZSFSMc9oG4vpitxDZ4i
+9kpxAi4lo6jz+4KSx5Vbtb7Fae+MicYnDm2ae9O08yaOiiTze6QytIfuP9lxaypl
+JGtvJ/rj/H7FpgCHa3ZYuRTHbIt5kmXoAHrt1grwHrunQ/joymOZXy/GhV2IE1PA
+1rc/3iiNg+eh99xC2V3HIkxXfHcooMt9peIt2i7do28IRUE3HAWiLQnI1LcJ8MEC
+AwEAAaOBljCBkzAdBgNVHQ4EFgQUvO62u6YTueR+oJC7LHbRyP6X3fcwVwYDVR0j
+BFAwToAUvO62u6YTueR+oJC7LHbRyP6X3fehIKQeMBwxGjAYBgNVBAMMEXZwbi5h
+bGdvbWV0aWMuY29tghQVmlePib4wFTQli+ioIu1uD0E/7jAMBgNVHRMEBTADAQH/
+MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAhOBWEIYM2wAPmRK8sZ5X
+ln4jz5EDymTdpHZSu+1DSLDexPRPvntz4Py6DTVtJhJFfl5x6BLwLijnMP8FntLY
+08iNMavfgaLuiRZjePojjsA+2Pyb83xS3Jm3kLn8xSkDGQKhDTW84eGt/LYzEPuz
+uYQgMCupVuo99fCLzxZh2zV0sPiM759hjSl0IPulX3o7HQ/vEsq9dCzXKLg2CYLj
+5MjqSuD3mqBg+m1ifIhkoiE/bHw+k1O3inp5JumVdcbXAIZkgNQlEqzr+4meIyD3
+c1gfEVx20DgSieeMPttxXxB19saljOtpLvY9JfbzhJ5SAJjyLsWIUabShyXrJwWv
+yQ==
+-----END CERTIFICATE-----
+</ca>
+key-direction 1
+<tls-auth>
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+91a3938f0e980ba5776328c65524fdad
+8debb91e248152a0bff30bd17fbd2f4f
+e69e95a4db9691a82655907959ef693b
+5dbb430527bf10b731d802050ae2258e
+217887cae173a8e00420c4b7cbbfa33d
+0cf0c0c469647ca6a07b15f2fac43716
+b4568d8dcc977ae7d2d8525efbc52fb1
+e5e300b0ac94e862d09e82b79318ae00
+aa8c63d684ceb52fe1860c5a472bbb40
+c7e265cd63509d899acf2ddbcb79adfb
+5534a08f7937515569d85080d2f1c830
+9a679335facdb4081669c1b152f9c445
+1c783f04610b4f629c06b5752a0bdde1
+7e50cef65fa040e19dd3f76b3494311e
+fc332cee2ae3835806e6c7647fda4ed5
+527aac5b009c00bacdaa405b57b2f3ab
+-----END OpenVPN Static key V1-----
+</tls-auth>
+
+redirect-gateway def1

+ 112 - 0
openvpn/rabbit2.ovpn

@@ -0,0 +1,112 @@
+
+client
+nobind
+dev tun
+remote-cert-tls server
+
+remote vpn.algometic.com 1194 udp
+
+<key>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQIlHEV+s+4MA4CAggA
+MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECB7vCKEv0uFBBIIEyIaroK6Dy5FG
+BP7n2W7+Jt4Fbd1Kzfwlp0sQUGjQPPdt0aSTg8OkxYKNe1BAYZ8KEx0rp4rHzjX1
+op8QSx1B8RnPLdawX+v0LttCD2Gs2v+vUXndR1fZUJ7I5cPxtkPcMMdfOXeDaOzF
+Ozz275jXF2a86uEUMpw6A7qtaoDxzvEsRW42WuHzoeX6Eu8X2XuCXGoF68bI5gZy
+LRbqoSr8dMKW8G8VpS7FGey6JvjFRAk2ErAoRjMBtzDfKZetLN73FBKd6RWI9KMC
+JRf1aRRutW6v2K0RUMU2EX3qaUfGdYDlxxqknOar6BbvcgB4RroP+CP31PPNTcYs
+bpeLbSgkT9cVWB3V9U2tZsEvOJDsngJxL8wTfvY2nlDJdpwumEZMtp+OMiaUaits
+Qkyq8n6eDmfdbgaIubAiDYczG8El3bx2JuA6jDn5fAKSY9rMTkpULUSV24qGJPcj
+Rl6UG0Aj0FQxnFx/JatqF67kO37xp8rwO8/Zg+1sz64L828XPxWb2hBqsJ7s8l+K
+Ztk+pMJHMYdYPWa4hDw43tdZC3OiJzAtQhpiB5SS4M0r88b+shu8Kv0GA+kCUHs4
+tETit+LtA1FzIdGc/10VfwZanKss5821SoRyfhvelWwhlmBpDe6XuehgIMrt5wGM
+HWD4gny0jLkh8Fbnb6Sr3Dl/W3JqUVAEAs4byu3MPLQCQI8djHEv8YyNYRrgyf0T
+mKlIZtX3D5QLIGRxx9vDY0NW96jlaVBtZwKD9G91NAA4spXSK3r6OVIQfy0Nt48b
+Hh/KS5Jz9bvJbTbtMVVmzl+m09piBU1fkrDM4HgeYIxyNv43cEZgXNIJIr+mO6Fd
+n7SE7drd2uWxFhZugcscW5h0LfTT/6f3fmUPtK0BgHKWcocPbcPTYGx1vfulLluw
+XTkP4ARQJbjyeHgDV7USruTuPwUHVfDdl4zkCSYpJZ7v7Qzv0HPGZuvAmhPhc/y2
+C9+HrRRDbBCjUxQFQYx87AIcXM/oow3MNkNm6VlZnsVvjfwomHa8hBiRYJ9zQYBQ
+2ikc537yzORprGgTdlgQPQGOl/e4lr/MEyl0qMU70odfwdrH/4Jpw/09nUbaCcDJ
+oceVnewuwYBtGOWPxmm9sij3GstQB0TeeB7K4IX/DUO1XZ4u2/LSuUu0P3E+b20t
+8bjQFqcCu706mflHxDzDAmxga7kCBEQH3wNkxHmLJ38vRC5uFZPd7KXXUNLvxDOh
++Jc/zcW6glymCb1lS5kZ2e02uBLA0JP3dvwcbyJBjPHjNfS9Z34Gz30TlUu10IIp
+G76DeeeymdglKArRR50CBMcMF2UzesvIMSC77HmGuP7jazLujLDi4To7imz9ph0s
+xbJhgTz9fSbrRBet4XX+ArldluW1S9RI1jA2WDN2ubgtdovSK7jHn9KPFuxhMSKQ
+iNHgOCu5/WQJ5X4UD9ElV7eTcT9GHuYj0OUsz5fEFV84wui/Eeg0kbF0YJxIhm5e
+HrrMs2UJ3omguM8SoG3gn6oqWSpGGqYJ9t03cIgZd3UyedPy4ZdRowJRHPHmi0p3
+efLsRZR2HbsnozfEoHNXj4L4E1MWtG/TwfR1CFWbMuLfcXLrto8Z7BGx8S4VWTth
+jklT4vTUALxeXQ8WW5WFoA==
+-----END ENCRYPTED PRIVATE KEY-----
+</key>
+<cert>
+-----BEGIN CERTIFICATE-----
+MIIDYjCCAkqgAwIBAgIRAPKm1PColmv2CbU948CMzs8wDQYJKoZIhvcNAQELBQAw
+HDEaMBgGA1UEAwwRdnBuLmFsZ29tZXRpYy5jb20wHhcNMjExMjI0MDMwMzQ4WhcN
+MjQwMzI4MDMwMzQ4WjASMRAwDgYDVQQDDAdyYWJiaXQyMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAvFRvCTig46hcBKQMy0GeP6LwVwFRIR4wrI0f5Y+G
+O/HlhusE59KeJlyn/rS5Ov+FqThOJ5hT8/CRXMWVQ4hwxBTZ3dC27Ory92o+eJld
+hfLTZk38bSJPOIPspqKtMjgqyXStUVCwuZF0GaSDdn1UFha9FKuMMNHIpSpz7S9Y
+StDRNj0Y6iIdZ3hdMcjNSGaGqAdZNnu6guYK7BPE1W4A43Cd3SRz83+7jEHhm8yE
+aW1rR1bjpEnAiwCXXrpg8yNLzuzRsZy/HM/ph7Y89fjPVJ13T80L9tUM4Afd1xXo
+2xN4CFQpu4abGmQdQymxCjppGG9nfXaVu9HSvy+GaHy+vQIDAQABo4GoMIGlMAkG
+A1UdEwQCMAAwHQYDVR0OBBYEFKg0UjAeVmgrsM7sJ+jI7gxnqd52MFcGA1UdIwRQ
+ME6AFLzutrumE7nkfqCQuyx20cj+l933oSCkHjAcMRowGAYDVQQDDBF2cG4uYWxn
+b21ldGljLmNvbYIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQCyjKYUdFsS5SRt
+uRS2LGBNheou1K7s/lp9sQYEJ1AhSgEKtWLuYkUlf6hLAHJoWVL7SCzy9gAyCcNW
+uoo4c2ERf5WVhlgHMKJdvIZKsJBs/ihYhPWWd4nTUSq7dC3zuws2DCYMyoaNOCnw
+NsyKflBKlf+nUsyfJX42cxswgSWnOQqOIz8cXXtoMEqGw3ChyA226iambKbxrQAd
+dvbXUXn0S3B+hffkxH3HORn3+NOtCV81QQQyaI4zb3isnxfoe39wK0GRmdrvjFay
+l8UNRiLckrO8HiCjvocRGLYVyOLFC4RGon6JFyUKLOiiKHy04VDuz+o9oIomH9A1
+MjZDAttS
+-----END CERTIFICATE-----
+</cert>
+<ca>
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIUFZpXj4m+MBU0JYvoqCLtbg9BP+4wDQYJKoZIhvcNAQEL
+BQAwHDEaMBgGA1UEAwwRdnBuLmFsZ29tZXRpYy5jb20wHhcNMjExMjIzMTcwMzQ0
+WhcNMzExMjIxMTcwMzQ0WjAcMRowGAYDVQQDDBF2cG4uYWxnb21ldGljLmNvbTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPcQ5URlwTarZvgUEWSKjcWM
+BuFs8CEJ0fn+4jSA04l/KXzlaoiIY3zUL7imG6udRRSEKOWm9x5RTT+HmSQOSlRQ
+YN5JsCDn8hqrqqO+nYMQ860Q4QBTCzNfUjzBt6mjo8bHYZSFSMc9oG4vpitxDZ4i
+9kpxAi4lo6jz+4KSx5Vbtb7Fae+MicYnDm2ae9O08yaOiiTze6QytIfuP9lxaypl
+JGtvJ/rj/H7FpgCHa3ZYuRTHbIt5kmXoAHrt1grwHrunQ/joymOZXy/GhV2IE1PA
+1rc/3iiNg+eh99xC2V3HIkxXfHcooMt9peIt2i7do28IRUE3HAWiLQnI1LcJ8MEC
+AwEAAaOBljCBkzAdBgNVHQ4EFgQUvO62u6YTueR+oJC7LHbRyP6X3fcwVwYDVR0j
+BFAwToAUvO62u6YTueR+oJC7LHbRyP6X3fehIKQeMBwxGjAYBgNVBAMMEXZwbi5h
+bGdvbWV0aWMuY29tghQVmlePib4wFTQli+ioIu1uD0E/7jAMBgNVHRMEBTADAQH/
+MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAhOBWEIYM2wAPmRK8sZ5X
+ln4jz5EDymTdpHZSu+1DSLDexPRPvntz4Py6DTVtJhJFfl5x6BLwLijnMP8FntLY
+08iNMavfgaLuiRZjePojjsA+2Pyb83xS3Jm3kLn8xSkDGQKhDTW84eGt/LYzEPuz
+uYQgMCupVuo99fCLzxZh2zV0sPiM759hjSl0IPulX3o7HQ/vEsq9dCzXKLg2CYLj
+5MjqSuD3mqBg+m1ifIhkoiE/bHw+k1O3inp5JumVdcbXAIZkgNQlEqzr+4meIyD3
+c1gfEVx20DgSieeMPttxXxB19saljOtpLvY9JfbzhJ5SAJjyLsWIUabShyXrJwWv
+yQ==
+-----END CERTIFICATE-----
+</ca>
+key-direction 1
+<tls-auth>
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+91a3938f0e980ba5776328c65524fdad
+8debb91e248152a0bff30bd17fbd2f4f
+e69e95a4db9691a82655907959ef693b
+5dbb430527bf10b731d802050ae2258e
+217887cae173a8e00420c4b7cbbfa33d
+0cf0c0c469647ca6a07b15f2fac43716
+b4568d8dcc977ae7d2d8525efbc52fb1
+e5e300b0ac94e862d09e82b79318ae00
+aa8c63d684ceb52fe1860c5a472bbb40
+c7e265cd63509d899acf2ddbcb79adfb
+5534a08f7937515569d85080d2f1c830
+9a679335facdb4081669c1b152f9c445
+1c783f04610b4f629c06b5752a0bdde1
+7e50cef65fa040e19dd3f76b3494311e
+fc332cee2ae3835806e6c7647fda4ed5
+527aac5b009c00bacdaa405b57b2f3ab
+-----END OpenVPN Static key V1-----
+</tls-auth>
+
+redirect-gateway def1

+ 1 - 0
stackedit/.env

@@ -0,0 +1 @@
+DATA_ROOT=/home/tuffy/docker/Dockers_dev/stackedit

+ 1 - 0
stackedit/.gitignore

@@ -0,0 +1 @@
+files/

+ 24 - 0
stackedit/config/default.d/10-bitnami.ini

@@ -0,0 +1,24 @@
+[couchdb]
+database_dir=/bitnami/couchdb/data
+view_index_dir=/bitnami/couchdb/data
+
+[chttpd]
+port=5984
+bind_address=0.0.0.0
+require_valid_user=true
+WWW-Authenticate=Basic realm="Welcome to Stackedit Couchdb"
+
+[httpd]
+; Prevent changes in the configuration via the config API so the changes are not lost after a restart/redeploy
+config_whitelist=[{httpd,config_whitelist}]
+WWW-Authenticate=Basic realm="administrator"
+enable_cors=true
+
+[couch_httpd_auth]
+require_valid_user=true
+
+[cors]
+# origins=https://stackedit.io
+origins=*
+credentials=true
+
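This override forces authentication (require_valid_user in [chttpd] and [couch_httpd_auth]) and opens CORS so the StackEdit front end can sync against CouchDB. A sketch for checking that the settings took effect, assuming the container publishes port 5984 on localhost and using whatever admin credentials the CouchDB container was provisioned with:

    curl -i http://localhost:5984/            # expect 401 now that valid users are required
    curl -u admin http://localhost:5984/_up   # expect {"status":"ok"} with valid credentials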

+ 11 - 0
stackedit/config/default.d/README

@@ -0,0 +1,11 @@
+CouchDB default configuration files
+
+Files found under the etc/default.d directory that end with .ini are
+parsed within couchdb(1) at startup.
+
+This directory is intended for distribution-specific overrides of
+CouchDB defaults. Package maintainers should be placing overrides in
+this directory.
+
+System administrators should place overrides in the etc/local.d directory
+instead.
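Following the README above, an override is just another .ini file dropped into the default.d directory, like the 10-bitnami.ini shipped here. A sketch of adding one from the stackedit/ directory (the filename and setting are illustrative only; the value mirrors what 10-bitnami.ini already sets):

    cat > config/default.d/20-example.ini <<'EOF'
    [chttpd]
    bind_address = 0.0.0.0
    EOF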

+ 720 - 0
stackedit/config/default.ini

@@ -0,0 +1,720 @@
+; Upgrading CouchDB will overwrite this file.
+[vendor]
+name = The Apache Software Foundation
+
+[couchdb]
+uuid = 
+database_dir = ./data
+view_index_dir = ./data
+; util_driver_dir =
+; plugin_dir =
+;os_process_timeout = 5000 ; 5 seconds. for view servers.
+
+; Maximum number of .couch files to open at once.
+; The actual limit may be slightly lower depending on how
+; many schedulers you have as the allowance is divided evenly
+; among them.
+;max_dbs_open = 500
+
+; Method used to compress everything that is appended to database and view index files, except
+; for attachments (see the attachments section). Available methods are:
+;
+; none         - no compression
+; snappy       - use google snappy, a very fast compressor/decompressor
+; deflate_N    - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
+;                lowest compression ratio) to 9 (slowest, highest compression ratio)
+;file_compression = snappy
+; Higher values may give better read performance due to less read operations
+; and/or more OS page cache hits, but they can also increase overall response
+; time for writes when there are many attachment write requests in parallel.
+;attachment_stream_buffer_size = 4096
+; Default security object for databases if not explicitly set
+; everyone - same as couchdb 1.0, everyone can read/write
+; admin_only - only admins can read/write
+; admin_local - sharded dbs on :5984 are read/write for everyone,
+;               local dbs on :5986 are read/write for admins only
+;default_security = admin_only
+; btree_chunk_size = 1279
+; maintenance_mode = false
+; stem_interactive_updates = true
+; uri_file =
+; The speed of processing the _changes feed with doc_ids filter can be
+; influenced directly with this setting - increase for faster processing at the
+; expense of more memory usage.
+;changes_doc_ids_optimization_threshold = 100
+; Maximum document ID length. Can be set to an integer or 'infinity'.
+;max_document_id_length = infinity
+;
+; Limit maximum document size. Requests to create / update documents with a body
+; size larger than this will fail with a 413 http error. This limit applies to
+; requests which update a single document as well as individual documents from
+; a _bulk_docs request. The size limit is approximate due to the nature of JSON
+; encoding.
+;max_document_size = 8000000 ; bytes
+;
+; Maximum attachment size.
+; max_attachment_size = 1073741824 ; 1 gibibyte
+;
+; Do not update the least recently used DB cache on reads, only writes
+;update_lru_on_read = false
+;
+; The default storage engine to use when creating databases
+; is set as a key into the [couchdb_engines] section.
+;default_engine = couch
+;
+; Enable this to only "soft-delete" databases when DELETE /{db} requests are
+; made. This will place a .recovery directory in your data directory and
+; move deleted databases/shards there instead. You can then manually delete
+; these files later, as desired.
+;enable_database_recovery = false
+;
+; Set the maximum size allowed for a partition. This helps users avoid
+; inadvertently abusing partitions resulting in hot shards. The default
+; is 10GiB. A value of 0 or less will disable partition size checks.
+;max_partition_size = 10737418240
+;
+; When true, system databases _users and _replicator are created immediately
+; on startup if not present.
+;single_node = false
+
+; Allow edits on the _security object in the user db. By default, it's disabled.
+;users_db_security_editable = false
+
+[purge]
+; Allowed maximum number of documents in one purge request
+;max_document_id_number = 100
+;
+; Allowed maximum number of accumulated revisions in one purge request
+;max_revisions_number = 1000
+;
+; Allowed durations when index is not updated for local purge checkpoint
+; document. Default is 24 hours.
+;index_lag_warn_seconds = 86400
+
+[couchdb_engines]
+; The keys in this section are the filename extension that
+; the specified engine module will use. This is important so
+; that couch_server is able to find an existing database without
+; having to ask every configured engine.
+couch = couch_bt_engine
+
+[process_priority]
+; Selectively disable altering process priorities for modules that request it.
+; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
+;     failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 -> 22.2.4. Do not
+;     enable when running with those versions.
+;couch_server = false
+
+[cluster]
+;q=2
+;n=3
+; placement = metro-dc-a:2,metro-dc-b:1
+
+; Supply a comma-delimited list of node names that this node should
+; contact in order to join a cluster. If a seedlist is configured the ``_up``
+; endpoint will return a 404 until the node has successfully contacted at
+; least one of the members of the seedlist and replicated an up-to-date copy
+; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
+; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
+
+[chttpd]
+; These settings affect the main, clustered port (5984 by default).
+port = 5984
+bind_address = 127.0.0.1
+;backlog = 512
+;socket_options = [{sndbuf, 262144}, {nodelay, true}]
+;server_options = [{recbuf, undefined}]
+;require_valid_user = false
+; require_valid_user_except_for_up = false
+; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
+; If Server header is left out, Mochiweb will add its own one in.
+;prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
+;
+; Limit maximum number of databases when trying to get detailed information using
+; _dbs_info in a request
+;max_db_number_for_dbs_info_req = 100
+
+; set to true to delay the start of a response until the end has been calculated
+;buffer_response = false
+
+; authentication handlers
+; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable proxy authentication
+; authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable JWT authentication
+; authentication_handlers = {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+
+; prevent non-admins from accessing /_all_dbs
+; admin_only_all_dbs = true
+
+; These options are moved from [httpd]
+;secure_rewrites = true
+;allow_jsonp = false
+
+;enable_cors = false
+;enable_xframe_options = false
+
+; CouchDB can optionally enforce a maximum uri length;
+;max_uri_length = 8000
+
+;changes_timeout = 60000
+;config_whitelist =
+;rewrite_limit = 100
+;x_forwarded_host = X-Forwarded-Host
+;x_forwarded_proto = X-Forwarded-Proto
+;x_forwarded_ssl = X-Forwarded-Ssl
+
+; Maximum allowed http request size. Applies to both clustered and local port.
+;max_http_request_size = 4294967296 ; 4GB
+
+; Set to true to decode + to space in db and doc_id parts.
+; decode_plus_to_space = true
+
+;[jwt_auth]
+; List of claims to validate
+; can be the name of a claim like "exp" or a tuple if the claim requires
+; a parameter
+; required_claims = exp, {iss, "IssuerNameHere"}
+; roles_claim_name = https://example.com/roles
+;
+; [jwt_keys]
+; Configure at least one key here if using the JWT auth handler.
+; If your JWT tokens do not include a "kid" attribute, use "_default"
+; as the config key, otherwise use the kid as the config key.
+; Examples
+; hmac:_default = aGVsbG8=
+; hmac:foo = aGVsbG8=
+; The config values can represent symmetric and asymmetric keys.
+; For symmetric keys, the value is base64 encoded;
+; hmac:_default = aGVsbG8= # base64-encoded form of "hello"
+; For asymmetric keys, the value is the PEM encoding of the public
+; key with newlines replaced with the escape sequence \n.
+; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
+; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
+
+[couch_peruser]
+; If enabled, couch_peruser ensures that a private per-user database
+; exists for each document in _users. These databases are writable only
+; by the corresponding user. Databases are in the following form:
+; userdb-{hex encoded username}
+;enable = false
+; If set to true and a user is deleted, the respective database gets
+; deleted as well.
+;delete_dbs = false
+; Set a default q value for peruser-created databases that is different from
+; cluster / q
+;q = 1
+; prefix for user databases. If you change this after user dbs have been
+; created, the existing databases won't get deleted if the associated user
+; gets deleted because of the then prefix mismatch.
+;database_prefix = userdb-
+
+[httpd]
+port = 5986
+bind_address = 127.0.0.1
+;authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, undefined}, {sndbuf, 262144}, {nodelay, true}]
+;socket_options = [{sndbuf, 262144}]
+
+; These settings were moved to [chttpd]
+; secure_rewrites, allow_jsonp, enable_cors, enable_xframe_options,
+; max_uri_length, changes_timeout, config_whitelist, rewrite_limit,
+; x_forwarded_host, x_forwarded_proto, x_forwarded_ssl, max_http_request_size
+
+; [httpd_design_handlers]
+; _view = 
+
+; [ioq]
+; concurrency = 10
+; ratio = 0.01
+
+[ssl]
+;port = 6984
+
+[chttpd_auth]
+;authentication_db = _users
+
+; These options are moved from [couch_httpd_auth]
+;authentication_redirect = /_utils/session.html
+;require_valid_user = false
+;timeout = 600 ; number of seconds before automatic logout
+;auth_cache_size = 50 ; size is number of cache entries
+;allow_persistent_cookies = true ; set to false to disallow persistent cookies
+;iterations = 10 ; iterations for password hashing
+;min_iterations = 1
+;max_iterations = 1000000000
+;password_scheme = pbkdf2
+; List of Erlang RegExp or tuples of RegExp and an optional error message.
+; Where a new password must match all RegExp.
+; Example: [{".{10,}", "Password min length is 10 characters."}, "\\d+"]
+;password_regexp = []
+;proxy_use_secret = false
+; comma-separated list of public fields, 404 if empty
+;public_fields =
+;secret =
+;users_db_public = false
+;cookie_domain = example.com
+; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set.
+;same_site =
+
+; [chttpd_auth_cache]
+; max_lifetime = 600000
+; max_objects = 
+; max_size = 104857600
+
+; [mem3]
+; nodes_db = _nodes
+; shard_cache_size = 25000
+; shards_db = _dbs
+; sync_concurrency = 10
+
+; [fabric]
+; all_docs_concurrency = 10
+; changes_duration = 
+; shard_timeout_factor = 2
+; shard_timeout_min_msec = 100
+; uuid_prefix_len = 7
+; request_timeout = 60000
+; all_docs_timeout = 10000
+; attachments_timeout = 60000
+; view_timeout = 3600000
+; partition_view_timeout = 3600000
+
+; [rexi]
+; buffer_count = 2000
+; server_per_node = true
+; stream_limit = 5
+;
+; Use a single message to kill a group of remote workers. This is
+; mostly an upgrade clause to allow operating in a mixed cluster of
+; 2.x and 3.x nodes. After upgrading, switch to true to save some
+; network bandwidth
+;use_kill_all = false
+
+; [global_changes]
+; max_event_delay = 25
+; max_write_delay = 500
+; update_db = true
+
+; [view_updater]
+; min_writer_items = 100
+; min_writer_size = 16777216
+
+[couch_httpd_auth]
+; WARNING! This only affects the node-local port (5986 by default).
+; You probably want the settings under [chttpd].
+authentication_db = _users
+
+; These settings were moved to [chttpd_auth]
+; authentication_redirect, require_valid_user, timeout,
+; auth_cache_size, allow_persistent_cookies, iterations, min_iterations,
+; max_iterations, password_scheme, password_regexp, proxy_use_secret,
+; public_fields, secret, users_db_public, cookie_domain, same_site
+
+; CSP (Content Security Policy) Support
+[csp]
+;utils_enable = true
+;utils_header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
+;attachments_enable = true
+;attachments_header_value = sandbox
+;showlist_enable = true
+;showlist_header_value = sandbox
+
+[cors]
+;credentials = false
+; List of origins separated by a comma, * means accept all
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins = *
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for a vhost
+;[cors:http://example.com]
+; credentials = false
+; List of origins separated by a comma
+; Origins must include the scheme: http://example.com
+; You can't set origins: * and credentials = true at the same time.
+;origins =
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+; Configuration for the design document cache
+;[ddoc_cache]
+; The maximum size of the cache in bytes
+;max_size = 104857600 ; 100MiB
+; The period each cache entry should wait before
+; automatically refreshing in milliseconds
+;refresh_timeout = 67000
+
+[x_frame_options]
+; Settings same-origin will return X-Frame-Options: SAMEORIGIN.
+; If same origin is set, it will ignore the hosts setting
+; same_origin = true
+; Settings hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
+; List of hosts separated by a comma. * means accept all
+; hosts =
+
+[native_query_servers]
+; erlang query server
+; enable_erlang_query_server = false
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+; commit_freq = 5
+;reduce_limit = true
+;os_process_limit = 100
+; os_process_idle_limit = 300
+; os_process_soft_limit = 100
+; Timeout for how long a response from a busy view group server can take.
+; "infinity" is also a valid configuration value.
+;group_info_timeout = 5000
+;query_limit = 268435456
+;partition_query_limit = 268435456
+
+[mango]
+; Set to true to disable the "index all fields" text index, which can lead
+; to out of memory issues when users have documents with nested array fields.
+;index_all_disabled = false
+; Default limit value for mango _find queries.
+;default_limit = 25
+; Ratio between documents scanned and results matched that will
+; generate a warning in the _find response. Setting this to 0 disables
+; the warning.
+;index_scan_warning_threshold = 10
+
+[indexers]
+couch_mrview = true
+
+[feature_flags]
+; This enables any database to be created as a partitioned database (except system dbs).
+; Setting this to false will stop the creation of partitioned databases.
+; partitioned||allowed* = true will scope the creation of partitioned databases
+; to databases with 'allowed' prefix.
+partitioned||* = true
+
+[uuids]
+; Known algorithms:
+;   random - 128 bits of random awesome
+;     All awesome, all the time.
+;   sequential - monotonically increasing ids with random increments
+;     First 26 hex characters are random. Last 6 increment in
+;     random amounts until an overflow occurs. On overflow, the
+;     random prefix is regenerated and the process starts over.
+;   utc_random - Time since Jan 1, 1970 UTC with microseconds
+;     First 14 characters are the time in hex. Last 18 are random.
+;   utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
+;     First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
+;algorithm = sequential
+; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
+; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
+;utc_id_suffix =
+# Maximum number of UUIDs retrievable from /_uuids in a single request
+;max_count = 1000
+
+[attachments]
+;compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+;compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+; Random jitter applied on replication job startup (milliseconds)
+;startup_jitter = 5000
+; Number of actively running replications
+;max_jobs = 500
+;Scheduling interval in milliseconds. During each reschedule cycle
+;interval = 60000
+; Maximum number of replications to start and stop during rescheduling.
+;max_churn = 20
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+;worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of used RAM memory.
+;worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+;http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+;connection_timeout = 30000
+; Request timeout
+;request_timeout = infinity
+; If a request fails, the replicator will retry it up to N times.
+;retries_per_request = 5
+; Use checkpoints
+;use_checkpoints = true
+; Checkpoint interval
+;checkpoint_interval = 30000
+; Some socket options that might boost performance in some scenarios:
+;       {nodelay, boolean()}
+;       {sndbuf, integer()}
+;       {recbuf, integer()}
+;       {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+;socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+;verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+;ssl_certificate_max_depth = 3
+; Maximum document ID length for replication.
+;max_document_id_length = infinity
+; How much time to wait before retrying after a missing doc exception. This
+; exception happens if the document was seen in the changes feed, but internal
+; replication hasn't caught up yet, and fetching document's revisions
+; fails. This is a common scenario when the source is updated while continuous
+; replication is running. The retry period would depend on how quickly internal
+; replication is expected to catch up. In general this is an optimisation to
+; avoid crashing the whole replication job, which would consume more resources
+; and add log noise.
+;missing_doc_retry_msec = 2000
+; Wait this many seconds after startup before attaching changes listeners
+; cluster_start_period = 5
+; Re-check cluster state at least every cluster_quiet_period seconds
+; cluster_quiet_period = 60
+
+; List of replicator client authentication plugins to try. Plugins will be
+; tried in order. The first to initialize successfully will be used for that
+; particular endpoint (source or target). Normally couch_replicator_auth_noop
+; would be used at the end of the list as a "catch-all". It doesn't do anything
+; and effectively implements the previous behavior of using basic auth.
+; There are currently two plugins available:
+;   couch_replicator_auth_session - use _session cookie authentication
+;   couch_replicator_auth_noop - use basic authentication (previous default)
+; Currently, the new _session cookie authentication is tried first, before
+; falling back to the old basic authentication default:
+;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
+; To restore the old behaviour, use the following value:
+;auth_plugins = couch_replicator_auth_noop
+
+; Force couch_replicator_auth_session plugin to refresh the session
+; periodically if max-age is not present in the cookie. This is mostly to
+; handle the case where anonymous writes are allowed to the database and a VDU
+; function is used to forbid writes based on the authenticated user name. In
+; that case this value should be adjusted based on the expected minimum session
+; expiry timeout on replication endpoints. If session expiry results in a 401
+; or 403 response this setting is not needed.
+;session_refresh_interval_sec = 550
+
+; Usage coefficient decays historic fair share usage every scheduling
+; cycle. The value must be between 0.0 and 1.0. Lower values will
+; ensure historic usage decays quicker and higher values means it will
+; be remembered longer.
+;usage_coeff = 0.5
+
+; Priority coefficient decays all the job priorities such that they slowly
+; drift towards the front of the run queue. This coefficient defines a maximum
+; time window over which this algorithm would operate. For example, if this
+; value is too small (0.1), after a few cycles quite a few jobs would end up at
+; priority 0, and would render this algorithm useless. The default value of
+; 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
+; get to run for 7 hours, it would still have priority > 0. 7 hours was picked
+; as it was close enough to 8 hours, which is the default maximum error backoff
+; interval.
+;priority_coeff = 0.98
+
+
+[replicator.shares]
+; Fair share configuration section. More shares result in a higher
+; chance that jobs from that db get to run. The default value is 100,
+; minimum is 1 and maximum is 1000. The configuration may be set even
+; if the database does not exist.
+;_replicator = 100
+
+
+[log]
+; Possible log levels:
+;  debug
+;  info
+;  notice
+;  warning, warn
+;  error, err
+;  critical, crit
+;  alert
+;  emergency, emerg
+;  none
+;
+;level = info
+;
+; Set the maximum log message length in bytes that will be
+; passed through the writer
+;
+; max_message_size = 16000
+;
+; Do not log last message received by terminated process
+; strip_last_msg = true
+;
+; List of fields to remove before logging the crash report
+; filter_fields = [pid, registered_name, error_info, messages]
+;
+; There are four different log writers that can be configured
+; to write log messages. The default writes to stderr of the
+; Erlang VM which is useful for debugging/development as well
+; as a lot of container deployments.
+;
+; There's also a file writer that works with logrotate, a
+; rsyslog writer for deployments that need to have logs sent
+; over the network, and a journald writer that's more suitable
+; when using systemd journald.
+;
+;writer = stderr
+; Journald Writer notes:
+;
+; The journald writer doesn't have any options. It still writes
+; the logs to stderr, but without the timestamp prepended, since
+; the journal will add it automatically, and with the log level
+; formated as per
+; https://www.freedesktop.org/software/systemd/man/sd-daemon.html
+;
+;
+; File Writer Options:
+;
+; The file writer will check every 30s to see if it needs
+; to reopen its file. This is useful for people that configure
+; logrotate to move log files periodically.
+;
+; file = ./couch.log ; Path name to write logs to
+;
+; Write operations will happen either every write_buffer bytes
+; or write_delay milliseconds. These are passed directly to the
+; Erlang file module with the write_delay option documented here:
+;
+;     http://erlang.org/doc/man/file.html
+;
+; write_buffer = 0
+; write_delay = 0
+;
+;
+; Syslog Writer Options:
+;
+; The syslog writer options all correspond to their obvious
+; counterparts in rsyslog nomenclature.
+;
+; syslog_host =
+; syslog_port = 514
+; syslog_appid = couchdb
+; syslog_facility = local2
+
+[stats]
+; Stats collection interval in seconds. Default 10 seconds.
+;interval = 10
+
+[smoosh]
+;
+; More documentation on these is in the Automatic Compaction
+; section of the documentation.
+;
+;db_channels = upgrade_dbs,ratio_dbs,slack_dbs
+;view_channels = upgrade_views,ratio_views,slack_views
+;
+;[smoosh.ratio_dbs]
+;priority = ratio
+;min_priority = 2.0
+;
+;[smoosh.ratio_views]
+;priority = ratio
+;min_priority = 2.0
+;
+;[smoosh.slack_dbs]
+;priority = slack
+;min_priority = 536870912
+;
+;[smoosh.slack_views]
+;priority = slack
+;min_priority = 536870912
+
+[ioq]
+; The maximum number of concurrent in-flight IO requests allowed.
+;concurrency = 10
+
+; The fraction of the time that a background IO request will be selected
+; over an interactive IO request when both queues are non-empty
+;ratio = 0.01
+
+[ioq.bypass]
+; System administrators can choose to submit specific classes of IO directly
+; to the underlying file descriptor or OS process, bypassing the queues
+; altogether. Installing a bypass can yield higher throughput and lower
+; latency, but relinquishes some control over prioritization. The following
+; classes are recognized with the following defaults:
+
+; Messages on their way to an external process (e.g., couchjs) are bypassed
+;os_process = true
+
+; Disk IO fulfilling interactive read requests is bypassed
+;read = true
+
+; Disk IO required to update a database is bypassed
+;write = true
+
+; Disk IO required to update views and other secondary indexes is bypassed
+;view_update = true
+
+; Disk IO issued by the background replication processes that fix any
+; inconsistencies between shard copies is queued
+;shard_sync = false
+
+; Disk IO issued by compaction jobs is queued
+;compaction = false
+
+[dreyfus]
+; The name and location of the Clouseau Java service required to
+; enable Search functionality.
+; name = clouseau@127.0.0.1
+
+; CouchDB will try to re-connect to Clouseau using a bounded
+; exponential backoff with the following number of iterations.
+; retry_limit = 5
+
+; The default number of results returned from a global search query.
+; limit = 25
+
+; The default number of results returned from a search on a partition
+; of a database.
+; limit_partitions = 2000
+ 
+; The maximum number of results that can be returned from a global
+; search query (or any search query on a database without user-defined
+; partitions). Attempts to set ?limit=N higher than this value will
+; be rejected.
+; max_limit = 200
+
+; The maximum number of results that can be returned when searching
+; a partition of a database. Attempts to set ?limit=N higher than this
+; value will be rejected. If this config setting is not defined,
+; CouchDB will use the value of `max_limit` instead. If neither is
+; defined, the default is 2000 as stated here.
+; max_limit_partitions = 2000
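For intuition only, bounded exponential backoff as described above can be
sketched in a few lines of Python. This is a generic illustration, not
CouchDB's Erlang implementation: retry_limit mirrors the setting above, while
the base and maximum delays are invented for the example.

    import time

    def connect_with_backoff(connect, retry_limit=5, base_delay=1.0, max_delay=60.0):
        """Call connect() up to retry_limit times, doubling the wait between
        attempts and capping it at max_delay seconds."""
        for attempt in range(retry_limit):
            if connect():
                return True
            time.sleep(min(base_delay * (2 ** attempt), max_delay))
        return False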
+
+[reshard]
+;max_jobs = 48
+;max_history = 20
+;max_retries = 1
+;retry_interval_sec = 10
+;delete_source = true
+;update_shard_map_timeout_sec = 60
+;source_close_timeout_sec = 600
+;require_node_param = false
+;require_range_param = false
+
+[prometheus]
+additional_port = false
+bind_address = 127.0.0.1
+port = 17986
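With additional_port = false as above, the metrics are served through the main
clustered interface rather than the dedicated port. A hedged sketch of a quick
check, assuming CouchDB 3.2+'s GET /_node/_local/_prometheus endpoint plus the
admin credentials and 5123 host port from the stackedit compose file below:

    import base64
    import urllib.request

    # Basic-auth request against the host-mapped CouchDB port (5123 -> 5984).
    url = "http://localhost:5123/_node/_local/_prometheus"
    token = base64.b64encode(b"admin:couchdb").decode()
    req = urllib.request.Request(url, headers={"Authorization": "Basic " + token})
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode()[:500])  # first few metric lines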

+ 8 - 0
stackedit/config/local.d/README

@@ -0,0 +1,8 @@
+CouchDB local configuration files
+
+Files found under the etc/local.d directory that end with .ini are parsed
+within couchdb(1) at startup.
+
+This directory is intended for system administrator overrides of CouchDB
+defaults. Package maintainers should be placing overrides in the
+etc/default.d directory instead.

+ 94 - 0
stackedit/config/local.ini

@@ -0,0 +1,94 @@
+; CouchDB Configuration Settings
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+[couchdb]
+; max_document_size = 4294967296 ; bytes
+; os_process_timeout = 5000
+uuid=97c3f18131cf41abf2725f8d3cc81c41
+
+[couch_peruser]
+
+; If enabled, couch_peruser ensures that a private per-user database
+; exists for each document in _users. These databases are writable only
+; by the corresponding user. Databases are in the following form:
+; userdb-{hex encoded username}
+; enable = true
+; If set to true and a user is deleted, the respective database gets
+; deleted as well.
+; delete_dbs = true
+; Set a default q value for peruser-created databases that is different from
+; the q value in the [cluster] section
+; q = 1
+[chttpd]
+
+; port = 5984
+; bind_address = 127.0.0.1
+; Options for the MochiWeb HTTP server.
+; server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+; socket_options = [{sndbuf, 262144}, {nodelay, true}]
+[httpd]
+
+; NOTE that this only configures the "backend" node-local port, not the
+; "frontend" clustered port. You probably don't want to change anything in
+; this section.
+; Uncomment next line to trigger basic-auth popup on unauthorized requests.
+; WWW-Authenticate = Basic realm="administrator"
+; Uncomment next line to set the configuration modification whitelist. Only
+; whitelisted values may be changed via the /_config URLs. To allow the admin
+; to change this value over HTTP, remember to include {httpd,config_whitelist}
+; itself. Excluding it from the list would require editing this file to update
+; the whitelist.
+; config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
+[chttpd_auth]
+; If you set this to true, you should also uncomment the WWW-Authenticate line
+; above. If you don't configure a WWW-Authenticate header, CouchDB will send
+; Basic realm="server" to prevent you from getting logged out.
+; require_valid_user = false
+secret=e7d00ef78460ab2a05b6db6fdafc5754
+
+[ssl]
+
+; enable = true
+; cert_file = /full/path/to/server_cert.pem
+; key_file = /full/path/to/server_key.pem
+; password = somepassword
+; set to true to validate peer certificates
+; verify_ssl_certificates = false
+; Set to true to fail if the client does not send a certificate. Only used if verify_ssl_certificates is true.
+; fail_if_no_peer_cert = false
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+; cacert_file = /full/path/to/cacertf
+; The verification fun (optional). If not specified, the default
+; verification fun will be used.
+; verify_fun = {Module, VerifyFun}
+; maximum peer certificate depth
+; ssl_certificate_max_depth = 1
+; 
+; Reject renegotiations that do not live up to RFC 5746.
+; secure_renegotiate = true
+; The cipher suites that should be supported.
+; Can be specified in erlang format "{ecdhe_ecdsa,aes_128_cbc,sha256}"
+; or in OpenSSL format "ECDHE-ECDSA-AES128-SHA256".
+; ciphers = ["ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA"]
+; The SSL/TLS versions to support
+; tls_versions = [tlsv1, 'tlsv1.1', 'tlsv1.2']
+; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
+; the Virtual Host will be redirected to the path. In the example below all requests
+; to http://example.com/ are redirected to /database.
+; If you run CouchDB on a specific port, include the port number in the vhost:
+; example.com:5984 = /database
+[vhosts]
+
+; example.com = /database/
+; To create an admin account uncomment the '[admins]' section below and add a
+; line in the format 'username = password'. When you next start CouchDB, it
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
+; changing this.
+[admins]
+admin = -pbkdf2-f39d3e67a1133fcb3306c7b860e7635f61a1f46c,67bdb51d7cfd6c896714b59a0212f329,10
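The stored value above follows the -pbkdf2-<derived_key>,<salt>,<iterations>
format. A hedged sketch of reproducing such a hash, assuming CouchDB's default
PBKDF2-HMAC-SHA1 scheme with a 20-byte derived key and a hex salt string
(verify against your CouchDB version; the simpler route is to write the
plaintext password and let CouchDB hash it on restart, as the comment above
explains):

    import hashlib
    import secrets

    def couchdb_admin_hash(password: str, iterations: int = 10) -> str:
        """Build a CouchDB-style admin hash: -pbkdf2-<hexkey>,<salt>,<iterations>.
        Assumes PBKDF2-HMAC-SHA1 with a 20-byte derived key (an assumption)."""
        salt = secrets.token_hex(16)
        key = hashlib.pbkdf2_hmac("sha1", password.encode(), salt.encode(),
                                  iterations, dklen=20)
        return f"-pbkdf2-{key.hex()},{salt},{iterations}"

    print(couchdb_admin_hash("example-password"))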

+ 99 - 0
stackedit/config/vm.args

@@ -0,0 +1,99 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Each node in the system must have a unique name. These are specified through
+# the Erlang -name flag, which takes the form:
+#
+#    -name nodename@<FQDN>
+#
+# or
+#
+#    -name nodename@<IP-ADDRESS>
+#
+# CouchDB recommends the following values for this flag:
+#
+# 1. If this is a single node, not in a cluster, use:
+#    -name couchdb@127.0.0.1
+#
+# 2. If DNS is configured for this host, use the FQDN, such as:
+#    -name couchdb@my.host.domain.com
+#
+# 3. If DNS isn't configured for this host, use IP addresses only, such as:
+#    -name couchdb@192.168.0.1
+#
+# Do not rely on tricks with /etc/hosts or libresolv to handle anything
+# other than the above 3 approaches correctly. They will not work reliably.
+#
+# Multiple CouchDBs running on the same machine can use couchdb1@, couchdb2@,
+# etc.
+-name couchdb@127.0.0.1
+
+# All nodes must share the same magic cookie for distributed Erlang to work.
+# Comment out this line if you synchronized the cookies by other means (using
+# the ~/.erlang.cookie file, for example).
+-setcookie monster
+
+# Tell kernel and SASL not to log anything
+-kernel error_logger silent
+-sasl sasl_error_logger false
+
+# Use kernel poll functionality if supported by emulator
++K true
+
+# Start a pool of asynchronous IO threads
++A 16
+
+# Comment this line out to enable the interactive Erlang shell on startup
++Bd -noinput
+
+# Force use of the smp scheduler, fixes #1296
+-smp enable
+
+# Set maximum SSL session lifetime to reap terminated replication readers
+-ssl session_lifetime 300
+
+## TLS Distribution
+## Use TLS for connections between Erlang cluster members.
+## http://erlang.org/doc/apps/ssl/ssl_distribution.html
+##
+## Generate Cert(PEM) File
+## This is just an example command to generate a certfile (PEM).
+## This is not an endorsement of specific expiration limits, key sizes, or algorithms.
+##    $ openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem
+##    $ cat key.pem cert.pem > dev/erlserver.pem && rm key.pem cert.pem
+##
+## Generate a Config File (couch_ssl_dist.conf)
+##    [{server,
+##      [{certfile, "</path/to/erlserver.pem>"},
+##       {secure_renegotiate, true}]},
+##     {client,
+##      [{secure_renegotiate, true}]}].
+##
+## CouchDB recommends the following values for the no_tls flag:
+## 1. Use TCP only, set to true, such as:
+##      -couch_dist no_tls true
+## 2. Use TLS only, set to false, such as:
+##      -couch_dist no_tls false
+## 3. Specify which node to use TCP, such as:
+##      -couch_dist no_tls \"*@127.0.0.1\"
+##
+## To ensure search works, make sure to set the 'no_tls' option for the clouseau node.
+## By default that would be "clouseau@127.0.0.1".
+## Don't forget to override the paths to point to your certificate(s) and key(s)!
+##
+#-proto_dist couch
+#-couch_dist no_tls \"clouseau@127.0.0.1\"
+#-ssl_dist_optfile <path/to/couch_ssl_dist.conf>
+
+# Set a well-known cluster port
+-kernel inet_dist_listen_min 9100
+-kernel inet_dist_listen_max 9100

+ 49 - 0
stackedit/docker-compose.yml

@@ -0,0 +1,49 @@
+version: '2'
+services:
+  couchdb:
+    image: docker.io/bitnami/couchdb:3
+    environment:
+      - COUCHDB_PASSWORD=couchdb
+    ports:
+      - '5123:5984'
+      - '4369:4369'
+      - '9100:9100'
+    volumes:
+      - couchdb_data:/bitnami/couchdb
+      - couchdb_config:/opt/bitnami/couchdb/etc/
+
+
+  stackedit:
+    image: qmcgaw/stackedit
+    container_name: stackedit
+    environment:
+      - LISTENING_PORT=8000
+      - ROOT_URL=/
+      - USER_BUCKET_NAME=stackedit-users
+      - PAYPAL_RECEIVER_EMAIL=
+      - DROPBOX_APP_KEY=
+      - DROPBOX_APP_KEY_FULL=
+      - GITHUB_CLIENT_ID=
+      - GITHUB_CLIENT_SECRET=
+      - GOOGLE_CLIENT_ID=
+      - GOOGLE_API_KEY=
+      - WORDPRESS_CLIENT_ID=
+    ports:
+      - 8165:8000/tcp
+#    network_mode: bridge
+    restart: "no" 
+
+volumes:
+  couchdb_data:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_ROOT}/files"
+  couchdb_config:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_ROOT}/config"
+
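The bind-style volume definitions above resolve against ${DATA_ROOT}, which
this repo keeps in the gitignored stackedit/.env file. A hypothetical example
(path invented; the files/ and config/ subdirectories typically need to exist
before the first docker-compose up, since the local driver does not create
bind targets):

    DATA_ROOT=/opt/stackedit/couchdb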

+ 1 - 0
syncthing/.gitignore

@@ -0,0 +1 @@
+config/

+ 5 - 0
wireguard/.env

@@ -0,0 +1,5 @@
+CONTAINER_NAME=openvpn
+DATA_ROOT=./openvpn-data
+
+
+

+ 6 - 0
wireguard/data/config/.donoteditthisfile

@@ -0,0 +1,6 @@
+ORIG_SERVERURL="vpn.algometic.com"
+ORIG_SERVERPORT="51820"
+ORIG_PEERDNS="8.8.8.8"
+ORIG_PEERS="4"
+ORIG_INTERFACE="10.13.13"
+ORIG_ALLOWEDIPS="0.0.0.0/0"

+ 4 - 0
wireguard/data/config/coredns/Corefile

@@ -0,0 +1,4 @@
+. {
+    loop
+    forward . /etc/resolv.conf
+}

+ 10 - 0
wireguard/data/config/peer1/peer1.conf

@@ -0,0 +1,10 @@
+[Interface]
+Address = 10.13.13.2
+PrivateKey = yCJYC2RP7RxLxntZpEzuY3TcQn+R1by1EplzG2zjBVs=
+ListenPort = 51820
+DNS = 8.8.8.8
+
+[Peer]
+PublicKey = LM7ErmXcl6FxQYCPkl6Q7KDQb51971+A1BJ9ZN7lIQU=
+Endpoint = vpn.algometic.com:51820
+AllowedIPs = 0.0.0.0/0

BIN
wireguard/data/config/peer1/peer1.png


+ 1 - 0
wireguard/data/config/peer1/privatekey-peer1

@@ -0,0 +1 @@
+yCJYC2RP7RxLxntZpEzuY3TcQn+R1by1EplzG2zjBVs=

+ 1 - 0
wireguard/data/config/peer1/publickey-peer1

@@ -0,0 +1 @@
+8+oQEcvSnx9F4wplUeDCe6MGXnmNo6PpZzfhQ6cqmhs=

+ 10 - 0
wireguard/data/config/peer2/peer2.conf

@@ -0,0 +1,10 @@
+[Interface]
+Address = 10.13.13.3
+PrivateKey = KOhoBZEhp0B+11z0xJE0aBq4J2anC/5/ZBGPmRx20Fc=
+ListenPort = 51820
+DNS = 8.8.8.8
+
+[Peer]
+PublicKey = LM7ErmXcl6FxQYCPkl6Q7KDQb51971+A1BJ9ZN7lIQU=
+Endpoint = vpn.algometic.com:51820
+AllowedIPs = 0.0.0.0/0

BIN
wireguard/data/config/peer2/peer2.png


+ 1 - 0
wireguard/data/config/peer2/privatekey-peer2

@@ -0,0 +1 @@
+KOhoBZEhp0B+11z0xJE0aBq4J2anC/5/ZBGPmRx20Fc=

+ 1 - 0
wireguard/data/config/peer2/publickey-peer2

@@ -0,0 +1 @@
+ZSfB5mkPehJr2bjm8eQ/GzI2l1qo5IUFouecRvvpjgk=

+ 10 - 0
wireguard/data/config/peer3/peer3.conf

@@ -0,0 +1,10 @@
+[Interface]
+Address = 10.13.13.4
+PrivateKey = SL3cDO67+6Y0Po6dp0uFYH2b/S24mjPIeIqj5LrPxEc=
+ListenPort = 51820
+DNS = 8.8.8.8
+
+[Peer]
+PublicKey = LM7ErmXcl6FxQYCPkl6Q7KDQb51971+A1BJ9ZN7lIQU=
+Endpoint = vpn.algometic.com:51820
+AllowedIPs = 0.0.0.0/0

BIN
wireguard/data/config/peer3/peer3.png


+ 1 - 0
wireguard/data/config/peer3/privatekey-peer3

@@ -0,0 +1 @@
+SL3cDO67+6Y0Po6dp0uFYH2b/S24mjPIeIqj5LrPxEc=

+ 1 - 0
wireguard/data/config/peer3/publickey-peer3

@@ -0,0 +1 @@
+rTz/pPDnpNiYwCNGK1INoLDV71iXjTE7nPSfR9DTrHk=

+ 10 - 0
wireguard/data/config/peer4/peer4.conf

@@ -0,0 +1,10 @@
+[Interface]
+Address = 10.13.13.5
+PrivateKey = GFr9vXavFkNZW7IyeVCKfPKDdxh0B17HIHIzHKmV9XM=
+ListenPort = 51820
+DNS = 8.8.8.8
+
+[Peer]
+PublicKey = LM7ErmXcl6FxQYCPkl6Q7KDQb51971+A1BJ9ZN7lIQU=
+Endpoint = vpn.algometic.com:51820
+AllowedIPs = 0.0.0.0/0

BIN
wireguard/data/config/peer4/peer4.png


+ 1 - 0
wireguard/data/config/peer4/privatekey-peer4

@@ -0,0 +1 @@
+GFr9vXavFkNZW7IyeVCKfPKDdxh0B17HIHIzHKmV9XM=

+ 1 - 0
wireguard/data/config/peer4/publickey-peer4

@@ -0,0 +1 @@
+rIbrCJrzFQMzD9RPzXQKQkWVJQGPruZn7VGceuvTUzs=

+ 1 - 0
wireguard/data/config/privatekey

@@ -0,0 +1 @@
+EFYq+T+WwkJKJgIddbJ2pMucCkWmFfWA6tilyt+8Dks=

+ 1 - 0
wireguard/data/config/publickey

@@ -0,0 +1 @@
+9R15OxuClX6ULeTw4UZewyT+cvSXzGZ5uj6u2zC1vTA=

+ 1 - 0
wireguard/data/config/server/privatekey-server

@@ -0,0 +1 @@
+iEHgxxxyGGEtyeGOzWBg8/8IOcM7nb23B/bBgJcE0E8=

+ 1 - 0
wireguard/data/config/server/publickey-server

@@ -0,0 +1 @@
+LM7ErmXcl6FxQYCPkl6Q7KDQb51971+A1BJ9ZN7lIQU=

+ 1 - 0
wireguard/data/config/server_privatekey

@@ -0,0 +1 @@
+YPNDKJxn0Jx3Mv/XHqzdCxNZ426lehvhMZJx2TP2fFU=

+ 10 - 0
wireguard/data/config/templates/peer.conf

@@ -0,0 +1,10 @@
+[Interface]
+Address = ${CLIENT_IP}
+PrivateKey = $(cat /config/${PEER_ID}/privatekey-${PEER_ID})
+ListenPort = 51820
+DNS = ${PEERDNS}
+
+[Peer]
+PublicKey = $(cat /config/server/publickey-server)
+Endpoint = ${SERVERURL}:${SERVERPORT}
+AllowedIPs = ${ALLOWEDIPS}

+ 6 - 0
wireguard/data/config/templates/server.conf

@@ -0,0 +1,6 @@
+[Interface]
+Address = ${INTERFACE}.1
+ListenPort = 51820
+PrivateKey = $(cat /config/server/privatekey-server)
+PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

+ 27 - 0
wireguard/data/config/wg0.conf

@@ -0,0 +1,27 @@
+[Interface]
+Address = 10.13.13.1
+ListenPort = 51820
+PrivateKey = iEHgxxxyGGEtyeGOzWBg8/8IOcM7nb23B/bBgJcE0E8=
+PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
+
+[Peer]
+# peer1
+PublicKey = 8+oQEcvSnx9F4wplUeDCe6MGXnmNo6PpZzfhQ6cqmhs=
+AllowedIPs = 10.13.13.2/32
+
+[Peer]
+# peer2
+PublicKey = ZSfB5mkPehJr2bjm8eQ/GzI2l1qo5IUFouecRvvpjgk=
+AllowedIPs = 10.13.13.3/32
+
+[Peer]
+# peer3
+PublicKey = rTz/pPDnpNiYwCNGK1INoLDV71iXjTE7nPSfR9DTrHk=
+AllowedIPs = 10.13.13.4/32
+
+[Peer]
+# peer4
+PublicKey = rIbrCJrzFQMzD9RPzXQKQkWVJQGPruZn7VGceuvTUzs=
+AllowedIPs = 10.13.13.5/32
+
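With the linuxserver/wireguard image, raising the PEERS value in
docker-compose.yml regenerates this file with additional peers. Adding one by
hand follows the same pattern; a hypothetical fifth peer would look like this
(placeholder key, next free address in the subnet):

    [Peer]
    # peer5 (hypothetical)
    PublicKey = <peer5 public key>
    AllowedIPs = 10.13.13.6/32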

+ 12 - 0
wireguard/data/config/wg0.conf.bak

@@ -0,0 +1,12 @@
+[Interface]
+Address = 10.13.13.1
+ListenPort = 51820
+PrivateKey = iEHgxxxyGGEtyeGOzWBg8/8IOcM7nb23B/bBgJcE0E8=
+PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o enp6s0.1 -j MASQUERADE
+PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o enp6s0.1 -j MASQUERADE
+
+[Peer]
+# peer1
+PublicKey = 8+oQEcvSnx9F4wplUeDCe6MGXnmNo6PpZzfhQ6cqmhs=
+AllowedIPs = 10.13.13.2/32
+

+ 5 - 0
wireguard/data/wireguard-ui/config.json

@@ -0,0 +1,5 @@
+{
+ "PrivateKey": "kIP/uJibC8O/ObT/lDITJYh21fkH5tix7zJ0RGKTGUc=",
+ "PublicKey": "oJzwwNWu13IxAmQuSPQvFbi4sS3MF5OHKGstcllPM1g=",
+ "Users": {}
+}

+ 9 - 0
wireguard/db/server/interfaces.json

@@ -0,0 +1,9 @@
+{
+	"addresses": [
+		"10.252.1.0/24"
+	],
+	"listen_port": "51820",
+	"updated_at": "2021-12-27T15:46:16.358407392Z",
+	"post_up": "",
+	"post_down": ""
+}

+ 5 - 0
wireguard/db/server/keypair.json

@@ -0,0 +1,5 @@
+{
+	"private_key": "8OsZyB8k76A0fQcYU0uDVQZT8L85/13AEXIDxrKTYng=",
+	"public_key": "hsF595uoRrV0hP/fA3jlTD3XIBxotRSwbevpP2S5/Ak=",
+	"updated_at": "2021-12-27T15:46:16.358628515Z"
+}

+ 24 - 0
wireguard/docker-compose.yml

@@ -0,0 +1,24 @@
+version: "2.1"
+services:
+  wg-manager:
+    container_name: wg-manager
+    image: perara/wg-manager
+    restart: "no" 
+    sysctls:
+      net.ipv6.conf.all.disable_ipv6: 0  # Required for IPV6
+    cap_add:
+      - NET_ADMIN
+    #network_mode: host # Alternatively
+    ports:
+       - 51820:51820/udp
+       - 8888:8888
+    volumes:
+      - ./wg-manager:/config
+    environment:
+      HOST: 0.0.0.0
+      PORT: 8888
+      ADMIN_USERNAME: admin
+      ADMIN_PASSWORD: admin
+      WEB_CONCURRENCY: 1
+
+

+ 45 - 0
wireguard/docker-compose.yml.bak

@@ -0,0 +1,45 @@
+version: "2.1"
+services:
+  wireguard:
+    image: lscr.io/linuxserver/wireguard
+    container_name: wireguard
+    cap_add:
+      - NET_ADMIN
+      - SYS_MODULE
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Asia/Hong_Kong
+      - PEERDNS=auto
+      - PEERS=1
+      - INTERNAL_SUBNET=10.13.13.0
+      - SERVERURL=vpn.algometic.com #optional
+      - ALLOWEDIPS=10.13.13.0/24 #optional
+    volumes:
+      - ./data/config:/config
+      - ./data/lib/modules:/lib/modules
+    ports:
+      - 51820:51820/udp
+    sysctls:
+      - net.ipv4.conf.all.src_valid_mark=1
+      - net.ipv4.ip_forward=1
+    restart: "no" 
+
+
+  wireguard-ui:
+    image: embarkstudios/wireguard-ui:latest
+    entrypoint: "/wireguard-ui"
+    privileged: true
+    network_mode: host
+    volumes:
+      - ./data/wireguard-ui:/data
+    environment:
+      - WIREGUARD_UI_LISTEN_ADDRESS=0.0.0.0:8044
+      - WIREGUARD_UI_LOG_LEVEL=debug
+      - WIREGUARD_UI_DATA_DIR=/data
+      - WIREGUARD_UI_WG_ENDPOINT=vpn.algometic.com:51820
+      - WIREGUARD_UI_CLIENT_IP_RANGE=10.13.13.16/28
+      - WIREGUARD_UI_NAT=true
+      - WIREGUARD_UI_NAT_DEVICE=enp6s0.1
+      - WIREGUARD_UI_WG_DEVICE=wg0
+

+ 51 - 0
wireguard/docker-compose.yml.bak2

@@ -0,0 +1,51 @@
+version: "2.1"
+services:
+  wireguard:
+    image: lscr.io/linuxserver/wireguard
+    container_name: wireguard
+    cap_add:
+      - NET_ADMIN
+      - SYS_MODULE
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Asia/Hong_Kong
+      - PEERDNS=8.8.8.8
+      - PEERS=4
+      - INTERNAL_SUBNET=10.13.13.0/24
+      - SERVERURL=vpn.algometic.com #optional
+      - SERVERPORT=51820
+      - ALLOWEDIPS=0.0.0.0/0 #optional
+    volumes:
+      - ./data/config:/config
+      - ./data/lib/modules:/lib/modules
+    ports:
+      - 51820:51820/udp
+    sysctls:
+      - net.ipv4.conf.all.src_valid_mark=1
+    restart: "no" 
+
+
+
+  wg-manager:
+    container_name: wg-manager
+    image: perara/wg-manager
+    restart: "no" 
+    sysctls:
+      net.ipv6.conf.all.disable_ipv6: 0  # Required for IPV6
+    cap_add:
+      - NET_ADMIN
+    #network_mode: host # Alternatively
+    ports:
+       - 51820:51820/udp
+       - 8888:8888
+    volumes:
+      - ./wg-manager:/config
+    environment:
+      HOST: 0.0.0.0
+      PORT: 8888
+      ADMIN_USERNAME: admin
+      ADMIN_PASSWORD: admin
+      WEB_CONCURRENCY: 1
+
+

+ 1 - 0
wireguard/privatekey

@@ -0,0 +1 @@
+YHwke4BuGYYeYza28cltRei0Zlj/H9r7NV81OEtFbEw=

+ 0 - 0
wireguard/publickey


BIN
wireguard/wg-manager/database.db