laxaurus 4 lat temu
commit
ac24460b33
79 zmienionych plików z 2528 dodań i 0 usunięć
  1. 2 0
      ampache/.env
  2. 17 0
      ampache/docker-compose.yml
  3. 5 0
      calibre/.env
  4. 36 0
      calibre/docker-compose.yml
  5. 8 0
      ftp/.env
  6. 21 0
      ftp/docker-compose.yml
  7. 5 0
      gogs/.env
  8. 13 0
      gogs/docker-compose.yml
  9. 9 0
      guacamole/.env
  10. 43 0
      guacamole/docker-compose.yml
  11. 8 0
      mediawiki/.env
  12. 71 0
      mediawiki/docker-compose.yml
  13. 1 0
      minidlna/.env
  14. 2 0
      minidlna/dlna.sh
  15. 15 0
      minidlna/docker-compose.yml
  16. 8 0
      nc/.env
  17. 134 0
      nc/docker-compose.yml
  18. 7 0
      nextcloud/.env
  19. 7 0
      nextcloud/bak/20201013/.env
  20. 34 0
      nextcloud/bak/20201013/cleanup.sh
  21. 125 0
      nextcloud/bak/20201013/docker-compose.yml
  22. 20 0
      nextcloud/bak/20201013/unlock_qnap.sh
  23. 20 0
      nextcloud/bak/20201013/unlock_qnap_seagate.sh
  24. 20 0
      nextcloud/bak/20201013/unlock_qnap_toshiba.sh
  25. 34 0
      nextcloud/cleanup.sh
  26. 134 0
      nextcloud/docker-compose.yml
  27. 4 0
      prometheus/.env
  28. 61 0
      prometheus/docker-compose.yml
  29. 6 0
      scripts/docker_up.sh
  30. 36 0
      scripts/dockers_init.sh
  31. 11 0
      scripts/dockers_run.sh
  32. 8 0
      scripts/mount_qnap.sh
  33. 20 0
      scripts/unlock_qnap_seagate.sh
  34. 20 0
      scripts/unlock_qnap_toshiba.sh
  35. 2 0
      squid/.env
  36. 7 0
      squid/ad_block.sh
  37. 15 0
      squid/docker-compose.yml
  38. 3 0
      sslwp/.env
  39. 5 0
      sslwp/bak/php-uploads.ini
  40. 13 0
      sslwp/bak/www-vortify/wp-content/ai1wm-backups/web.config
  41. 134 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/includes.sh
  42. 71 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/install-docker.sh
  43. 122 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/install-wp-tests.sh
  44. 43 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/docker-compose.yml
  45. 0 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/symfony/translation/Tests/fixtures/empty.ini
  46. 1 0
      sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/symfony/translation/Tests/fixtures/resources.ini
  47. 20 0
      sslwp/bak/www/mediawiki/docs/kss/scripts/kss-node-check.sh
  48. 1 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor-old
  49. 55 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/listRecentCommits.sh
  50. 23 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/pre-commit.sh
  51. 68 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/updateSubmodule.sh
  52. 41 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/sync-gh-pages.sh
  53. 63 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-oojs-ui.sh
  54. 59 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-oojs.sh
  55. 59 0
      sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-unicodejs.sh
  56. 122 0
      sslwp/bak/www/mediawiki/includes/limit.sh
  57. 14 0
      sslwp/bak/www/mediawiki/maintenance/dev/includes/php.sh
  58. 8 0
      sslwp/bak/www/mediawiki/maintenance/dev/includes/require-php.sh
  59. 8 0
      sslwp/bak/www/mediawiki/maintenance/dev/install.sh
  60. 18 0
      sslwp/bak/www/mediawiki/maintenance/dev/installmw.sh
  61. 57 0
      sslwp/bak/www/mediawiki/maintenance/dev/installphp.sh
  62. 14 0
      sslwp/bak/www/mediawiki/maintenance/dev/start.sh
  63. 84 0
      sslwp/bak/www/mediawiki/maintenance/resources/update-oojs-ui.sh
  64. 59 0
      sslwp/bak/www/mediawiki/maintenance/resources/update-oojs.sh
  65. 38 0
      sslwp/bak/www/mediawiki/vendor/firebase/php-jwt/run-tests.sh
  66. 3 0
      sslwp/bak/www/mediawiki/vendor/pear/net_smtp/phpdoc.sh
  67. 5 0
      sslwp/bak/www/mediawiki/vendor/pear/pear-core-minimal/copy-from-pear-core.sh
  68. 63 0
      sslwp/bak/www/mediawiki/vendor/ruflin/elastica/ansible/provision.sh
  69. 27 0
      sslwp/bak/www/mediawiki/vendor/ruflin/elastica/docker-compose.yml
  70. 78 0
      sslwp/docker-compose.yml
  71. 2 0
      transmission-gm/.env
  72. 20 0
      transmission-gm/config/blocklist-update.sh
  73. 34 0
      transmission-gm/docker-compose.yml
  74. 1 0
      transmission-sbt/.env
  75. 20 0
      transmission-sbt/config/blocklist-update.sh
  76. 28 0
      transmission-sbt/docker-compose.yml
  77. 1 0
      transmission/.env
  78. 20 0
      transmission/config/blocklist-update.sh
  79. 34 0
      transmission/docker-compose.yml

+ 2 - 0
ampache/.env

@@ -0,0 +1,2 @@
+DATA_VOLUME_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/Music
+AMPACHE_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/Music/ampache

+ 17 - 0
ampache/docker-compose.yml

@@ -0,0 +1,17 @@
+version: "3"
+services:
+  ampache:
+    image: ampache/ampache 
+    container_name: ampache 
+    volumes:
+      - ${AMPACHE_ROOT}/data/config:/var/www/config
+      - ${AMPACHE_ROOT}/data/log:/var/log/ampache
+#      - ${AMPACHE_ROOT}/data/media:/media
+      - ${DATA_VOLUME_ROOT}/Library:/media
+      - ${AMPACHE_ROOT}/data/mysql:/var/lib/mysql
+      - ${AMPACHE_ROOT}/data/sites-enabled:/etc/apache2/sites-enabled
+    ports:
+      - 9045:80
+      - 8100:8100
+    restart: "no" 
+

+ 5 - 0
calibre/.env

@@ -0,0 +1,5 @@
+CALIBRE_WEB_NAME=calibre-web
+CALIBRE_NAME=calibre
+CALIBRE_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/books/calibre-web
+#OAUTHLIB_RELAX_TOKEN_SCOPE=1
+#OAUTHLIB_INSECURE_TRANSPORT=0

+ 36 - 0
calibre/docker-compose.yml

@@ -0,0 +1,36 @@
+version: "2.1"
+services:
+  calibre-web:
+    image: ghcr.io/linuxserver/calibre-web
+    container_name: ${CALIBRE_WEB_NAME}
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Asia/Hong_Kong
+      - DOCKER_MODS=linuxserver/calibre-web:calibre
+      - OAUTHLIB_RELAX_TOKEN_SCOPE=${OAUTHLIB_RELAX_TOKEN_SCOPE}
+      - OAUTHLIB_INSECURE_TRANSPORT=${OAUTHLIB_INSECURE_TRANSPORT}
+    volumes:
+      - ${CALIBRE_ROOT}/calibre-web/config:/config
+      - ${CALIBRE_ROOT}/books:/books
+    ports:
+      - 8783:8083
+    restart: "no"
+
+  calibre:
+    image: ghcr.io/linuxserver/calibre
+    container_name: ${CALIBRE_NAME}
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Asia/Hong_Kong
+      - GUAC_USER=calibre#  # the user id is calibre# with a pound sign at the end
+      - GUAC_PASS=5f4dcc3b5aa765d61d8327deb882cf99 # password=password
+      - UMASK_SET=022 #optional
+      - CLI_ARGS= #optional
+    volumes:
+      - ${CALIBRE_ROOT}/books:/config
+    ports:
+      - 8780:8080
+      - 8781:8081
+    restart: "no"

+ 8 - 0
ftp/.env

@@ -0,0 +1,8 @@
+CONTAINER_NAME=ftpd
+DATA_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/ftp-scans
+PUBLICHOST=vorsprung.local
+FTP_USER_NAME=fuser
+FTP_USER_PASS=123456
+FTP_USER_HOME=/home/ftpusers/fuser
+
+

+ 21 - 0
ftp/docker-compose.yml

@@ -0,0 +1,21 @@
+version: '3'
+
+
+services:
+  ftpd_server:
+    image: stilliard/pure-ftpd:buster-latest
+    container_name: ${CONTAINER_NAME}
+    ports:
+      - "7021:21"
+#      - "30000-30009:30000-30009"
+    volumes: 
+      - "${DATA_ROOT}/data:${FTP_USER_HOME}"
+      - "${DATA_ROOT}/passwd:/etc/pure-ftpd/passwd"
+    environment:
+      PUBLICHOST: ${PUBLICHOST}
+      FTP_USER_NAME: ${FTP_USER_NAME}
+      FTP_USER_PASS: ${FTP_USER_PASS}
+      FTP_USER_HOME: ${FTP_USER_HOME}
+      ADDED_FLAGS: "--tls=0 --umask 002:002"
+    hostname: ${PUBLICHOST}
+    restart: "no"

+ 5 - 0
gogs/.env

@@ -0,0 +1,5 @@
+DATA_VOLUME_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/git_repo
+CONTAINER_NAME=gogs
+RUN_CROND=true
+BACKUP_INTERVAL=5d
+BACKUP_RETENTION=7d

+ 13 - 0
gogs/docker-compose.yml

@@ -0,0 +1,13 @@
+gogs:
+  restart: "no" 
+  image: gogs/gogs
+  volumes:
+    - ${DATA_VOLUME_ROOT}/data:/data
+  container_name: ${CONTAINER_NAME} 
+  ports:
+    - "10029:22"
+    - "3079:3000"
+  environment:
+    - RUN_CROND=${RUN_CROND}
+    - BACKUP_INTERVAL=${BACKUP_INTERVAL}
+    - BACKUP_RETENTION=${BACKUP_RETENTION}

+ 9 - 0
guacamole/.env

@@ -0,0 +1,9 @@
+CONTAINER_PREFIX=guaca
+GUACA_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/docker_containers/container_data/guacamole
+GUACD_HOSTNAME=guacd
+MYSQL_ROOT_PASSWORD=guacamole
+MYSQL_USER=guacamole
+MYSQL_PASSWORD=password
+MYSQL_DATABASE=guacamole_db
+MYSQL_HOSTNAME=db
+

+ 43 - 0
guacamole/docker-compose.yml

@@ -0,0 +1,43 @@
+version: '3.1'
+
+services:
+
+  guacd:
+    image:  guacamole/guacd:1.2.0
+    container_name: ${CONTAINER_PREFIX}_cd
+    restart: "no"
+    ports:
+      - 4822:4822
+
+
+  db:
+    image: mysql:8.0.22
+    container_name: ${CONTAINER_PREFIX}_db
+    restart: "no"
+    environment:
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} 
+      - MYSQL_USER=${MYSQL_USER}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=${MYSQL_DATABASE}
+    volumes:
+      - ${GUACA_ROOT}/backups:/var/backups
+      - ${GUACA_ROOT}/data_dir:/var/lib/mysql
+
+  guaca:
+    image: guacamole/guacamole:1.2.0
+    container_name: ${CONTAINER_PREFIX}
+    restart: "no"
+    links: 
+      - db
+      - guacd
+    ports:
+      - 8111:8080
+    environment:
+      - MYSQL_HOSTNAME=${MYSQL_HOSTNAME}
+      - GUACD_HOSTNAME=${GUACD_HOSTNAME}      
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} 
+      - MYSQL_USER=${MYSQL_USER}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=${MYSQL_DATABASE}      
+
+

+ 8 - 0
mediawiki/.env

@@ -0,0 +1,8 @@
+CONTAINER_PREFIX=mwiki
+MWIKI_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/mediawiki_data
+MYSQL_ROOT_PASSWORD=root 
+MYSQL_USER=root
+MYSQL_PASSWORD=root
+MYSQL_DATABASE=my_wiki
+PARSOID_DOMAIN_NAME=mediawiki
+

+ 71 - 0
mediawiki/docker-compose.yml

@@ -0,0 +1,71 @@
+version: '3.1'
+volumes:
+  files:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${MWIKI_ROOT}/www"
+  es01:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${MWIKI_ROOT}/es"
+
+
+services:
+
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.5.4
+    container_name: ${CONTAINER_PREFIX}_es
+    restart: "no"
+    ports:
+      - 9255:9200
+    volumes:
+      - es01:/usr/share/elasticsearch/data
+
+  parsoid:
+    image: thenets/parsoid:0.11
+    container_name: ${CONTAINER_PREFIX}_parsoid
+    restart: "no"
+    ports:
+      - 8142:8000
+    environment: 
+      - PARSOID_DOMAIN_${PARSOID_DOMAIN_NAME}=http://${PARSOID_DOMAIN_NAME}/api.php 
+
+  db:
+    image: mysql
+    container_name: ${CONTAINER_PREFIX}_mysql
+    command: --default-authentication-plugin=mysql_native_password
+    restart: "no"
+    environment:
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} 
+      - MYSQL_USER=${MYSQL_USER}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=${MYSQL_DATABASE}
+    volumes: 
+      - ${MWIKI_ROOT}/backups:/var/backups
+      - ${MWIKI_ROOT}/data_dir:/var/lib/mysql
+
+  mediawiki:
+    image: laxaurus/mediawiki:1.1
+    container_name: ${CONTAINER_PREFIX}
+    restart: "no"
+    ports:
+      - 8055:80
+    links:
+      - db
+      - parsoid
+    volumes:
+      - files:/var/www/html
+      #- ${MWIKI_ROOT}/etc:/etc/mediawiki/parsoid 
+
+
+      #- ${MWIKI_ROOT}/www:/var/www/html
+      
+      # After initial setup, download LocalSettings.php to the same directory as
+      # this yaml and uncomment the following line and use compose to restart
+      # the mediawiki service
+      #- ${MWIKI_ROOT}/LocalSettings.php:/var/www/html/LocalSettings.php
+      #- ${MWIKI_ROOT}/images:/var/www/html/images

+ 1 - 0
minidlna/.env

@@ -0,0 +1 @@
+DATA_VOLUME_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/crm

+ 2 - 0
minidlna/dlna.sh

@@ -0,0 +1,2 @@
+#docker run -d --name minidlna2 --net=host    -p 8200:8200    -p 1900:1900/udp -v  /media/luks-6fc288279795f202021d3d1996b3c2e9/crm/crm-media:/opt/Videos  geekduck/minidlna
+docker run -d --name minidlna2 --net=host    -p 8200:8200    -p 1900:1900/udp -v  /media/luks-6fc288279795f202021d3d1996b3c2e9/crm/crm-media-002:/opt/Videos  geekduck/minidlna

+ 15 - 0
minidlna/docker-compose.yml

@@ -0,0 +1,15 @@
+version: "3"
+services:
+  minidlna:
+    image: geekduck/minidlna
+    container_name: minidlna 
+    network_mode: host
+    volumes:
+      - ${DATA_VOLUME_ROOT}/crm-media-002/movies3/downloads/complete:/opt/Videos
+      - ${DATA_VOLUME_ROOT}/crm-media:/opt/Music
+      - ${DATA_VOLUME_ROOT}/crm-media:/opt/Pictures
+    ports:
+      - 8200:8200
+      - 1900:1900/udp
+    restart: "no" 
+

+ 8 - 0
nc/.env

@@ -0,0 +1,8 @@
+DATA_VOLUME_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/qnap_20210206
+REDIS_VOLUME_ROOT=/home/laxaurus/docker/nc
+#DB_VOLUME_ROOT=/home/laxaurus/docker/nc
+DB_VOLUME_ROOT=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03/qnap_20210206/data
+ELASTIC_SEARCH_ROOT=/home/laxaurus/docker/nc/elasticsearch
+MYSQL_ROOT_PASSWORD=2eqQ6Rqs
+MYSQL_PASSWORD=VT2cL4Zg
+CONTAINER_NAME=nu_nc

+ 134 - 0
nc/docker-compose.yml

@@ -0,0 +1,134 @@
+version: '2.1'
+#version: '3'
+
+volumes:
+  files:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/files"
+  db:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DB_VOLUME_ROOT}/db"
+
+  redis:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${REDIS_VOLUME_ROOT}/redis"
+
+  es_index:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es"
+
+  es_root:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es_root"
+
+
+  oo_data:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/oo_data"
+ 
+  clamav:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/clamav"
+
+
+services:
+  db:
+    image: mariadb
+    container_name: ${CONTAINER_NAME}_db
+    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
+    restart: "no" 
+    volumes:
+      - db:/var/lib/mysql
+    environment:
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=nextcloud
+      - MYSQL_USER=nextcloud
+
+
+  redis:
+    image: redis:alpine
+    container_name: ${CONTAINER_NAME}_redis
+    restart: "no"
+    volumes:
+      - redis:/var/lib/redis
+
+
+  app:
+    image: nextcloud:20.0.6
+    container_name: ${CONTAINER_NAME}
+    ports:
+      - 1235:80
+    links:
+      - db
+      - redis
+    volumes:
+      - files:/var/www/html
+    restart: "no" 
+    environment:
+      - REDIS_HOST=redis
+      - REDIS_PASSWORD=
+
+
+  es01:
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.1
+    container_name: ${CONTAINER_NAME}_es 
+    environment:
+      - node.name=es01
+      - discovery.type=single-node
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      #- es_index:/usr/share/elasticsearch/data
+      - es_index:/usr/share/elasticsearch/data
+      #- es_root:/usr/share/elasticsearch
+      - es_root:/usr/share/elasticsearch
+    restart: "no" 
+    ports:
+      - 9201:9200
+
+  onlyoffice:
+    container_name: ${CONTAINER_NAME}_onlyoffice
+    image: onlyoffice/documentserver:6.0.2
+    stdin_open: true
+    tty: true
+    restart: "no" 
+    #restart: always
+    ports:
+      - 9981:80
+    volumes:
+      - oo_data:/var/www/onlyoffice/Data
+      #- oo_log:/var/log/onlyoffice
+
+
+  av:
+    image: mkodockx/docker-clamav:alpine
+    container_name: ${CONTAINER_NAME}_clamav 
+    restart: "no" 
+    ports:
+      - 3311:3310
+    volumes:
+      - clamav:/etc/clamav

+ 7 - 0
nextcloud/.env

@@ -0,0 +1,7 @@
+DATA_VOLUME_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/nextcloud/qnap_nc20200110
+REDIS_VOLUME_ROOT=/home/laxaurus/docker/nextcloud
+DB_VOLUME_ROOT=/home/laxaurus/docker/nextcloud
+ELASTIC_SEARCH_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/nextcloud/qnap_nc20200110_es
+MYSQL_ROOT_PASSWORD=2eqQ6Rqs
+MYSQL_PASSWORD=VT2cL4Zg
+CONTAINER_NAME=qnap_nc20200110

+ 7 - 0
nextcloud/bak/20201013/.env

@@ -0,0 +1,7 @@
+DATA_VOLUME_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/nextcloud/qnap_nc20200110
+REDIS_VOLUME_ROOT=/home/laxaurus/docker/nextcloud
+DB_VOLUME_ROOT=/home/laxaurus/docker/nextcloud
+ELASTIC_SEARCH_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/nextcloud/qnap_nc20200110_es
+MYSQL_ROOT_PASSWORD=2eqQ6Rqs
+MYSQL_PASSWORD=VT2cL4Zg
+CONTAINER_NAME=qnap_nc20200110

+ 34 - 0
nextcloud/bak/20201013/cleanup.sh

@@ -0,0 +1,34 @@
+#!/bin/bash
+source .env
+docker-compose down
+clean_up(){
+	docker volume rm nextcloud_files
+	docker volume rm nextcloud_db
+	docker volume rm nextcloud_redis
+
+	cd $DATA_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./data
+	mkdir ./data
+	mkdir ./data/files
+	cd $DB_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./db
+	mkdir ./db
+	cd $REDIS_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./redis
+	mkdir ./redis
+ 	docker ps
+ 	docker volume ls
+}
+
+while true; do
+    read -p "Do you wish to proceed? This will ERASE all the data on $DATA_VOLUME_ROOT. " yn
+    case $yn in
+        [Yy]* ) clean_up; break;;
+        [Nn]* ) exit;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+

+ 125 - 0
nextcloud/bak/20201013/docker-compose.yml

@@ -0,0 +1,125 @@
+version: '2.1'
+#version: '3'
+
+volumes:
+  files:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/files"
+  db:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DB_VOLUME_ROOT}/db"
+
+  redis:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${REDIS_VOLUME_ROOT}/redis"
+
+  es_index:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es"
+
+  es_root:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es_root"
+
+
+  oo_data:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/oo_data"
+
+services:
+  db:
+    image: mariadb
+    container_name: ${CONTAINER_NAME}_db
+    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
+    restart: always
+    volumes:
+      - db:/var/lib/mysql
+    environment:
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=nextcloud
+      - MYSQL_USER=nextcloud
+
+
+  redis:
+    image: redis:alpine
+    container_name: ${CONTAINER_NAME}_redis
+    restart: "no"
+    volumes:
+      - redis:/var/lib/redis
+
+
+  app:
+    image: nextcloud
+    container_name: ${CONTAINER_NAME}
+    ports:
+      - 1234:80
+    links:
+      - db
+      - redis
+    volumes:
+      - files:/var/www/html
+    restart: "no" 
+    environment:
+      - REDIS_HOST=redis
+      - REDIS_PASSWORD=
+
+
+  es01:
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.1
+    container_name: ${CONTAINER_NAME}_es 
+    environment:
+      - node.name=es01
+      - discovery.type=single-node
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      #- es_index:/usr/share/elasticsearch/data
+      - es_index:/usr/share/elasticsearch/data
+      #- es_root:/usr/share/elasticsearch
+      - es_root:/usr/share/elasticsearch
+    restart: "no" 
+    ports:
+      - 9200:9200
+
+  onlyoffice:
+    container_name: ${CONTAINER_NAME}_onlyoffice
+    image: onlyoffice/documentserver:latest
+    stdin_open: true
+    tty: true
+    restart: "no" 
+    #restart: always
+    ports:
+      - 9980:80
+    volumes:
+      - oo_data:/var/www/onlyoffice/Data
+      #- oo_log:/var/log/onlyoffice
+
+
+  av:
+    image: mkodockx/docker-clamav:alpine
+    container_name: ${CONTAINER_NAME}_clamav 
+    restart: "no" 
+    ports:
+      - 3310:3310
+    #restart: unless-stopped

+ 20 - 0
nextcloud/bak/20201013/unlock_qnap.sh

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+#LV_NAME1=/dev/sdd1
+LV_NAME1=0a551422-727b-43ac-bd0b-917193b2db77
+LUKS_MNT1=luks-6fc288279795f202021d3d1996b3c2e9
+UNLOCK_MNT1=/media/luks-6fc288279795f202021d3d1996b3c2e9
+#
+#
+sudo umount $UNLOCK_MNT1
+sudo cryptsetup luksClose $LUKS_MNT1
+
+#
+# open device
+echo "unlock qnap disk /dev/sde1..."
+sudo cryptsetup luksOpen UUID=$LV_NAME1 $LUKS_MNT1 
+echo "mounting ..."
+sudo mount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+#sudo mount -o rw, remount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+
+

+ 20 - 0
nextcloud/bak/20201013/unlock_qnap_seagate.sh

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+#LV_NAME1=/dev/sdd1
+LV_NAME1=0a551422-727b-43ac-bd0b-917193b2db77
+LUKS_MNT1=luks-0a551422-727b-43ac-bd0b-917193b2db77
+UNLOCK_MNT1=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77
+#
+#
+sudo umount $UNLOCK_MNT1
+sudo cryptsetup luksClose $LUKS_MNT1
+
+#
+# open device
+echo "unlock qnap disk /dev/sdd1..."
+sudo cryptsetup luksOpen UUID=$LV_NAME1 $LUKS_MNT1 
+echo "mounting ..."
+sudo mount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+#sudo mount -o rw, remount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+
+

+ 20 - 0
nextcloud/bak/20201013/unlock_qnap_toshiba.sh

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+#LV_NAME1=/dev/sdd1
+LV_NAME1=53d827ad-9c21-4312-ab46-f76f5ce57d03
+LUKS_MNT1=luks-53d827ad-9c21-4312-ab46-f76f5ce57d03
+UNLOCK_MNT1=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03
+#
+#
+sudo umount $UNLOCK_MNT1
+sudo cryptsetup luksClose $LUKS_MNT1
+
+#
+# open device
+echo "unlock qnap disk /dev/sde1..."
+sudo cryptsetup luksOpen UUID=$LV_NAME1 $LUKS_MNT1 
+echo "mounting ..."
+sudo mount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+#sudo mount -o rw, remount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+
+

+ 34 - 0
nextcloud/cleanup.sh

@@ -0,0 +1,34 @@
+#!/bin/bash
+source .env
+docker-compose down
+clean_up(){
+	docker volume rm nextcloud_files
+	docker volume rm nextcloud_db
+	docker volume rm nextcloud_redis
+
+	cd $DATA_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./data
+	mkdir ./data
+	mkdir ./data/files
+	cd $DB_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./db
+	mkdir ./db
+	cd $REDIS_VOLUME_ROOT
+	pwd
+	sudo rm -Rf ./redis
+	mkdir ./redis
+ 	docker ps
+ 	docker volume ls
+}
+
+while true; do
+    read -p "Do you wish to proceed? This will ERASE all the data on $DATA_VOLUME_ROOT. " yn
+    case $yn in
+        [Yy]* ) clean_up; break;;
+        [Nn]* ) exit;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+

+ 134 - 0
nextcloud/docker-compose.yml

@@ -0,0 +1,134 @@
+version: '2.1'
+#version: '3'
+
+volumes:
+  files:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/files"
+  db:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DB_VOLUME_ROOT}/db"
+
+  redis:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${REDIS_VOLUME_ROOT}/redis"
+
+  es_index:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es"
+
+  es_root:
+    driver: local
+    driver_opts:
+      type: volume 
+      o: 'bind'
+      device: "${ELASTIC_SEARCH_ROOT}/es_root"
+
+
+  oo_data:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/oo_data"
+ 
+  clamav:
+    driver: local
+    driver_opts: 
+      type: volume 
+      o: 'bind'
+      device: "${DATA_VOLUME_ROOT}/data/clamav"
+
+
+services:
+  db:
+    image: mariadb
+    container_name: ${CONTAINER_NAME}_db
+    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
+    restart: "no" 
+    volumes:
+      - db:/var/lib/mysql
+    environment:
+      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
+      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
+      - MYSQL_DATABASE=nextcloud
+      - MYSQL_USER=nextcloud
+
+
+  redis:
+    image: redis:alpine
+    container_name: ${CONTAINER_NAME}_redis
+    restart: "no"
+    volumes:
+      - redis:/var/lib/redis
+
+
+  app:
+    image: nextcloud
+    container_name: ${CONTAINER_NAME}
+    ports:
+      - 1234:80
+    links:
+      - db
+      - redis
+    volumes:
+      - files:/var/www/html
+    restart: "no" 
+    environment:
+      - REDIS_HOST=redis
+      - REDIS_PASSWORD=
+
+
+  es01:
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.1
+    container_name: ${CONTAINER_NAME}_es 
+    environment:
+      - node.name=es01
+      - discovery.type=single-node
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      #- es_index:/usr/share/elasticsearch/data
+      - es_index:/usr/share/elasticsearch/data
+      #- es_root:/usr/share/elasticsearch
+      - es_root:/usr/share/elasticsearch
+    restart: "no" 
+    ports:
+      - 9200:9200
+
+  onlyoffice:
+    container_name: ${CONTAINER_NAME}_onlyoffice
+    image: onlyoffice/documentserver:latest
+    stdin_open: true
+    tty: true
+    restart: "no" 
+    #restart: always
+    ports:
+      - 9980:80
+    volumes:
+      - oo_data:/var/www/onlyoffice/Data
+      #- oo_log:/var/log/onlyoffice
+
+
+  av:
+    image: mkodockx/docker-clamav:alpine
+    container_name: ${CONTAINER_NAME}_clamav 
+    restart: "no" 
+    ports:
+      - 3310:3310
+    volumes:
+      - clamav:/etc/clamav

+ 4 - 0
prometheus/.env

@@ -0,0 +1,4 @@
+PROMETHEUS_NAME=prometheus
+PROMETHEUS_ROOT=/home/laxaurus/docker/prometheus
+ADMIN_USER=admin
+ADMIN_PASSWORD=admin

+ 61 - 0
prometheus/docker-compose.yml

@@ -0,0 +1,61 @@
+version: '3.2'
+services:
+  prometheus:
+    image: prom/prometheus:latest
+    container_name: prometheus
+    ports:
+    - 9090:9090
+    command:
+    - --config.file=/etc/prometheus/prometheus.yml
+    volumes:
+    - ${PROMETHEUS_ROOT}/prometheus.yml:/etc/prometheus/prometheus.yml:ro
+    depends_on:
+    - cadvisor
+    - node-exporter
+  cadvisor:
+    image: gcr.io/google-containers/cadvisor:latest
+    container_name: cadvisor
+    ports:
+    - 8087:8080
+    volumes:
+    - /:/rootfs:ro
+    - /var/run:/var/run:rw
+    - /sys:/sys:ro
+    - /var/lib/docker/:/var/lib/docker:ro
+    depends_on:
+    - redis
+  
+  node-exporter:
+    image: prom/node-exporter:latest
+    container_name: node-exporter
+    ports:
+    - 9100:9100
+    volumes:
+    - /proc:/proc:rw
+  redis:
+    image: redis:latest
+    container_name: redis
+    ports:
+    - 6479:6379
+
+  grafana:
+    image: grafana/grafana:latest
+    container_name: monitoring_grafana
+    restart: "no" 
+    links:
+      - prometheus:prometheus
+    volumes:
+      - ${PROMETHEUS_ROOT}/grafana/data/grafana:/var/lib/grafana
+      - ${PROMETHEUS_ROOT}/grafana/provisioning:/etc/grafana/provisioning
+    environment:
+      - GF_SECURITY_ADMIN_USER=${ADMIN_USER}
+      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD}
+      - GF_USERS_ALLOW_SIGN_UP=false
+      - GF_SERVER_DOMAIN=myrul.com
+      - GF_SMTP_ENABLED=true
+      - GF_SMTP_HOST=smtp.gmail.com:587
+      - GF_SMTP_USER=myaddress@gmail.com
+      - GF_SMTP_PASSWORD=mypassword
+      - GF_SMTP_FROM_ADDRESS=myaddress@gmail.com
+    ports:
+      - 3000:3000

+ 6 - 0
scripts/docker_up.sh

@@ -0,0 +1,6 @@
+#!/bin/bash
+CONTAINER_ROOT=/home/laxaurus/docker
+
+CONTAINER_DIR=$CONTAINER_ROOT/$1
+echo "attempting to start $1 at $CONTAINER_DIR..."
+env $(cat $CONTAINER_DIR/.env | grep "#" -v) docker-compose -f $CONTAINER_DIR/docker-compose.yml up -d 

+ 36 - 0
scripts/dockers_init.sh

@@ -0,0 +1,36 @@
+#!/bin/bash
+echo "Mount encrypted filesystems before running this script."
+echo "Unlock scripts are located in the nextcloud directory."
+
+CONTAINER_ROOT="/home/laxaurus/docker"
+#
+#
+CONTAINERS_UP=( sslwp ftp nextcloud squid transmission transmission-sbt transmission-gm minidlna calibre ampache prometheus guacamole mediawiki)
+CONTAINERS_DOWN=( ftpd squid transmission transmission-sbt transmission-gm minidlna calibre calibre-web ampache prometheus qnap_nc20200110 qnap_nc20200110_db qnap_nc20200110_redis qnap_nc20200110_onlyoffice qnap_nc20200110_clamav qnap_nc20200110_es monitoring_grafana cadvisor redis node-exporter sslwp_https-portal sslwp_web sslwp_php mwiki mwiki_mysql mwiki_parsoid guaca guaca_db guaca_cd) 
+#CONTAINERS=( transmission-sbt transmission )
+
+
+if [[ $1 == "up" ]]
+then
+	for i in "${CONTAINERS_UP[@]}"
+		do
+			CONTAINER_DIR="$CONTAINER_ROOT/${i}" 
+		        echo "starting container ${i} at $CONTAINER_DIR..."
+			env $(cat $CONTAINER_DIR/.env | grep "#" -v) docker-compose -f $CONTAINER_DIR/docker-compose.yml up -d 
+
+		done
+elif [[ $1 == "down" ]]
+then
+
+	for i in "${CONTAINERS_DOWN[@]}"
+		do
+			echo "stopping container ${i}..."
+			docker stop "${i}"
+                        
+		done
+else
+	echo "$0: allowed options: {up|down}"
+	exit -1
+
+fi
+

+ 11 - 0
scripts/dockers_run.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+CONTAINER_ROOT=/home/laxaurus/docker
+CONTAINERS=( transmission transmission-sbt transmission-gm )
+
+for i in "${CONTAINERS[@]}"
+do
+        echo "starting container ${i}..."
+	$CONTAINER_ROOT/scripts/docker_up.sh ${i} 
+done
+

+ 8 - 0
scripts/mount_qnap.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+echo "unlock seagate..."
+./unlock_qnap_seagate.sh
+echo "unlock toshiba..."
+./unlock_qnap_toshiba.sh
+
+

+ 20 - 0
scripts/unlock_qnap_seagate.sh

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+#LV_NAME1=/dev/sdd1
+LV_NAME1=0a551422-727b-43ac-bd0b-917193b2db77
+LUKS_MNT1=luks-0a551422-727b-43ac-bd0b-917193b2db77
+UNLOCK_MNT1=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77
+#
+#
+sudo umount $UNLOCK_MNT1
+sudo cryptsetup luksClose $LUKS_MNT1
+
+#
+# open device
+echo "unlock qnap disk /dev/sdd1..."
+sudo cryptsetup luksOpen UUID=$LV_NAME1 $LUKS_MNT1 
+echo "mounting ..."
+sudo mount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+#sudo mount -o rw, remount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+
+

+ 20 - 0
scripts/unlock_qnap_toshiba.sh

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+#LV_NAME1=/dev/sdd1
+LV_NAME1=53d827ad-9c21-4312-ab46-f76f5ce57d03
+LUKS_MNT1=luks-53d827ad-9c21-4312-ab46-f76f5ce57d03
+UNLOCK_MNT1=/media/luks-53d827ad-9c21-4312-ab46-f76f5ce57d03
+#
+#
+sudo umount $UNLOCK_MNT1
+sudo cryptsetup luksClose $LUKS_MNT1
+
+#
+# open device
+echo "unlock qnap disk /dev/sde1..."
+sudo cryptsetup luksOpen UUID=$LV_NAME1 $LUKS_MNT1 
+echo "mounting ..."
+sudo mount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+#sudo mount -o rw, remount /dev/mapper/$LUKS_MNT1 $UNLOCK_MNT1
+
+

+ 2 - 0
squid/.env

@@ -0,0 +1,2 @@
+SQUID_ROOT=/home/laxaurus/docker/squid
+CONTAINER_NAME=squid

+ 7 - 0
squid/ad_block.sh

@@ -0,0 +1,7 @@
+#### Calomel.org  ad_servers_newlist.sh 
+#
+## get new ad server list
+curl -sS -L --compressed "http://pgl.yoyo.org/adservers/serverlist.php?hostformat=nohtml&showintro=0&mimetype=plaintext" > ./ad_block.txt 
+
+## refresh squid
+docker kill -s HUP squid

+ 15 - 0
squid/docker-compose.yml

@@ -0,0 +1,15 @@
+version: '2.1'
+
+
+services:
+  squid:
+    image: sameersbn/squid:3.5.27-2
+    volumes:
+      - ${SQUID_ROOT}/cache:/var/spool/squid
+      - ${SQUID_ROOT}/squid.conf:/etc/squid/squid.conf
+      - ${SQUID_ROOT}/ad_block.txt:/etc/squid/ad_block.txt
+    container_name: ${CONTAINER_NAME} 
+    ports:
+      - 3128:3128
+#    restart: always
+    restart: "no" 

+ 3 - 0
sslwp/.env

@@ -0,0 +1,3 @@
+CONTAINER_PREFIX=sslwp
+DATA_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/docker_containers/container_data/sslwp
+

+ 5 - 0
sslwp/bak/php-uploads.ini

@@ -0,0 +1,5 @@
+file_uploads = On
+memory_limit = 64M
+upload_max_filesize = 64M
+post_max_size = 64M
+max_execution_time = 600

+ 13 - 0
sslwp/bak/www-vortify/wp-content/ai1wm-backups/web.config

@@ -0,0 +1,13 @@
+<configuration>
+<system.webServer>
+<staticContent>
+<mimeMap fileExtension=".wpress" mimeType="application/octet-stream" />
+</staticContent>
+<defaultDocument>
+<files>
+<add value="index.php" />
+</files>
+</defaultDocument>
+<directoryBrowse enabled="false" />
+</system.webServer>
+</configuration>

+ 134 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/includes.sh

@@ -0,0 +1,134 @@
+#!/bin/bash
+
+##
+# Ask a Yes/No question, and wait for a reply.
+#
+# This is a general-purpose function to ask Yes/No questions in Bash, either with or without a default
+# answer. It keeps repeating the question until it gets a valid answer.
+#
+# @param {string} prompt    The question to ask the user.
+# @param {string} [default] Optional. "Y" or "N", for the default option to use if none is entered.
+# @param {int}    [timeout] Optional. The number of seconds to wait before using the default option.
+#
+# @returns {bool} true if the user replies Yes, false if the user replies No.
+##
+ask() {
+    # Source: https://djm.me/ask
+    local timeout endtime timediff prompt default reply
+
+    while true; do
+
+		timeout="${3:-}"
+
+        if [ "${2:-}" = "Y" ]; then
+            prompt="Y/n"
+            default=Y
+        elif [ "${2:-}" = "N" ]; then
+            prompt="y/N"
+            default=N
+        else
+            prompt="y/n"
+            default=
+			timeout=
+        fi
+
+		if [ -z "$timeout" ]; then
+        	# Ask the question (not using "read -p" as it uses stderr not stdout)
+        	echo -en "$1 [$prompt] "
+
+        	# Read the answer (use /dev/tty in case stdin is redirected from somewhere else)
+        	read reply </dev/tty
+		else
+			endtime=$((`date +%s` + $timeout));
+			while [ "$endtime" -ge `date +%s` ]; do
+				timediff=$(($endtime - `date +%s`))
+
+				echo -en "\r$1 [$prompt] (Default $default in ${timediff}s) "
+				read -t 1 reply </dev/tty
+
+				if [ -n "$reply" ]; then
+					break
+				fi
+			done
+		fi
+
+        # Default?
+        if [ -z "$reply" ]; then
+            reply=$default
+        fi
+
+        # Check if the reply is valid
+        case "$reply" in
+            Y*|y*) return 0 ;;
+            N*|n*) return 1 ;;
+        esac
+
+    done
+}
+
+##
+# Download from a remote source.
+#
+# Checks for the existence of curl and wget, then downloads the remote file using the first available option.
+#
+# @param {string} remote  The remote file to download.
+# @param {string} [local] Optional. The local filename to use. If it isn't passed, STDOUT is used.
+#
+# @return {bool} Whether the download succeeded or not.
+##
+download() {
+    if command_exists "curl"; then
+        curl -s -o "${2:--}" "$1"
+    elif command_exists "wget"; then
+		wget -nv -O "${2:--}" "$1"
+    fi
+}
+
+##
+# Add error message formatting to a string, and echo it.
+#
+# @param {string} message The string to add formatting to.
+##
+error_message() {
+	echo -en "\033[31mERROR\033[0m: $1"
+}
+
+##
+# Add warning message formatting to a string, and echo it.
+#
+# @param {string} message The string to add formatting to.
+##
+warning_message() {
+	echo -en "\033[33mWARNING\033[0m: $1"
+}
+
+##
+# Add status message formatting to a string, and echo it.
+#
+# @param {string} message The string to add formatting to.
+##
+status_message() {
+	echo -en "\033[32mSTATUS\033[0m: $1"
+}
+
+##
+# Add formatting to an action string.
+#
+# @param {string} message The string to add formatting to.
+##
+action_format() {
+	echo -en "\033[32m$1\033[0m"
+}
+
+##
+# Check if the command exists as some sort of executable.
+#
+# The executable form of the command could be an alias, function, builtin, executable file or shell keyword.
+#
+# @param {string} command The command to check.
+#
+# @return {bool} Whether the command exists or not.
+##
+command_exists() {
+	type -t "$1" >/dev/null 2>&1
+}

+ 71 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/install-docker.sh

@@ -0,0 +1,71 @@
+#!/bin/bash
+
+#Set WordPress version
+WP_VERSION=${1-latest}
+
+# Exit if any command fails
+set -e
+
+# Include useful functions
+. "$(dirname "$0")/includes.sh"
+
+# Check that Docker is installed
+if ! command_exists "docker"; then
+	echo -e $(error_message "Docker doesn't seem to be installed. Please head on over to the Docker site to download it: $(action_format "https://www.docker.com/community-edition#/download")")
+	exit 1
+fi
+
+# Check that Docker is running
+if ! docker info >/dev/null 2>&1; then
+	echo -e $(error_message "Docker isn't running. Please check that you've started your Docker app, and see it in your system tray.")
+	exit 1
+fi
+
+# Stop existing containers
+echo -e $(status_message "Stopping Docker containers...")
+docker-compose down --remove-orphans >/dev/null 2>&1
+
+# Download image updates
+echo -e $(status_message "Downloading Docker image updates...")
+docker-compose pull --parallel
+
+# Launch the containers
+echo -e $(status_message "Starting Docker containers...")
+docker-compose up -d >/dev/null
+
+HOST_PORT=$(docker-compose port wordpress 80 | awk -F : '{printf $2}')
+
+# Wait until the docker containers are set up properly
+echo -en $(status_message "Attempting to connect to wordpress...")
+until $(curl -L http://localhost:$HOST_PORT -so - 2>&1 | grep -q "WordPress"); do
+    echo -n '.'
+    sleep 5
+done
+echo ''
+
+# Install WordPress
+echo -e $(status_message "Installing WordPress...")
+docker-compose run --rm -u 33 cli core install --url=localhost:$HOST_PORT --title=TestSite --admin_user=admin --admin_password=password --admin_email=test@test.com >/dev/null
+# Check for WordPress updates, just in case the WordPress image isn't up to date.
+docker-compose run --rm -u 33 cli core update >/dev/null
+
+# If the 'wordpress' volume wasn't removed during the down/up earlier, but the host port has changed, we need to update it.
+CURRENT_URL=$(docker-compose run -T --rm cli option get siteurl)
+if [ "$CURRENT_URL" != "http://localhost:$HOST_PORT" ]; then
+	docker-compose run --rm cli option update home "http://localhost:$HOST_PORT" >/dev/null
+	docker-compose run --rm cli option update siteurl "http://localhost:$HOST_PORT" >/dev/null
+fi
+echo -e $(status_message "Server is running at:")
+echo -e $(status_message "http://localhost:$HOST_PORT")
+
+# Install Composer
+echo -e $(status_message "Installing and updating Composer modules...")
+docker-compose run --rm composer install
+
+# Install the PHPUnit test scaffolding
+echo -e $(status_message "Installing PHPUnit test scaffolding...")
+docker-compose run --rm wordpress_phpunit /app/bin/install-wp-tests.sh wordpress_test root example mysql "${WP_VERSION}" false >/dev/null
+echo -e $(status_message "Completed installing tests")
+
+
+

+ 122 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/bin/install-wp-tests.sh

@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+
+# Include useful functions
+. "$(dirname "$0")/includes.sh"
+
+if [ $# -lt 3 ]; then
+	echo "usage: $0 <db-name> <db-user> <db-pass> [db-host] [wp-version] [skip-database-creation]"
+	exit 1
+fi
+
+DB_NAME=$1
+DB_USER=$2
+DB_PASS=$3
+DB_HOST=${4-localhost}
+WP_VERSION=${5-latest}
+SKIP_DB_CREATE=${6-false}
+
+WP_TESTS_DIR=${WP_TESTS_DIR-/tmp/wordpress-tests-lib}
+WP_CORE_DIR=${WP_CORE_DIR-/tmp/wordpress/}
+
+if [[ $WP_VERSION =~ [0-9]+\.[0-9]+(\.[0-9]+)? ]]; then
+	WP_TESTS_TAG="tags/$WP_VERSION"
+elif [[ $WP_VERSION == 'nightly' || $WP_VERSION == 'trunk' ]]; then
+	WP_TESTS_TAG="trunk"
+else
+	# http serves a single offer, whereas https serves multiple. we only want one
+	download http://api.wordpress.org/core/version-check/1.7/ /tmp/wp-latest.json
+	grep '[0-9]+\.[0-9]+(\.[0-9]+)?' /tmp/wp-latest.json
+	LATEST_VERSION=$(grep -o '"version":"[^"]*' /tmp/wp-latest.json | sed 's/"version":"//')
+	if [[ -z "$LATEST_VERSION" ]]; then
+		echo "Latest WordPress version could not be found"
+		exit 1
+	fi
+	WP_TESTS_TAG="tags/$LATEST_VERSION"
+fi
+
+set -ex
+
+install_wp() {
+
+	if [ -d $WP_CORE_DIR ]; then
+		return;
+	fi
+
+	mkdir -p $WP_CORE_DIR
+
+	if [[ $WP_VERSION == 'nightly' || $WP_VERSION == 'trunk' ]]; then
+		mkdir -p /tmp/wordpress-nightly
+		download https://wordpress.org/nightly-builds/wordpress-latest.zip  /tmp/wordpress-nightly/wordpress-nightly.zip
+		unzip -q /tmp/wordpress-nightly/wordpress-nightly.zip -d /tmp/wordpress-nightly/
+		mv /tmp/wordpress-nightly/wordpress/* $WP_CORE_DIR
+	else
+		if [ $WP_VERSION == 'latest' ]; then
+			local ARCHIVE_NAME='latest'
+		else
+			local ARCHIVE_NAME="wordpress-$WP_VERSION"
+		fi
+		download https://wordpress.org/${ARCHIVE_NAME}.tar.gz  /tmp/wordpress.tar.gz
+		tar --strip-components=1 -zxmf /tmp/wordpress.tar.gz -C $WP_CORE_DIR
+	fi
+
+	download https://raw.github.com/markoheijnen/wp-mysqli/master/db.php $WP_CORE_DIR/wp-content/db.php
+}
+
+install_test_suite() {
+	# portable in-place argument for both GNU sed and Mac OSX sed
+	if [[ $(uname -s) == 'Darwin' ]]; then
+		local ioption='-i .bak'
+	else
+		local ioption='-i'
+	fi
+
+	# set up testing suite if it doesn't yet exist
+	if [ ! -d $WP_TESTS_DIR ]; then
+		# set up testing suite
+		mkdir -p $WP_TESTS_DIR
+		svn co --quiet https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/includes/ $WP_TESTS_DIR/includes
+		svn co --quiet https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/data/ $WP_TESTS_DIR/data
+	fi
+
+	if [ ! -f wp-tests-config.php ]; then
+		download https://develop.svn.wordpress.org/${WP_TESTS_TAG}/wp-tests-config-sample.php "$WP_TESTS_DIR"/wp-tests-config.php
+		# remove all forward slashes in the end
+		WP_CORE_DIR=$(echo $WP_CORE_DIR | sed "s:/\+$::")
+		sed $ioption "s:dirname( __FILE__ ) . '/src/':'$WP_CORE_DIR/':" "$WP_TESTS_DIR"/wp-tests-config.php
+		sed $ioption "s/youremptytestdbnamehere/$DB_NAME/" "$WP_TESTS_DIR"/wp-tests-config.php
+		sed $ioption "s/yourusernamehere/$DB_USER/" "$WP_TESTS_DIR"/wp-tests-config.php
+		sed $ioption "s/yourpasswordhere/$DB_PASS/" "$WP_TESTS_DIR"/wp-tests-config.php
+		sed $ioption "s|localhost|${DB_HOST}|" "$WP_TESTS_DIR"/wp-tests-config.php
+	fi
+
+}
+
+install_db() {
+
+	if [ ${SKIP_DB_CREATE} = "true" ]; then
+		return 0
+	fi
+
+	# parse DB_HOST for port or socket references
+	local PARTS=(${DB_HOST//\:/ })
+	local DB_HOSTNAME=${PARTS[0]};
+	local DB_SOCK_OR_PORT=${PARTS[1]};
+	local EXTRA=""
+
+	if ! [ -z $DB_HOSTNAME ] ; then
+		if [ $(echo $DB_SOCK_OR_PORT | grep -e '^[0-9]\{1,\}$') ]; then
+			EXTRA=" --host=$DB_HOSTNAME --port=$DB_SOCK_OR_PORT --protocol=tcp"
+		elif ! [ -z $DB_SOCK_OR_PORT ] ; then
+			EXTRA=" --socket=$DB_SOCK_OR_PORT"
+		elif ! [ -z $DB_HOSTNAME ] ; then
+			EXTRA=" --host=$DB_HOSTNAME --protocol=tcp"
+		fi
+	fi
+
+	# create database
+	mysql --user="$DB_USER" --password="$DB_PASS"$EXTRA --execute "CREATE DATABASE IF NOT EXISTS $DB_NAME;"
+}
+
+install_wp
+install_test_suite
+install_db

+ 43 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/calderawp/caldera-forms-query/docker-compose.yml

@@ -0,0 +1,43 @@
+version: '3.1'
+
+services:
+
+  wordpress:
+    image: wordpress
+    ports:
+      - 8888:80
+    environment:
+      WORDPRESS_DB_PASSWORD: example
+      ABSPATH: /usr/src/wordpress/
+    volumes:
+      - wordpress:/var/www/html
+      - .:/var/www/html/wp-content/plugins/caldera-forms-query
+
+  cli:
+    image: wordpress:cli
+    volumes:
+      - wordpress:/var/www/html
+      - .:/var/www/html/wp-content/plugins/caldera-forms-query
+
+  mysql:
+    image: mysql:5.7
+    environment:
+      MYSQL_ROOT_PASSWORD: example
+      MYSQL_DATABASE: wordpress_test
+
+  wordpress_phpunit:
+    image: chriszarate/wordpress-phpunit
+    environment:
+      PHPUNIT_DB_HOST: mysql
+    volumes:
+      - .:/app
+      - testsuite:/tmp
+
+  composer:
+    image: composer
+    volumes:
+      - .:/app
+
+volumes:
+  testsuite:
+  wordpress:

+ 0 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/symfony/translation/Tests/fixtures/empty.ini


+ 1 - 0
sslwp/bak/www-vortify/wp-content/plugins/caldera-forms/vendor/symfony/translation/Tests/fixtures/resources.ini

@@ -0,0 +1 @@
+foo="bar"

+ 20 - 0
sslwp/bak/www/mediawiki/docs/kss/scripts/kss-node-check.sh

@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+if command -v npm > /dev/null ; then
+  npm install
+else
+  # If npm isn't installed, but kss-node is, exit normally.
+  # This allows setting it up on one machine, and running it on
+  # another (e.g. Tools Labs execution nodes) that doesn't have npm
+  # installed.  However, "npm install" still needs to be run
+  # occasionally to keep kss updated.
+
+  KSS_NODE="${BASH_SOURCE%/*}/../node_modules/.bin/kss-node"
+  if ! [ -x "$KSS_NODE" ] ; then
+    echo "Neither kss-node nor npm are installed."
+    echo "To install npm, see http://nodejs.org/"
+    echo "When npm is installed, the Makefile can automatically"
+    echo "install kss-node."
+    exit 1
+  fi
+fi

+ 1 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor-old

@@ -0,0 +1 @@
+Subproject commit fa74ca1b12407f40ab35836ef5ddac133a5c9aa4

+ 55 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/listRecentCommits.sh

@@ -0,0 +1,55 @@
+#!/bin/bash -eu
+
+# This script generates a structured git log of commits to the VisualEditor-MediaWiki repository,
+# and walks the submodule updates to the lib/ve submodule and the OOjs and OOjs UI pull-through
+# build commits to detail all changes since a given branch point.
+
+# Using `git branch -a | grep wmf | sort -V` to automatically pick the latest branch version would
+# be nice here, but doesn't work because Mac OS X's version of sort is too old.
+
+# cd to the VisualEditor directory
+cd $(cd $(dirname $0)/..; pwd)
+
+# Ensure input is correct
+if [ -z "${1:-}" ]
+then
+	echo >&2 "Usage: listRecentCommits.sh <startBranch>"
+	exit 1
+fi
+STARTHASH=`git rev-parse $1`
+if [ "$?" -ne "0" ]
+then
+	echo >&2 "Parameter is not a valid git branch"
+	exit 1
+fi
+
+echo "Listing changes since '$1' (hash: $STARTHASH)"
+echo ""
+
+LOCALCHANGES=`git log $1.. --oneline --no-merges --reverse --color=never |
+	egrep --color=never -v '(translatewiki|BrowserTest)'`
+
+# Iterate over lines matching "Update VE core submodule"
+while read -r CHANGE
+do
+	printf "$CHANGE\n"
+
+	if [[ $CHANGE == *"Update VE core submodule"* ]]
+	then
+		CHANGEHASH=`cut -f1 -d' ' <<< $CHANGE`
+
+		SUBCHANGES=`git log --format=%B -n1 $CHANGEHASH -- |
+			sed -n -e '/New changes/,/^$/p' |
+			tail -n +2 |
+			sed -e '$ d' |
+			grep --color=never -v 'translatewiki'`
+		while read -r SUBCHANGE
+		do
+			printf "\t$SUBCHANGE\n"
+		done <<< "$SUBCHANGES"
+
+		# Extra new-line between sub-module pulls for clarity
+		printf "\n"
+	fi
+done <<< "$LOCALCHANGES"
+exit

+ 23 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/pre-commit.sh

@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+# If the VE core sub-module was touched
+if git diff --quiet --cached lib/ve; then
+
+    GITBRANCH=`git rev-parse --abbrev-ref HEAD`;
+
+    # … and it doesn't look like the commit came from the 'sync-repos' branch
+    if [[ $GITBRANCH != "sync-repos" ]]; then
+        echo "VE core sub-module was touched but commit isn't from 'sync-repos'.";
+        exit 1;
+    fi
+
+fi
+
+# Stash any uncommitted changes
+git stash -q --keep-index
+
+npm install || git stash pop -q && exit 1
+npm test && git add -u .docs/* || git stash pop -q && exit 1
+
+# Re-apply any uncommitted changes
+git stash pop -q

+ 68 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/bin/updateSubmodule.sh

@@ -0,0 +1,68 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates the lib/ve submodule
+# ./bin/updateSubmodule.sh        updates to master
+# ./bin/updateSubmodule.sh hash   updates to specified hash
+
+# cd to the VisualEditor directory
+cd $(cd $(dirname $0)/..; pwd)
+
+# Check that both working directories are clean
+if git status -uno --ignore-submodules | grep -i changes > /dev/null
+then
+	echo >&2 "Working directory must be clean"
+	exit 1
+fi
+cd lib/ve
+if git status -uno --ignore-submodules | grep -i changes > /dev/null
+then
+	echo >&2 "lib/ve working directory must be clean"
+	exit 1
+fi
+cd ../..
+
+git fetch origin
+# Create sync-repos branch if needed and reset it to master
+git checkout -B sync-repos origin/master
+git submodule update
+cd lib/ve
+git fetch origin
+
+# Figure out what to set the submodule to
+if [ -n "${1:-}" ]
+then
+	TARGET="$1"
+	TARGETDESC="$1"
+else
+	TARGET=origin/master
+	TARGETDESC="master ($(git rev-parse --short origin/master))"
+fi
+
+# Generate commit summary
+# TODO recurse
+NEWCHANGES=$(git log ..$TARGET --oneline --no-merges --reverse --color=never)
+NEWCHANGESDISPLAY=$(git log ..$TARGET --oneline --no-merges --reverse --color=always)
+COMMITMSG=$(cat <<END
+Update VE core submodule to $TARGETDESC
+
+New changes:
+$NEWCHANGES
+END
+)
+# Check out master of VE core
+git checkout $TARGET
+
+# Commit
+cd ../..
+git commit lib/ve -m "$COMMITMSG" > /dev/null
+if [ "$?" == "1" ]
+then
+	echo >&2 "No changes"
+else
+	cat >&2 <<END
+
+
+Created commit with changes:
+$NEWCHANGESDISPLAY
+END
+fi

+ 41 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/sync-gh-pages.sh

@@ -0,0 +1,41 @@
+#!/bin/bash -eu
+
+# This script builds a new gh-pages branch from latest master
+
+cd "$(dirname $0)/.."
+git fetch origin
+git checkout -B gh-pages origin/master
+git reset --hard origin/master
+
+git clean -dffx
+# Run npm-install to fetch qunitjs and build dist/
+npm install
+
+html='<!DOCTYPE html>
+<meta charset="utf-8">
+<title>VisualEditor</title>
+<link rel=stylesheet href="lib/oojs-ui/oojs-ui-apex.css">
+<link rel=stylesheet href="demos/ve/demo.css">
+<style>
+	article {
+		margin: 1em auto;
+		width: 45em;
+		max-width: 80%;
+		text-align: center;
+	}
+	article img {
+		max-width: 100%;
+	}
+</style>
+<article>
+	<img src="demos/ve/VisualEditor-logo.svg" alt="VisualEditor logo">
+	<div class="oo-ui-widget oo-ui-widget-enabled oo-ui-buttonElement oo-ui-buttonElement-framed oo-ui-labelElement oo-ui-buttonWidget"><a role="button" href="demos/ve/desktop-dist.html" tabindex="0" class="oo-ui-buttonElement-button"><span class="oo-ui-labelElement-label">Demo</span></a></div></a>
+	<div class="oo-ui-widget oo-ui-widget-enabled oo-ui-buttonElement oo-ui-buttonElement-framed oo-ui-labelElement oo-ui-buttonWidget"><a role="button" href="tests/" tabindex="0" class="oo-ui-buttonElement-button"><span class="oo-ui-labelElement-label">Test suite</span></a></div>
+</article>'
+echo "$html" > index.html
+
+git add index.html
+git add -f node_modules/qunitjs dist/
+
+git commit -m "Create gh-pages branch"
+git push origin -f HEAD

+ 63 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-oojs-ui.sh

@@ -0,0 +1,63 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates our copy of OOjs UI
+
+if [ -n "${2:-}" ]
+then
+	# Too many parameters
+	echo >&2 "Usage: $0 [<version>]"
+	exit 1
+fi
+
+REPO_DIR=$(cd "$(dirname $0)/.."; pwd) # Root dir of the git repo working tree
+TARGET_DIR="lib/oojs-ui" # Destination relative to the root of the repo
+NPM_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'update-oojs-ui') # e.g. /tmp/update-oojs-ui.rI0I5Vir
+
+# Prepare working tree
+cd "$REPO_DIR"
+git reset -- $TARGET_DIR
+git checkout -- $TARGET_DIR
+git fetch origin
+git checkout -B upstream-oojs-ui origin/master
+
+# Fetch upstream version
+cd $NPM_DIR
+if [ -n "${1:-}" ]
+then
+	npm install "oojs-ui@$1"
+else
+	npm install oojs-ui
+fi
+
+OOJSUI_VERSION=$(node -e 'console.log(require("./node_modules/oojs-ui/package.json").version);')
+if [ "$OOJSUI_VERSION" == "" ]
+then
+	echo 'Could not find OOjs UI version'
+	exit 1
+fi
+
+# Copy files
+# - Exclude the minimised distribution files
+# - Support: IE9
+#   VE requires SVG support, but IE9 doesn't support the CSS background fallback
+#   so ends up using the PNGs. Otherwise they would not be required.
+rsync --force --recursive --delete --exclude 'oojs-ui*.min.*' --exclude 'oojs-ui.js' ./node_modules/oojs-ui/dist/ "$REPO_DIR/$TARGET_DIR"
+
+# Clean up temporary area
+rm -rf "$NPM_DIR"
+
+# Generate commit
+cd $REPO_DIR
+
+COMMITMSG=$(cat <<END
+Update OOjs UI to v$OOJSUI_VERSION
+
+Release notes:
+ https://git.wikimedia.org/blob/oojs%2Fui.git/v$OOJSUI_VERSION/History.md
+END
+)
+
+# Stage deletion, modification and creation of files. Then commit.
+git add --update $TARGET_DIR
+git add $TARGET_DIR
+git commit -m "$COMMITMSG"

+ 59 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-oojs.sh

@@ -0,0 +1,59 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates our copy of OOjs
+
+if [ -n "${2:-}" ]
+then
+	# Too many parameters
+	echo >&2 "Usage: $0 [<version>]"
+	exit 1
+fi
+
+REPO_DIR=$(cd "$(dirname $0)/.."; pwd) # Root dir of the git repo working tree
+TARGET_DIR="lib/oojs" # Destination relative to the root of the repo
+NPM_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'update-oojs') # e.g. /tmp/update-oojs.rI0I5Vir
+
+# Prepare working tree
+cd "$REPO_DIR"
+git reset -- $TARGET_DIR
+git checkout -- $TARGET_DIR
+git fetch origin
+git checkout -B upstream-oojs origin/master
+
+# Fetch upstream version
+cd $NPM_DIR
+if [ -n "${1:-}" ]
+then
+	npm install "oojs@$1"
+else
+	npm install oojs
+fi
+
+OOJS_VERSION=$(node -e 'console.log(require("./node_modules/oojs/package.json").version);')
+if [ "$OOJS_VERSION" == "" ]
+then
+	echo 'Could not find OOjs version'
+	exit 1
+fi
+
+# Copy file(s)
+rsync --force ./node_modules/oojs/dist/oojs.jquery.js "$REPO_DIR/$TARGET_DIR"
+
+# Clean up temporary area
+rm -rf "$NPM_DIR"
+
+# Generate commit
+cd $REPO_DIR
+
+COMMITMSG=$(cat <<END
+Update OOjs to v$OOJS_VERSION
+
+Release notes:
+ https://git.wikimedia.org/blob/oojs%2Fcore.git/v$OOJS_VERSION/History.md
+END
+)
+
+# Stage deletion, modification and creation of files. Then commit.
+git add --update $TARGET_DIR
+git add $TARGET_DIR
+git commit -m "$COMMITMSG"

+ 59 - 0
sslwp/bak/www/mediawiki/extensions/VisualEditor/lib/ve/bin/update-unicodejs.sh

@@ -0,0 +1,59 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates our copy of UnicodeJS
+
+if [ -n "${2:-}" ]
+then
+	# Too many parameters
+	echo >&2 "Usage: $0 [<version>]"
+	exit 1
+fi
+
+REPO_DIR=$(cd "$(dirname $0)/.."; pwd) # Root dir of the git repo working tree
+TARGET_DIR="lib/unicodejs" # Destination relative to the root of the repo
+NPM_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'update-unicodejs') # e.g. /tmp/update-unicodejs.rI0I5Vir
+
+# Prepare working tree
+cd "$REPO_DIR"
+git reset -- $TARGET_DIR
+git checkout -- $TARGET_DIR
+git fetch origin
+git checkout -B upstream-unicodejs origin/master
+
+# Fetch upstream version
+cd $NPM_DIR
+if [ -n "${1:-}" ]
+then
+	npm install "unicodejs@$1"
+else
+	npm install unicodejs
+fi
+
+UNICODEJS_VERSION=$(node -e 'console.log(require("./node_modules/unicodejs/package.json").version);')
+if [ "$UNICODEJS_VERSION" == "" ]
+then
+	echo 'Could not find UnicodeJS version'
+	exit 1
+fi
+
+# Copy file(s)
+rsync --force ./node_modules/unicodejs/dist/unicodejs.js "$REPO_DIR/$TARGET_DIR"
+
+# Clean up temporary area
+rm -rf "$NPM_DIR"
+
+# Generate commit
+cd $REPO_DIR
+
+COMMITMSG=$(cat <<END
+Update UnicodeJS to v$UNICODEJS_VERSION
+
+Release notes:
+ https://git.wikimedia.org/blob/unicodejs.git/v$UNICODEJS_VERSION/History.md
+END
+)
+
+# Stage deletion, modification and creation of files. Then commit.
+git add --update $TARGET_DIR
+git add $TARGET_DIR
+git commit -m "$COMMITMSG"

+ 122 - 0
sslwp/bak/www/mediawiki/includes/limit.sh

@@ -0,0 +1,122 @@
+#!/bin/bash
+#
+# Resource limiting wrapper for command execution
+#
+# Why is this in shell script? Because bash has a setrlimit() wrapper
+# and is available on most Linux systems. If Perl was distributed with
+# BSD::Resource included, we would happily use that instead, but it isn't.
+
+# Clean up cgroup
+cleanup() {
+	# First we have to move the current task into a "garbage" group, otherwise
+	# the cgroup will not be empty, and attempting to remove it will fail with
+	# "Device or resource busy"
+	if [ -w "$MW_CGROUP"/tasks ]; then
+		GARBAGE="$MW_CGROUP"
+	else
+		GARBAGE="$MW_CGROUP"/garbage-`id -un`
+		if [ ! -e "$GARBAGE" ]; then
+			mkdir -m 0700 "$GARBAGE"
+		fi
+	fi
+	echo $BASHPID > "$GARBAGE"/tasks
+
+	# Suppress errors in case the cgroup has disappeared due to a release script
+	rmdir "$MW_CGROUP"/$$ 2>/dev/null
+}
+
+updateTaskCount() {
+	# There are lots of ways to count lines in a file in shell script, but this
+	# is one of the few that doesn't create another process, which would
+	# increase the returned number of tasks.
+	readarray < "$MW_CGROUP"/$$/tasks
+	NUM_TASKS=${#MAPFILE[*]}
+}
+
+log() {
+	echo limit.sh: "$*" >&3
+	echo limit.sh: "$*" >&2
+}
+
+MW_INCLUDE_STDERR=
+MW_USE_LOG_PIPE=
+MW_CPU_LIMIT=0
+MW_CGROUP=
+MW_MEM_LIMIT=0
+MW_FILE_SIZE_LIMIT=0
+MW_WALL_CLOCK_LIMIT=0
+
+# Override settings
+eval "$2"
+
+if [ -n "$MW_INCLUDE_STDERR" ]; then
+	exec 2>&1
+fi
+if [ -z "$MW_USE_LOG_PIPE" ]; then
+	# Open a dummy log FD
+	exec 3>/dev/null
+fi
+
+if [ "$MW_CPU_LIMIT" -gt 0 ]; then
+	ulimit -t "$MW_CPU_LIMIT"
+fi
+if [ "$MW_MEM_LIMIT" -gt 0 ]; then
+	if [ -n "$MW_CGROUP" ]; then
+		# Create cgroup
+		if ! mkdir -m 0700 "$MW_CGROUP"/$$; then
+			log "failed to create the cgroup."
+			MW_CGROUP=""
+		fi
+	fi
+	if [ -n "$MW_CGROUP" ]; then
+		echo $$ > "$MW_CGROUP"/$$/tasks
+		if [ -n "$MW_CGROUP_NOTIFY" ]; then
+			echo "1" > "$MW_CGROUP"/$$/notify_on_release
+		fi
+		# Memory
+		echo $(($MW_MEM_LIMIT*1024)) > "$MW_CGROUP"/$$/memory.limit_in_bytes
+		# Memory+swap
+		# This will be missing if there is no swap
+		if [ -e "$MW_CGROUP"/$$/memory.memsw.limit_in_bytes ]; then
+			echo $(($MW_MEM_LIMIT*1024)) > "$MW_CGROUP"/$$/memory.memsw.limit_in_bytes
+		fi
+	else
+		ulimit -v "$MW_MEM_LIMIT"
+	fi
+else
+	MW_CGROUP=""
+fi
+if [ "$MW_FILE_SIZE_LIMIT" -gt 0 ]; then
+	ulimit -f "$MW_FILE_SIZE_LIMIT"
+fi
+if [ "$MW_WALL_CLOCK_LIMIT" -gt 0 -a -x "/usr/bin/timeout" ]; then
+	/usr/bin/timeout $MW_WALL_CLOCK_LIMIT /bin/bash -c "$1" 3>&-
+	STATUS="$?"
+	if [ "$STATUS" == 124 ]; then
+		log "timed out executing command \"$1\""
+	fi
+else
+	eval "$1" 3>&-
+	STATUS="$?"
+fi
+
+if [ -n "$MW_CGROUP" ]; then
+	updateTaskCount
+
+	if [ $NUM_TASKS -gt 1 ]; then
+		# Spawn a monitor process which will continue to poll for completion
+		# of all processes in the cgroup after termination of the parent shell
+		(
+			while [ $NUM_TASKS -gt 1 ]; do
+				sleep 10
+				updateTaskCount
+			done
+			cleanup
+		) >&/dev/null < /dev/null 3>&- &
+		disown -a
+	else
+		cleanup
+	fi
+fi
+exit "$STATUS"
+

+ 14 - 0
sslwp/bak/www/mediawiki/maintenance/dev/includes/php.sh

@@ -0,0 +1,14 @@
+# Include-able script to determine the location of our php if any
+# We search for an environment var called PHP, native php,
+# a local copy, home directory location used by installphp.sh
+# and previous home directory location
+# The binary path is returned in $PHP if any
+
+for binary in $PHP $(which php || true) "$DEV/php/bin/php" "$HOME/.mediawiki/php/bin/php" "$HOME/.mwphp/bin/php" ]; do
+	if [ -x "$binary" ]; then
+		if "$binary" -r 'exit((int)!version_compare(PHP_VERSION, "5.4", ">="));'; then
+			PHP="$binary"
+			break
+		fi
+	fi
+done

+ 8 - 0
sslwp/bak/www/mediawiki/maintenance/dev/includes/require-php.sh

@@ -0,0 +1,8 @@
+# Include-able script to require that we have a known php binary we can execute
+
+. "$DEV/includes/php.sh"
+
+if [ "x$PHP" == "x" -o ! -x "$PHP" ]; then
+	echo "Local copy of PHP is not installed"
+	exit 1
+fi

+ 8 - 0
sslwp/bak/www/mediawiki/maintenance/dev/install.sh

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+if [ "x$BASH_SOURCE" == "x" ]; then echo '$BASH_SOURCE not set'; exit 1; fi
+DEV=$(cd -P "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
+
+"$DEV/installphp.sh"
+"$DEV/installmw.sh"
+"$DEV/start.sh"

+ 18 - 0
sslwp/bak/www/mediawiki/maintenance/dev/installmw.sh

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [ "x$BASH_SOURCE" == "x" ]; then echo '$BASH_SOURCE not set'; exit 1; fi
+DEV=$(cd -P "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
+
+. "$DEV/includes/require-php.sh"
+
+set -e
+
+PORT=4881
+
+cd "$DEV/../../"; # $IP
+
+mkdir -p "$DEV/data"
+"$PHP" maintenance/install.php --server="http://localhost:$PORT" --scriptpath="" --dbtype=sqlite --dbpath="$DEV/data" --pass=admin "Trunk Test" "$USER"
+echo ""
+echo "Development wiki created with admin user $USER and password 'admin'."
+echo ""

+ 57 - 0
sslwp/bak/www/mediawiki/maintenance/dev/installphp.sh

@@ -0,0 +1,57 @@
+#!/bin/bash
+
+if [ "x$BASH_SOURCE" == "x" ]; then echo '$BASH_SOURCE not set'; exit 1; fi
+DEV=$(cd -P "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
+
+set -e # DO NOT USE PIPES unless this is rewritten
+
+. "$DEV/includes/php.sh"
+
+if [ "x$PHP" != "x" -a -x "$PHP" ]; then
+	echo "PHP is already installed"
+	exit 0
+fi
+
+TAR=php5.4-latest.tar.gz
+PHPURL="http://snaps.php.net/$TAR"
+
+cd "$DEV"
+
+echo "Preparing to download and install a local copy of PHP 5.4, note that this can take some time to do."
+echo "If you wish to avoid re-doing this for uture dev installations of MediaWiki we suggest installing php in ~/.mediawiki/php"
+echo -n "Install PHP in ~/.mediawiki/php [y/N]: "
+read INSTALLINHOME
+
+case "$INSTALLINHOME" in
+	[Yy] | [Yy][Ee][Ss] )
+		PREFIX="$HOME/.mediawiki/php"
+		;;
+	*)
+		PREFIX="$DEV/php/"
+		;;
+esac
+
+# Some debian-like systems bundle wget but not curl, some other systems
+# like os x bundle curl but not wget... use whatever is available
+echo -n "Downloading PHP 5.4"
+if command -v wget &>/dev/null; then
+	echo "- using wget"
+	wget "$PHPURL"
+elif command -v curl &>/dev/null; then
+	echo "- using curl"
+	curl -O "$PHPURL"
+else
+	echo "- aborting"
+	echo "Could not find curl or wget." >&2;
+	exit 1;
+fi
+
+echo "Extracting php 5.4"
+tar -xzf "$TAR"
+
+cd php5.4-*/
+
+echo "Configuring and installing php 5.4 in $PREFIX"
+./configure --prefix="$PREFIX"
+make
+make install

+ 14 - 0
sslwp/bak/www/mediawiki/maintenance/dev/start.sh

@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [ "x$BASH_SOURCE" == "x" ]; then echo '$BASH_SOURCE not set'; exit 1; fi
+DEV=$(cd -P "$(dirname "${BASH_SOURCE[0]}" )" && pwd)
+
+. "$DEV/includes/require-php.sh"
+
+PORT=4881
+
+echo "Starting up MediaWiki at http://localhost:$PORT/"
+echo ""
+
+cd "$DEV/../../"; # $IP
+"$PHP" -S "localhost:$PORT" "$DEV/includes/router.php"

+ 84 - 0
sslwp/bak/www/mediawiki/maintenance/resources/update-oojs-ui.sh

@@ -0,0 +1,84 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates our copy of OOjs UI
+
+if [ -n "${2:-}" ]
+then
+	# Too many parameters
+	echo >&2 "Usage: $0 [<version>]"
+	exit 1
+fi
+
+REPO_DIR=$(cd "$(dirname $0)/../.."; pwd) # Root dir of the git repo working tree
+TARGET_DIR="resources/lib/oojs-ui" # Destination relative to the root of the repo
+NPM_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'update-oojs-ui') # e.g. /tmp/update-oojs-ui.rI0I5Vir
+
+# Prepare working tree
+cd "$REPO_DIR"
+git reset composer.json
+git checkout composer.json
+git reset -- $TARGET_DIR
+git checkout -- $TARGET_DIR
+git fetch origin
+git checkout -B upstream-oojs-ui origin/master
+
+# Fetch upstream version
+cd $NPM_DIR
+if [ -n "${1:-}" ]
+then
+	npm install "oojs-ui@$1"
+else
+	npm install oojs-ui
+fi
+
+OOJSUI_VERSION=$(node -e 'console.log(require("./node_modules/oojs-ui/package.json").version);')
+if [ "$OOJSUI_VERSION" == "" ]
+then
+	echo 'Could not find OOjs UI version'
+	exit 1
+fi
+
+# Copy files, picking the necessary ones from source and distribution
+rm -r "$REPO_DIR/$TARGET_DIR"
+mkdir -p "$REPO_DIR/$TARGET_DIR/i18n"
+mkdir -p "$REPO_DIR/$TARGET_DIR/images"
+mkdir -p "$REPO_DIR/$TARGET_DIR/themes/mediawiki/images"
+mkdir -p "$REPO_DIR/$TARGET_DIR/themes/apex/images"
+cp ./node_modules/oojs-ui/dist/oojs-ui-core.js "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-core-{mediawiki,apex}.css "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-widgets.js "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-widgets-{mediawiki,apex}.css "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-toolbars.js "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-toolbars-{mediawiki,apex}.css "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-windows.js "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-windows-{mediawiki,apex}.css "$REPO_DIR/$TARGET_DIR"
+cp ./node_modules/oojs-ui/dist/oojs-ui-{mediawiki,apex}.js "$REPO_DIR/$TARGET_DIR"
+cp -R ./node_modules/oojs-ui/dist/i18n "$REPO_DIR/$TARGET_DIR"
+cp -R ./node_modules/oojs-ui/dist/images "$REPO_DIR/$TARGET_DIR"
+cp -R ./node_modules/oojs-ui/dist/themes/mediawiki/images "$REPO_DIR/$TARGET_DIR/themes/mediawiki"
+cp ./node_modules/oojs-ui/src/themes/mediawiki/*.json "$REPO_DIR/$TARGET_DIR/themes/mediawiki"
+cp -R ./node_modules/oojs-ui/dist/themes/apex/images "$REPO_DIR/$TARGET_DIR/themes/apex"
+cp ./node_modules/oojs-ui/src/themes/apex/*.json "$REPO_DIR/$TARGET_DIR/themes/apex"
+
+# Clean up temporary area
+rm -rf "$NPM_DIR"
+
+# Generate commit
+cd $REPO_DIR
+
+COMMITMSG=$(cat <<END
+Update OOjs UI to v$OOJSUI_VERSION
+
+Release notes:
+ https://phabricator.wikimedia.org/diffusion/GOJU/browse/master/History.md;v$OOJSUI_VERSION
+END
+)
+
+# Update composer.json as well
+composer require oojs/oojs-ui $OOJSUI_VERSION --no-update
+
+# Stage deletion, modification and creation of files. Then commit.
+git add --update $TARGET_DIR
+git add $TARGET_DIR
+git add composer.json
+git commit -m "$COMMITMSG"

+ 59 - 0
sslwp/bak/www/mediawiki/maintenance/resources/update-oojs.sh

@@ -0,0 +1,59 @@
+#!/bin/bash -eu
+
+# This script generates a commit that updates our copy of OOjs
+
+if [ -n "${2:-}" ]
+then
+	# Too many parameters
+	echo >&2 "Usage: $0 [<version>]"
+	exit 1
+fi
+
+# Quote every path expansion below: with -u unset vars abort, but unquoted
+# expansions would still word-split on paths containing spaces.
+REPO_DIR=$(cd "$(dirname "$0")/../.."; pwd) # Root dir of the git repo working tree
+TARGET_DIR="resources/lib/oojs" # Destination relative to the root of the repo
+NPM_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'update-oojs') # e.g. /tmp/update-oojs.rI0I5Vir
+
+# Prepare working tree: discard staged/local changes under the target dir,
+# then base the update branch on the latest upstream master.
+cd "$REPO_DIR"
+git reset -- "$TARGET_DIR"
+git checkout -- "$TARGET_DIR"
+git fetch origin
+git checkout -B upstream-oojs origin/master
+
+# Fetch upstream version ($1 if given, otherwise the latest npm release)
+cd "$NPM_DIR"
+if [ -n "${1:-}" ]
+then
+	npm install "oojs@$1"
+else
+	npm install oojs
+fi
+
+OOJS_VERSION=$(node -e 'console.log(require("./node_modules/oojs/package.json").version);')
+if [ "$OOJS_VERSION" == "" ]
+then
+	echo 'Could not find OOjs version'
+	exit 1
+fi
+
+# Copy file(s)
+rsync --force ./node_modules/oojs/dist/oojs.jquery.js "$REPO_DIR/$TARGET_DIR"
+
+# Clean up temporary area
+rm -rf "$NPM_DIR"
+
+# Generate commit
+cd "$REPO_DIR"
+
+COMMITMSG=$(cat <<END
+Update OOjs to v$OOJS_VERSION
+
+Release notes:
+ https://phabricator.wikimedia.org/diffusion/GOJS/browse/master/History.md;v$OOJS_VERSION
+END
+)
+
+# Stage deletion, modification and creation of files. Then commit.
+git add --update "$TARGET_DIR"
+git add "$TARGET_DIR"
+git commit -m "$COMMITMSG"

+ 38 - 0
sslwp/bak/www/mediawiki/vendor/firebase/php-jwt/run-tests.sh

@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Verify the PHPUnit PHAR against Sebastian Bergmann's release key before
+# running the test suite; an unverifiable PHAR is quarantined, not executed.
+# NOTE: the shebang must be the first line of the file — the original had a
+# blank line above it, which makes the kernel ignore the interpreter line.
+gpg --fingerprint D8406D0D82947747293778314AA394086372C20A
+if [ $? -ne 0 ]; then
+    echo -e "\033[33mDownloading PGP Public Key...\033[0m"
+    gpg --recv-keys D8406D0D82947747293778314AA394086372C20A
+    # Sebastian Bergmann <sb@sebastian-bergmann.de>
+    gpg --fingerprint D8406D0D82947747293778314AA394086372C20A
+    if [ $? -ne 0 ]; then
+        echo -e "\033[31mCould not download PGP public key for verification\033[0m"
+        # Fail explicitly: a bare `exit` returns 0 and would let callers
+        # believe the tests passed even though nothing was verified or run.
+        exit 1
+    fi
+fi
+
+# Let's grab the latest release and its signature
+if [ ! -f phpunit.phar ]; then
+    wget https://phar.phpunit.de/phpunit.phar
+fi
+if [ ! -f phpunit.phar.asc ]; then
+    wget https://phar.phpunit.de/phpunit.phar.asc
+fi
+
+# Verify before running
+gpg --verify phpunit.phar.asc phpunit.phar
+if [ $? -eq 0 ]; then
+    echo
+    echo -e "\033[33mBegin Unit Testing\033[0m"
+    # Run the testing suite
+    php --version
+    php phpunit.phar --configuration phpunit.xml.dist
+else
+    echo
+    chmod -x phpunit.phar
+    mv phpunit.phar /tmp/bad-phpunit.phar
+    mv phpunit.phar.asc /tmp/bad-phpunit.phar.asc
+    echo -e "\033[31mSignature did not match! PHPUnit has been moved to /tmp/bad-phpunit.phar\033[0m"
+    exit 1
+fi

+ 3 - 0
sslwp/bak/www/mediawiki/vendor/pear/net_smtp/phpdoc.sh

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+# Generate the Net_SMTP package API documentation into docs/api using
+# phpDocumentor, parsing Net/SMTP.php and pulling examples from examples/.
+phpdoc -f Net/SMTP.php -t docs/api -p -ti "Net_SMTP Package API" -dn Net_SMTP -dc Net_SMTP -ed examples

+ 5 - 0
sslwp/bak/www/mediawiki/vendor/pear/pear-core-minimal/copy-from-pear-core.sh

@@ -0,0 +1,5 @@
+#!/bin/sh
+cp ../pear-core/OS/Guess.php        src/OS/Guess.php
+cp ../pear-core/PEAR.php            src/PEAR.php
+cp ../pear-core/PEAR/ErrorStack.php src/PEAR/ErrorStack.php
+cp ../pear-core/System.php          src/System.php

+ 63 - 0
sslwp/bak/www/mediawiki/vendor/ruflin/elastica/ansible/provision.sh

@@ -0,0 +1,63 @@
+#!/bin/bash
+
+set -o xtrace
+
+# Install a pinned Ansible release via pip and target localhost.
+install_ansible() {
+    sudo apt-get update
+    sudo apt-get install python python-pip python-dev -y
+    sudo pip install ansible==1.8.2
+    sudo mkdir -p /etc/ansible/
+    echo "localhost" | sudo tee /etc/ansible/hosts
+}
+
+# Run the Elasticsearch playbook; fail if its log contains FATAL/ERROR.
+run_playbook() {
+    # Write to stdout directly
+    export PYTHONUNBUFFERED=1
+
+    # No cows >_<
+    export ANSIBLE_NOCOWS=1
+
+    # Root of git repo
+    if [ -z "$ES_PROJECT_ROOT" ]; then
+        export ES_PROJECT_ROOT="$(dirname "$(dirname "$(readlink -f "$0")")")"
+    fi
+
+    # BUG FIX: `[ ! -x $(which ansible-playbook) ]` was always false when
+    # `which` printed nothing — a lone `-x` is a non-empty string and thus
+    # truthy — so a missing binary was never detected. Test the lookup itself.
+    if ! command -v ansible-playbook >/dev/null 2>&1; then
+        echo "Ansible is not installed"
+        return 1
+    fi
+
+    ansible-playbook "$ES_PROJECT_ROOT/ansible/es-playbook.yml" -v | tee /tmp/ansible-playbook-progress
+
+    if grep -q "FATAL\|ERROR" /tmp/ansible-playbook-progress; then
+        return 1
+    fi
+}
+
+# Both ES nodes must answer within 5 seconds each.
+check_cluster() {
+    curl -m 5 -s -o /dev/null "http://localhost:9200" &&
+    curl -m 5 -s -o /dev/null "http://localhost:9201"
+    return $?
+}
+
+travis_retry() {
+    # We don't use builtin Travis CI function, because this script is also used for vagrant provision.
+    # But main idea of restarts is so simple, so lets override it without name change.
+    # "$@" is quoted so arguments keep their word boundaries on every retry.
+
+    "$@" && return 0
+
+    echo "The command $* failed. Retrying, 2 of 3"
+    sleep 60s && "$@" && return 0
+
+    echo "The command $* failed. Retrying, 3 of 3"
+    sleep 60s && "$@" && return 0
+
+    echo "The command $* failed."
+    return 1
+}
+
+travis_retry install_ansible || exit 1
+
+travis_retry run_playbook || exit 1
+
+travis_retry check_cluster || exit 1

+ 27 - 0
sslwp/bak/www/mediawiki/vendor/ruflin/elastica/docker-compose.yml

@@ -0,0 +1,27 @@
+# Compose v1 topology (no `version:`/`services:` keys) for Elastica testing:
+# the elastica container links to an Elasticsearch node both directly and
+# through an nginx proxy; a data-only container holds snapshot directories.
+elastica:
+  #build: .
+  image: ruflin/elastica
+  ports:
+    - "9200:9200"
+  links:
+    - nginx
+    - elasticsearch
+  environment:
+    - ES_HOST=elasticsearch
+    - PROXY_HOST=nginx
+elasticsearch:
+  #build: ./env/elasticsearch/
+  image: ruflin/elasticsearch-elastica
+  volumes_from:
+    - data
+nginx:
+  #build: ./env/nginx/
+  image: ruflin/nginx-elastica
+  links:
+    - elasticsearch
+# data container to share data between elasticsearch nodes for snapshot testing
+data:
+  image: ruflin/elastica-data
+  volumes:
+    - "/tmp/backups/backup1"
+    - "/tmp/backups/backup2"

+ 78 - 0
sslwp/docker-compose.yml

@@ -0,0 +1,78 @@
+version: '3.1'
+
+services:
+
+  php:
+    image: php:7-fpm
+    container_name: ${CONTAINER_PREFIX}_php
+
+  web:
+    image: nginx
+    depends_on:
+      - php
+    restart: always
+    container_name: ${CONTAINER_PREFIX}_web
+    volumes:
+      - ${DATA_ROOT}/conf/nginx.conf:/etc/nginx/conf.d/default.conf
+      - ${DATA_ROOT}/logs:/var/log/nginx
+      - ${DATA_ROOT}/www:/var/www/html
+    ports:
+      # the router forwards internet traffic from 80 to the host machine
+      # where "web" is listening on exposed port 8280; the request is then
+      # processed according to the configurations in nginx.conf.
+      # Port mappings are quoted: unquoted digit:digit scalars can be read
+      # as base-60 integers by YAML 1.1 parsers.
+      - "8280:80"
+
+  https-portal:
+    image: steveltn/https-portal:1
+    container_name: ${CONTAINER_PREFIX}_https-portal
+    ports:
+      # the router forwards internet traffic from 443 to the host machine
+      # where https-portal is listening on 443
+      - "8080:80"
+      - "443:443"
+    depends_on:
+      - web
+    restart: always
+    volumes:
+      - ${DATA_ROOT}/ssl_certs:/var/lib/https-portal
+    environment:
+      # Earlier DOMAINS configurations, kept for reference:
+      #   dav.algometic.com->http://vsu-koala:80 #local
+      #   www.vortifytech.com->http://web:80 #production
+      #   cal.algometic.com->http://vsu-koala:80 #local
+      #   algometic.com->http://vsu-dolphin:80 #staging / #local
+      #   ocd.algometic.com->http://vorsprung:1234, code.algometic.com->http://vorsprung:9980
+      #   slot.algometic.com->http://vorsprung:9981, slot1234.algometic.com->http://vorsprung:1235
+
+      # NOTE(review): the continuation lines of this single-quoted scalar were
+      # previously indented with tab characters, which YAML forbids as
+      # indentation; they are spaces now. Folding strips the leading
+      # whitespace, so the parsed value is unchanged.
+      DOMAINS: '
+                algometic.com=>www.algometic.com,
+                www.algometic.com->http://vorsprung:8280 #production,
+                mango.algometic.com->http://vorsprung:8111 #production,
+                ocd.algometic.com->http://vorsprung:1235 #production,
+                code.algometic.com->http://vorsprung:9981 #production,
+                58985620.algometic.com->http://vorsprung:8055 #production,
+                melody.algometic.com->http://vorsprung:9045 #production,
+                metrics.algometic.com->http://vorsprung:3000 #production,
+                books.algometic.com->http://vorsprung:8783 #production,
+                syn.algometic.com->http://vorsprung:9081 #production,
+                slot.algometic.com->http://tufbunny:3000 #production,
+                '
+
+      CLIENT_MAX_BODY_SIZE: 64M
+ 2 - 0
transmission-gm/.env

@@ -0,0 +1,2 @@
+#DATA_VOLUME_ROOT=/media/laxaurus/wdcrypt/bt-binaries
+DATA_VOLUME_ROOT=/mnt/vd_images/bt-binaries

+ 20 - 0
transmission-gm/config/blocklist-update.sh

@@ -0,0 +1,20 @@
+#!/usr/bin/with-contenv bash
+
+BLOCKLIST_ENABLED=`jq -r '.["blocklist-enabled"]' /config/settings.json`
+BLOCKLIST_URL=`jq -r '.["blocklist-url"]' /config/settings.json | sed 's/\&amp;/\&/g'`
+
+if [ $BLOCKLIST_ENABLED == true ]; then
+	mkdir -p /tmp/blocklists
+	rm -rf /tmp/blocklists/*
+	cd /tmp/blocklists
+	wget -q -O blocklist.gz "$BLOCKLIST_URL"
+	if [ $? == 0 ]; then
+		gunzip *.gz
+		if [ $? == 0 ]; then
+			chmod go+r *
+			rm -rf /config/blocklists/*
+			cp /tmp/blocklists/* /config/blocklists
+			s6-svc -h /var/run/s6/services/transmission
+		fi
+	fi
+fi

+ 34 - 0
transmission-gm/docker-compose.yml

@@ -0,0 +1,34 @@
+version: "2"
+services:
+  transmission:
+    image: linuxserver/transmission
+    container_name: transmission-gm
+    environment:
+      # PUID and PGID should match the numeric uid/gid of the host user
+      # that owns the mounted volumes (find them with: id <username>).
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/London
+      - TRANSMISSION_WEB_HOME=/kettu/ #optional
+    volumes:
+      - ./config:/config
+      # local-directory alternatives:
+      # - ./downloads:/downloads
+      # - ./watch:/watch
+      - ${DATA_VOLUME_ROOT}/downloads:/downloads
+      - ${DATA_VOLUME_ROOT}/watch:/watch
+    ports:
+      # Quoted so YAML 1.1 parsers cannot read digit:digit mappings as
+      # base-60 integers.
+      - "9084:9091"
+      - "50324:51413"
+      - "50324:51413/udp"
+    # alternatives: unless-stopped / always
+    restart: "no"
+    networks:
+      - webnet
+
+networks:
+  webnet:
+

+ 1 - 0
transmission-sbt/.env

@@ -0,0 +1 @@
+DATA_VOLUME_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/crm

+ 20 - 0
transmission-sbt/config/blocklist-update.sh

@@ -0,0 +1,20 @@
+#!/usr/bin/with-contenv bash
+
+BLOCKLIST_ENABLED=`jq -r '.["blocklist-enabled"]' /config/settings.json`
+BLOCKLIST_URL=`jq -r '.["blocklist-url"]' /config/settings.json | sed 's/\&amp;/\&/g'`
+
+if [ $BLOCKLIST_ENABLED == true ]; then
+	mkdir -p /tmp/blocklists
+	rm -rf /tmp/blocklists/*
+	cd /tmp/blocklists
+	wget -q -O blocklist.gz "$BLOCKLIST_URL"
+	if [ $? == 0 ]; then
+		gunzip *.gz
+		if [ $? == 0 ]; then
+			chmod go+r *
+			rm -rf /config/blocklists/*
+			cp /tmp/blocklists/* /config/blocklists
+			s6-svc -h /var/run/s6/services/transmission
+		fi
+	fi
+fi

+ 28 - 0
transmission-sbt/docker-compose.yml

@@ -0,0 +1,28 @@
+version: "2"
+services:
+  transmission:
+    image: linuxserver/transmission
+    container_name: transmission-sbt
+    environment:
+      # PUID/PGID should match the host user's uid/gid (see: id <username>).
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/London
+      # - TRANSMISSION_WEB_HOME=/combustion-release/ #optional
+    volumes:
+      - ./config:/config
+      # local-directory alternatives:
+      # - ./downloads:/downloads
+      # - ./watch:/watch
+      - ${DATA_VOLUME_ROOT}/crm-media-gbt/downloads:/downloads
+      - ${DATA_VOLUME_ROOT}/crm-media-gbt/watch:/watch
+    ports:
+      # Quoted so YAML 1.1 parsers cannot read digit:digit mappings as
+      # base-60 integers.
+      - "9187:9091"
+      - "51366:51413"
+      - "51366:51413/udp"
+    # restart: unless-stopped
+    restart: "no"
+    networks:
+      - webnet
+
+networks:
+  webnet:
+

+ 1 - 0
transmission/.env

@@ -0,0 +1 @@
+DATA_VOLUME_ROOT=/media/luks-0a551422-727b-43ac-bd0b-917193b2db77/crm

+ 20 - 0
transmission/config/blocklist-update.sh

@@ -0,0 +1,20 @@
+#!/usr/bin/with-contenv bash
+
+BLOCKLIST_ENABLED=`jq -r '.["blocklist-enabled"]' /config/settings.json`
+BLOCKLIST_URL=`jq -r '.["blocklist-url"]' /config/settings.json | sed 's/\&amp;/\&/g'`
+
+if [ $BLOCKLIST_ENABLED == true ]; then
+	mkdir -p /tmp/blocklists
+	rm -rf /tmp/blocklists/*
+	cd /tmp/blocklists
+	wget -q -O blocklist.gz "$BLOCKLIST_URL"
+	if [ $? == 0 ]; then
+		gunzip *.gz
+		if [ $? == 0 ]; then
+			chmod go+r *
+			rm -rf /config/blocklists/*
+			cp /tmp/blocklists/* /config/blocklists
+			s6-svc -h /var/run/s6/services/transmission
+		fi
+	fi
+fi

+ 34 - 0
transmission/docker-compose.yml

@@ -0,0 +1,34 @@
+version: "2"
+services:
+  transmission:
+    image: linuxserver/transmission
+    container_name: transmission
+    environment:
+      # PUID and PGID should match the numeric uid/gid of the host user
+      # that owns the mounted volumes (find them with: id <username>).
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/London
+      - TRANSMISSION_WEB_HOME=/combustion-release/ #optional
+    volumes:
+      - ./config:/config
+      # local-directory alternatives:
+      # - ./downloads:/downloads
+      # - ./watch:/watch
+      - ${DATA_VOLUME_ROOT}/crm-media-002/movies3/downloads:/downloads
+      - ${DATA_VOLUME_ROOT}/crm-media-002/movies3/watch:/watch
+    ports:
+      # Quoted so YAML 1.1 parsers cannot read digit:digit mappings as
+      # base-60 integers.
+      - "9081:9091"
+      - "51327:51413"
+      - "51327:51413/udp"
+    # alternatives: unless-stopped / always
+    restart: "no"
+    networks:
+      - webnet
+
+networks:
+  webnet:
+