Deployment
This page describes how to deploy Alhena in a production setting.
There are three separate setups involved; below we describe our specifications for each. You can also combine them onto one machine.
Loading Setup
- 16GB RAM, 8 CPUs
- python 3, venv, pip
ElasticSearch Setup
- 16GB RAM, 4 CPUs
- 1 mounted disk per node (~1TB each)
- docker and docker-compose
Webserver Setup
- 16GB RAM, 4 CPUs
- docker and docker-compose
- nginx
Elasticsearch
This setup describes a three-node Elasticsearch cluster (nodes named es01, es02, es03). Adjust this setup as needed.
In a new directory for your docker-compose environment, create a file called instances.yml with the contents:
instances:
  - name: es01
    dns:
      - es01
      - localhost
    ip:
      - 127.0.0.1
  - name: es02
    dns:
      - es02
      - localhost
    ip:
      - 127.0.0.1
  - name: es03
    dns:
      - es03
      - localhost
    ip:
      - 127.0.0.1
Create another file called .env with the following contents (change the password as needed):
COMPOSE_PROJECT_NAME=es
CERTS_DIR=/usr/share/elasticsearch/config/certificates
ELASTIC_PASSWORD=PleaseChangeMe
Create create-certs.yml with the contents:
version: '2.2'
services:
  create_certs:
    container_name: create_certs
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.0
    command: >
      bash -c '
        if [[ ! -f /certs/bundle.zip ]]; then
          bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip;
          unzip /certs/bundle.zip -d /certs;
        fi;
        chown -R 1000:0 /certs
      '
    user: "0"
    working_dir: /usr/share/elasticsearch
    volumes: ['certs:/certs', '.:/usr/share/elasticsearch/config/certificates']
volumes: {"certs"}
Lastly, create a docker-compose.yml file with the contents below (making sure you edit the places marked with !!! accordingly):
version: "3"
services:
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:<!!! version>
container_name: es01
environment:
- node.name=es01
- discovery.seed_hosts=es02,es03
- cluster.initial_master_nodes=es01,es02,es03
- cluster.name=docker-cluster
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- search.max_buckets=50000
- http.max_content_length=500mb
- "path.repo=/usr/share/elasticsearch/backup"
- ELASTIC_PASSWORD=$ELASTIC_PASSWORD
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es01/es01.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- <!!! path of one disk>:/usr/share/elasticsearch/data
- <!!! path of backup disk>:/usr/share/elasticsearch/backup
- certs:$CERTS_DIR
ports:
- 9200:9200
healthcheck:
test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
es02:
image: docker.elastic.co/elasticsearch/elasticsearch:<!!! version>
container_name: es02
environment:
- node.name=es02
- discovery.seed_hosts=es01,es03
- cluster.initial_master_nodes=es01,es02,es03
- cluster.name=docker-cluster
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- search.max_buckets=50000
- http.max_content_length=500mb
- "path.repo=/usr/share/elasticsearch/backup"
- ELASTIC_PASSWORD=$ELASTIC_PASSWORD
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es02/es02.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- <!!! path of second disk>:/usr/share/elasticsearch/data
- <!!! path of backup disk>:/usr/share/elasticsearch/backup
- certs:$CERTS_DIR
es03:
image: docker.elastic.co/elasticsearch/elasticsearch:<!!! version>
container_name: es03
environment:
- node.name=es03
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02,es03
- cluster.name=docker-cluster
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- search.max_buckets=50000
- http.max_content_length=500mb
- "path.repo=/usr/share/elasticsearch/backup"
- ELASTIC_PASSWORD=$ELASTIC_PASSWORD
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es03/es03.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es03/es03.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- <!!! path of third disk>:/usr/share/elasticsearch/data
- <!!! path of backup disk>:/usr/share/elasticsearch/backup
- certs:$CERTS_DIR
volumes:
certs:
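One caveat before starting: the Elasticsearch containers run as uid 1000, so the mounted data and backup paths must be writable by that user. A minimal sketch, assuming example mount points /mnt/disk1 and /mnt/backup (substitute the paths you used in the compose file):
sudo mkdir -p /mnt/disk1 /mnt/backup
sudo chown -R 1000:0 /mnt/disk1 /mnt/backup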
Now we generate the certificates:
docker-compose -f create-certs.yml run --rm create_certs
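To sanity-check that the bundle was generated, you can list the contents of the certs volume; the volume name below assumes COMPOSE_PROJECT_NAME=es from the .env file above:
docker run --rm -v es_certs:/certs busybox ls -R /certs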
Increase vm.max_map_count, which Elasticsearch requires (run as root, or with sudo):
sudo sysctl -w vm.max_map_count=262144
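This setting does not survive a reboot. To persist it, you can write it into sysctl's configuration (the exact path convention varies slightly by distro):
echo "vm.max_map_count=262144" | sudo tee /etc/sysctl.d/99-elasticsearch.conf
sudo sysctl --system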
Start the Elasticsearch instance:
docker-compose up -d
Curling the localhost URL with your credentials should return an introductory response:
curl -k -u elastic:PleaseChangeMe -X GET https://localhost:9200
Response:
{
  "name" : "es01",
  "cluster_name" : "docker-cluster",
  "cluster_uuid" : "7KG9Po4hRQ-5GglViidmnQ",
  "version" : {
    "number" : "7.9.0",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "a479a2a7fce0389512d6a9361301708b92dff667",
    "build_date" : "2020-08-11T21:36:48.204330Z",
    "build_snapshot" : false,
    "lucene_version" : "8.6.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
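You can also ask for cluster health via the standard _cluster/health API; with all three nodes up, the status should be green:
curl -k -u elastic:PleaseChangeMe https://localhost:9200/_cluster/health?pretty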
Loading
Set up a virtual environment on the loader server:
python3 -m venv <!!! /path/to/new/virtual/environment>
source <!!! /path/to/new/virtual/environment>/bin/activate
Install the loading code:
pip install -e git+https://github.com/shahcompbio/alhenaloader.git#egg=alhenaloader
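To confirm the install, pip can print the package metadata (the --help flag is an assumption on our part, but it is standard for Python CLIs):
pip show alhenaloader
alhenaloader --help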
Add these variables to your ~/.bash_profile (see the Elasticsearch setup for the values):
export ALHENA_ES_USER=elastic
export ALHENA_ES_PASSWORD=PleaseChangeMe
Run the initialization call; this will set up the default project (DLP) and any initial indices:
alhenaloader initialize
Now you should be able to load the test data:
alhenaloader --id TEST_123 load <path to test data>
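To verify that the load reached Elasticsearch, you can list the indices through the standard _cat API (credentials from your Elasticsearch setup):
curl -k -u elastic:PleaseChangeMe https://localhost:9200/_cat/indices?v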
Webserver
There are two parts to this setup: one is to get the app (both the GraphQL and React layers) running inside the server using Docker; the other is to connect the exposed port (localhost:5020, in this case) to the public URL using nginx.
First we will build the GraphQL layer. Check out the code:
git clone https://github.com/shahcompbio/alhena-graphql
Build the image:
docker build . -t alhena-graphql
Second, we will build the React layer. Check out the code:
git clone https://github.com/shahcompbio/alhena
In that directory, add an .env file with the following contents (change where necessary):
REACT_APP_BASENAME="/alhena"
PUBLIC_URL="https://<!!! URL>/alhena"
Build the Docker image:
docker build . -t alhena-react --build-arg BUILD_FILE=".env"
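At this point both images should be present locally:
docker images | grep alhena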
Lastly, we will set up the Docker containers. In a new directory, add the docker-compose.yml file:
version: "3"
services:
graphql:
container_name: alhena-graphql
image: alhena-graphql
env_file:
- graphql.env
frontend:
container_name: alhena-react
image: alhena-react
ports:
- "5020:80"
depends_on:
- graphql
redis:
image: redis
container_name: redis
ports:
- "6379:6379"
expose:
- 6379
volumes:
- /mnt/redis:/data
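The redis service persists its data to /mnt/redis on the host, so make sure that directory exists before starting:
sudo mkdir -p /mnt/redis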
In the same directory, add a graphql.env file (edit the !!! placeholders where appropriate):
REDIS_HOST=redis
REDIS_PORT=6379
ES_USER=elastic
ES_PASSWORD=<!!! Password for ES>
ELASTICSEARCH_NODE=https://<!!! URL>/alhena/db
REACT_APP_BASENAME=alhena
SERVER_NAME=<!!! URL>
You should be able to start the docker containers now:
docker-compose up -d
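To check that all three containers came up cleanly (container names taken from the compose file above):
docker-compose ps
docker logs --tail 20 alhena-graphql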
We set up our webserver with nginx. Instructions for installing it on your system are here: https://ubuntu.com/tutorials/install-and-configure-nginx#2-installing-nginx
Here, we will connect the URL's port 443 to the app. In our production instances, we do that by adding a new file called default.conf in /etc/nginx/conf.d/, but you may already have the server block specified in a different file, so adjust as needed.
upstream alhena-db {
    server <!!! internal IP to ES>:9200;
    keepalive 15;
}

upstream alhena {
    server localhost:5020;
    keepalive 15;
}

server {
    listen 80;
    listen [::]:80;
    server_name <!!! URL>;

    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name $hostname;

    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";

    ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
    ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
    #include snippets/ssl-params.conf;

    gzip on;
    gzip_vary on;
    gzip_types text/plain text/css text/javascript application/javascript application/x-javascript;

    client_max_body_size 2M;

    location /alhena/db/ {
        rewrite ^/alhena/db/(.*)$ /$1 break;
        # Elasticsearch has TLS enabled on its HTTP layer (see the setup above),
        # so the proxy must speak https to it
        proxy_pass https://alhena-db;
    }

    location /alhena/ {
        proxy_pass http://alhena/;
    }

    location ~* \.(eot|ttf|woff|woff2)$ {
        add_header Access-Control-Allow-Origin *;
    }

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}
After you make any changes to these files, you will need to restart the service to apply them. Depending on your system, that command might be something like "sudo service nginx restart", followed by "sudo service nginx status" to make sure it's running.
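Before restarting, it is worth validating the configuration; nginx -t catches syntax errors before they take the server down:
sudo nginx -t
sudo service nginx restart
sudo service nginx status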
The final step involves creating certificates for your domain or IP. If you plan on using an IP, you'll need to create a self-signed certificate using OpenSSL; more commonly, if you plan on using a domain, you can follow the instructions listed at https://certbot.eff.org/. The bot will install the certificates and alter your nginx file. Restart your nginx server afterwards.
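If you go the self-signed route, a minimal OpenSSL sketch (the output paths match the ssl_certificate lines in the nginx config above; adjust -days and the certificate fields to your needs):
sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout /etc/ssl/private/nginx-selfsigned.key \
    -out /etc/ssl/certs/nginx-selfsigned.crt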
Troubleshooting
Server errors
- Are your HTTP/HTTPS ports open?
- Are you the root user, or able to sudo? Many of these commands require root privileges.
Nginx issues
- Have you checked the error logs? (tail -30 /var/log/nginx/error.log)
- Have you tried checking the status of nginx to pinpoint an issue in the nginx conf file? (service nginx status)
Docker issues
- Are all containers started?
- Are all containers running?
- Have you tried pulling in new images?
- Is the graphql password set correctly in your graphql.env file?
- Are all the variables set correctly in the env files?
- Have you checked the docker logs? (see the commands after this list)
- Are the ES containers still starting up?
- Have you tried re-starting containers?
- Do your ES docker containers have access to your persistent data folders?
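A few commands that often help with the checklist above (container names from the compose files in this guide):
docker ps -a                                  # which containers exist, and their current states
docker logs --tail 50 es01                    # recent logs from one Elasticsearch node
docker logs --tail 50 alhena-graphql          # recent logs from the GraphQL container
docker-compose pull && docker-compose up -d   # pull fresh images and recreate the containers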