From 17d7c46afb0fcbe93adc34aedd2fd7ca38c4ea9b Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Fri, 18 Sep 2020 13:43:14 -0700 Subject: [PATCH] Initial commit --- .github/workflows/main.yml | 28 ++ .gitignore | 112 +++++ Dockerfile.oss | 53 +++ Dockerfile.plus | 61 +++ LICENSE.txt | 177 ++++++++ README.md | 105 +++++ .../00-check-for-required-env.sh | 83 ++++ common/etc/nginx/include/s3gateway.js | 400 ++++++++++++++++++ common/etc/nginx/nginx.conf | 52 +++ .../etc/nginx/templates/default.conf.template | 113 +++++ .../gateway/v2_headers.conf.template | 3 + .../gateway/v2_js_vars.conf.template | 4 + .../gateway/v4_headers.conf.template | 7 + .../gateway/v4_js_vars.conf.template | 4 + .../nginx/templates/upstreams.conf.template | 8 + .../10-listen-on-ipv6-by-default.sh | 77 ++++ .../20-envsubst-on-templates.sh | 48 +++ plus/docker-entrypoint.sh | 54 +++ .../nginx/templates/upstreams.conf.template | 16 + plus/etc/ssl/nginx/.gitignore | 2 + plus/usr/local/bin/add_nginx_plus_repo.sh | 53 +++ settings.example | 9 + test.sh | 201 +++++++++ test/data/bucket-1/a.txt | 1 + test/data/bucket-1/b/c/= | 2 + test/data/bucket-1/b/c/@ | 0 test/data/bucket-1/b/c/d.txt | 1 + test/data/bucket-1/b/e.txt | 1 + ...6\343\203\204\343\203\226\343\203\204.txt" | 1 + test/docker-compose.yaml | 45 ++ test/integration/test_api.sh | 115 +++++ test/unit/s3gateway_test.js | 154 +++++++ 32 files changed, 1990 insertions(+) create mode 100644 .github/workflows/main.yml create mode 100644 .gitignore create mode 100644 Dockerfile.oss create mode 100644 Dockerfile.plus create mode 100644 LICENSE.txt create mode 100644 README.md create mode 100644 common/docker-entrypoint.d/00-check-for-required-env.sh create mode 100644 common/etc/nginx/include/s3gateway.js create mode 100644 common/etc/nginx/nginx.conf create mode 100644 common/etc/nginx/templates/default.conf.template create mode 100644 common/etc/nginx/templates/gateway/v2_headers.conf.template create mode 100644 common/etc/nginx/templates/gateway/v2_js_vars.conf.template create mode 100644 common/etc/nginx/templates/gateway/v4_headers.conf.template create mode 100644 common/etc/nginx/templates/gateway/v4_js_vars.conf.template create mode 100644 oss/etc/nginx/templates/upstreams.conf.template create mode 100644 plus/docker-entrypoint.d/10-listen-on-ipv6-by-default.sh create mode 100644 plus/docker-entrypoint.d/20-envsubst-on-templates.sh create mode 100644 plus/docker-entrypoint.sh create mode 100644 plus/etc/nginx/templates/upstreams.conf.template create mode 100644 plus/etc/ssl/nginx/.gitignore create mode 100644 plus/usr/local/bin/add_nginx_plus_repo.sh create mode 100644 settings.example create mode 100755 test.sh create mode 100644 test/data/bucket-1/a.txt create mode 100644 test/data/bucket-1/b/c/= create mode 100644 test/data/bucket-1/b/c/@ create mode 100644 test/data/bucket-1/b/c/d.txt create mode 100644 test/data/bucket-1/b/e.txt create mode 100644 "test/data/bucket-1/b/\343\203\226\343\203\204\343\203\226\343\203\204.txt" create mode 100644 test/docker-compose.yaml create mode 100644 test/integration/test_api.sh create mode 100755 test/unit/s3gateway_test.js diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000..6fb786c4 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,28 @@ +name: CI + +# Controls when the action will run. 
Triggers the workflow on push or pull request +# events but only for the master branch +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + build: + runs-on: ubuntu-latest + + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + - name: Configure Github Package Registry + run: echo ${{ secrets.GITHUB_TOKEN }} | docker login docker.pkg.github.com -u $GITHUB_ACTOR --password-stdin + - name: Install dependencies + run: sudo apt-get install -y wait-for-it + - name: Run tests + run: ./test.sh oss + - name: Push container image + run: docker tag nginx-s3-gateway docker.pkg.github.com/$GITHUB_REPOSITORY/nginx-oss-s3-gateway && docker push docker.pkg.github.com/$GITHUB_REPOSITORY/nginx-oss-s3-gateway + diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..731c0b03 --- /dev/null +++ b/.gitignore @@ -0,0 +1,112 @@ +# Created by https://www.toptal.com/developers/gitignore/api/intellij +# Edit at https://www.toptal.com/developers/gitignore?templates=intellij + +### Intellij ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### Intellij Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# End of https://www.toptal.com/developers/gitignore/api/intellij + +# Test data files +test-settings.* +s3-requests.http +httpRequests/ diff --git a/Dockerfile.oss b/Dockerfile.oss new file mode 100644 index 00000000..3bf51f0e --- /dev/null +++ b/Dockerfile.oss @@ -0,0 +1,53 @@ +FROM nginx:1.19.2 + +ENV NGINX_VERSION "1.19.2" +ENV HEADERS_MORE_VERSION "v0.33" + +# We modify the nginx base image by: +# 1. Installing the headers-more module +# 2. Adding configuration files needed for proxying private S3 buckets +# 3. 
Adding a directory for proxied objects to be stored + +RUN set -eux \ + export DEBIAN_FRONTEND=noninteractive; \ + apt-get update -qq; \ + apt-get install -y -qq build-essential libpcre3-dev git; \ + curl -o /tmp/nginx.tar.gz --retry 6 -Ls "http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"; \ + mkdir /tmp/nginx /tmp/headers-more; \ + tar -C /tmp/nginx --strip-components 1 -xzf /tmp/nginx.tar.gz; \ + curl -o /tmp/headers-more.tar.gz --retry 6 -Ls "https://github.com/openresty/headers-more-nginx-module/archive/${HEADERS_MORE_VERSION}.tar.gz"; \ + tar -C "/tmp/headers-more" --strip-components 1 -xzf /tmp/headers-more.tar.gz; \ + cd /tmp/nginx; \ + ./configure --add-dynamic-module=/tmp/headers-more \ + --without-http_gzip_module \ + --prefix=/etc/nginx \ + --sbin-path=/usr/sbin/nginx \ + --modules-path=/usr/lib/nginx/modules \ + --conf-path=/etc/nginx/nginx.conf \ + --error-log-path=/var/log/nginx/error.log \ + --http-log-path=/var/log/nginx/access.log \ + --pid-path=/var/run/nginx.pid \ + --lock-path=/var/run/nginx.lock \ + --http-client-body-temp-path=/var/cache/nginx/client_temp \ + --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ + --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ + --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ + --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ + --user=nginx --group=nginx --with-compat --with-file-aio \ + --with-threads \ + --with-cc-opt="-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-${NGINX_VERSION}/debian/debuild-base/nginx-${NGINX_VERSION}=. -fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC" \ + --with-ld-opt='-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'; \ + make -j $(nproc); \ + cp /tmp/nginx/objs/ngx_http_headers_more_filter_module.so /usr/lib/nginx/modules; \ + apt-get purge -y --auto-remove build-essential libpcre3-dev git; \ + rm -rf /var/lib/apt/lists/* /var/tmp/* /tmp/* + +COPY common/etc /etc +COPY common/docker-entrypoint.d/00-check-for-required-env.sh /docker-entrypoint.d/00-check-for-required-env.sh +COPY oss/etc /etc + +RUN set -eux \ + export DEBIAN_FRONTEND=noninteractive; \ + mkdir -p /var/cache/nginx/s3_proxy; \ + chown nginx:nginx /var/cache/nginx/s3_proxy; \ + chmod -R +x /docker-entrypoint.d/* diff --git a/Dockerfile.plus b/Dockerfile.plus new file mode 100644 index 00000000..dbbfc70a --- /dev/null +++ b/Dockerfile.plus @@ -0,0 +1,61 @@ +FROM debian:buster-slim + +ARG NGINX_GPGKEY + +ENV NGINX_VERSION 22 +ENV PKG_RELEASE 1~buster + +ENV NJS_VERSION 0.4.3 +ENV HEADERS_MORE_VERSION 0.33 + +COPY plus/etc/ssl /etc/ssl +COPY plus/usr /usr + +# Copy files from the OSS NGINX Docker container such that the container +# startup is the same. 
+# Source: https://github.com/nginxinc/docker-nginx/tree/1.19.2/stable/buster +COPY plus/docker-entrypoint.sh /docker-entrypoint.sh +COPY plus/docker-entrypoint.d /docker-entrypoint.d + +RUN set -eux \ + export DEBIAN_FRONTEND=noninteractive; \ + # create nginx user/group first, to be consistent throughout docker variants + addgroup --system --gid 101 nginx; \ + adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid 101 nginx; \ + apt-get -qq update; \ + apt-get -qq upgrade -y; \ + sh -a /usr/local/bin/add_nginx_plus_repo.sh; \ + rm /usr/local/bin/add_nginx_plus_repo.sh; \ + apt-get -qq update; \ + apt-get -qq install --no-install-recommends --no-install-suggests -y \ + nginx-plus=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-plus-module-headers-more=${NGINX_VERSION}+${HEADERS_MORE_VERSION}-${PKG_RELEASE} \ + nginx-plus-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \ + gettext-base; \ + apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + rm -rf /var/lib/apt/lists/* /var/tmp/* /tmp/*; \ + # forward request and error logs to docker log collector + ln -sf /dev/stdout /var/log/nginx/access.log; \ + ln -sf /dev/stderr /var/log/nginx/error.log; \ + chmod -R -v +x /docker-entrypoint.sh /docker-entrypoint.d/*.sh + +ENTRYPOINT ["/docker-entrypoint.sh"] + +EXPOSE 80 + +STOPSIGNAL SIGTERM + +CMD ["nginx", "-g", "daemon off;"] + +# NGINX Docker image setup complete, everything below is specific for +# the S3 Gateway use case. + +COPY plus/etc/nginx /etc/nginx +COPY common/etc /etc +COPY common/docker-entrypoint.d/00-check-for-required-env.sh /docker-entrypoint.d/00-check-for-required-env.sh + +RUN set -eux \ + export DEBIAN_FRONTEND=noninteractive; \ + mkdir -p /var/cache/nginx/s3_proxy; \ + chown nginx:nginx /var/cache/nginx/s3_proxy; \ + chmod -R +x /docker-entrypoint.d/* diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000..f433b1a5 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/README.md b/README.md new file mode 100644 index 00000000..65daf4ae --- /dev/null +++ b/README.md @@ -0,0 +1,105 @@ +# NGINX S3 Gateway + +This project provides an example of configuring NGINX to act as an authenticating +and caching gateway for read-only requests (GET/HEAD) to the S3 API. + +## Potential Use Cases + + * Providing an authentication gateway using an alternative authentication + system to S3 + * Caching frequently accessed S3 objects for lower latency delivery and + protection against S3 outages + * For internal/micro services that can't authenticate against the S3 API + (e.g. don't have libraries available) the gateway can provide a means + to accessing S3 objects without authentication + * Compressing objects (gzip, brotli) from gateway to end user + * Protecting S3 bucket from arbitrary public access and traversal + * Rate limiting S3 objects + * Protecting S3 bucket with a WAF in order to prevent asset discovery attacks + * Serving static assets from a S3 bucket alongside a dynamic application + endpoints all in a single RESTful directory structure + +## Usage + +Few users will find this project as-is to be sufficient for their use cases. As +such, it is best to borrow from the patterns in this project and build your +own configuration. For example, if you want enable SSL/TLS and compression +in your NGINX S3 gateway configuration, you will need to look at other +resources because this project does not enable those features of NGINX. + +### Building the Docker Image + +#### NGINX OSS + +In order to build the NGINX OSS container image, do a `docker build` as follows: +``` +docker build -f Dockerfile.oss -t nginx-oss-s3-gateway . +``` + +#### NGINX Plus + +In order to build the NGINX Plus Docker image, copy your NGINX Plus repository +keys (`nginx-repo.crt` and `nginx-repo.key`) into the `plus/etc/ssl/nginx` +directory and set the environment variable `NGINX_GPGKEY` with the contents of +your NGINX GPG key. Then build the container image as follows: + +``` +export NGINX_GPGKEY= +docker build -f Dockerfile.plus -t nginx-plus-s3-gateway --build-arg NGINX_GPGKEY . 
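# Optional sanity check: confirm the repository certificate and key copied
# above are where Dockerfile.plus expects to find them.
ls plus/etc/ssl/nginx/nginx-repo.crt plus/etc/ssl/nginx/nginx-repo.key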
+``` + +### Configuration + +Environment variables are used to configure this project. + +* `S3_BUCKET_NAME` - Name of S3 bucket to proxy requests to +* `S3_SERVER` - S3 host to connect to +* `S3_SERVER_PORT` - SSL/TLS port to connect to +* `S3_SERVER_PROTO` - Protocol to used connect to S3 server - `http` or `https` +* `AWS_SIGS_VERSION` - AWS Signatures API version - either 2 or 4 (4 is default) +* `S3_ACCESS_KEY_ID` - Access key +* `S3_SECRET_KEY` - Secret access key +* `S3_REGION` - Region associated with API +* `S3_DEBUG` - Flag (true/false) enabling AWS signatures debug output (default: false) + +The above environment variables can be set in a file that is passed to docker +with the `--env-file` flag. The file would look something like +[this example](settings.example). + +The container can be run by (replacing `oss` with `plus` when invoking the NGINX +Plus container): +``` +docker run --env-file ./settings -p80:80 --name nginx-oss-s3-gateway nginx-s3-gateway +``` + +## Directory Structure and File Descriptions + +``` +etc/ contains files used in both NGINX Plus and OSS configurations +oss/ contains files used solely in NGINX OSS configurations +plus/ contains files used solely in NGINX Plus configurations +test/ contains automated tests for validang that the examples work +Dockerfile.oss Dockerfile that configures NGINX OSS to act as a S3 gateway +Dockerfile.plus Dockerfile that builds a NGINX Plus instance that is configured + equivelently to NGINX OSS - instance is configured to act as a + S3 gateway with NGINX Plus additional features enabled +settings.example Docker env file example +test.sh test launcher +``` + +## Testing + +Automated tests require `docker`, `docker-compose`, `curl` and `md5sum` to be +installed. To run all unit tests and integration tests, run the following command. +If you invoke the test script with the plus parameter, you will need to add your +NGINX repository keys to the `plus/etc/ssl/nginx` directory. You will also need +to pass an additional parameter or set the environment variable `NGINX_GPGKEY` +with your NGINX Plus GPG key. + +``` +$ ./test.sh +``` + +## License + +All code include is licensed under the [Apache 2.0 license](LICENSE.txt). diff --git a/common/docker-entrypoint.d/00-check-for-required-env.sh b/common/docker-entrypoint.d/00-check-for-required-env.sh new file mode 100644 index 00000000..5e2f78be --- /dev/null +++ b/common/docker-entrypoint.d/00-check-for-required-env.sh @@ -0,0 +1,83 @@ +#!/bin/sh +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This script checks to see that required environment variables were correctly +# passed into the Docker container. 
+ +set -e + +failed=0 + +if [ -z ${S3_ACCESS_KEY_ID+x} ]; then + >&2 echo "Required S3_ACCESS_KEY_ID environment variable missing" + failed=1 +fi + +if [ -z ${S3_SECRET_KEY+x} ]; then + >&2 echo "Required S3_SECRET_KEY environment variable missing" + failed=1 +fi + +if [ -z ${S3_BUCKET_NAME+x} ]; then + >&2 echo "Required S3_BUCKET_NAME environment variable missing" + failed=1 +fi + +if [ -z ${S3_SERVER+x} ]; then + >&2 echo "Required S3_SERVER environment variable missing" + failed=1 +fi + +if [ -z ${S3_SERVER_PORT+x} ]; then + >&2 echo "Required S3_SERVER_PORT environment variable missing" + failed=1 +fi + +if [ -z ${S3_SERVER_PROTO+x} ]; then + >&2 echo "Required S3_SERVER_PROTO environment variable missing" + failed=1 +fi + +if [ "${S3_SERVER_PROTO}" != "http" ] && [ "${S3_SERVER_PROTO}" != "https" ]; then + >&2 echo "S3_SERVER_PROTO contains an invalid value (${S3_SERVER_PROTO}). Valid values: http, https" + failed=1 +fi + +if [ -z ${S3_REGION+x} ]; then + >&2 echo "Required S3_REGION environment variable missing" + failed=1 +fi + +if [ -z ${AWS_SIGS_VERSION+x} ]; then + >&2 echo "Required AWS_SIGS_VERSION environment variable missing" + failed=1 +fi + +if [ "${AWS_SIGS_VERSION}" != "2" ] && [ "${AWS_SIGS_VERSION}" != "4" ]; then + >&2 echo "AWS_SIGS_VERSION contains an invalid value (${AWS_SIGS_VERSION}). Valid values: 2, 4" + failed=1 +fi + +if [ $failed -gt 0 ]; then + exit 1 +fi + +echo "S3 Backend Environment" +echo "Access Key ID: ${S3_ACCESS_KEY_ID}" +echo "Origin: ${S3_SERVER_PROTO}://${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +echo "Region: ${S3_REGION}" +echo "AWS Signatures Version: v${AWS_SIGS_VERSION}" diff --git a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js new file mode 100644 index 00000000..3deccfaa --- /dev/null +++ b/common/etc/nginx/include/s3gateway.js @@ -0,0 +1,400 @@ +/* + * Copyright 2020 F5 Networks + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var mod_hmac = require('crypto'); + +/** + * Flag indicating debug mode operation. If true, additional information + * about signature generation will be logged. + * @type {boolean} + */ +var debug = _parseBoolean(process.env['S3_DEBUG']); + +/** + * The current moment as a timestamp. This timestamp will be used across + * functions in order for there to be no variations in signatures. + * @type {Date} + */ +var now = new Date(); + +/** + * Constant defining the service requests are being signed for. + * @type {string} + */ +var service = 's3'; + +/** + * Constant checksum for an empty HTTP body. + * @type {string} + */ +var emptyPayloadHash = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'; + +/** + * Constant defining the headers being signed. + * @type {string} + */ +var signedHeaders = 'host;x-amz-content-sha256;x-amz-date'; + +/** + * Outputs the timestamp used to sign the request, so that it can be added to + * the 'Date' header and sent by NGINX. 
+ * + * @param r {Request} HTTP request object (not used, but required for NGINX configuration) + * @returns {string} RFC2616 timestamp + */ +function s3date(r) { + return now.toUTCString(); +} + +/** + * Outputs the timestamp used to sign the request, so that it can be added to + * the 'x-amz-date' header and sent by NGINX. The output format is + * ISO 8601: YYYYMMDD'T'HHMMSS'Z'. + * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-date-handling.html | Handling dates in Signature Version 4} + * + * @param r {Request} HTTP request object (not used, but required for NGINX configuration) + * @returns {string} ISO 8601 timestamp + */ +function awsHeaderDate(r) { + return _amzDatetime(now, _eightDigitDate(now)); +} + +/** + * Creates an AWS authentication signature based on the global settings and + * the passed request parameter. + * + * @param r HTTP request + * @returns {string} AWS authentication signature + */ +function s3auth(r) { + var accessId = process.env['S3_ACCESS_KEY_ID']; + var secret = process.env['S3_SECRET_KEY']; + var bucket = process.env['S3_BUCKET_NAME']; + var region = process.env['S3_REGION']; + var server = process.env['S3_SERVER']; + var sigver = process.env['AWS_SIGS_VERSION']; + + if (sigver == '2') { + return signatureV2(r, bucket, accessId, secret); + } else { + return signatureV4(r, now, bucket, accessId, secret, region, server); + } +} + +/** + * Redirects the request to the appropriate location. If the request is not + * a read (GET/HEAD) request, then we reject the request outright by returning + * a HTTP 405 error with a list of allowed methods. + * + * @param r {Request} HTTP request object + */ +function redirectToS3(r) { + // This is a read-only S3 gateway, so we do not support any other methods + if ( !(r.method === 'GET' || r.method === 'HEAD')) { + if (debug) { + r.log('Invalid method requested: ' + r.method); + } + r.internalRedirect("@error405"); + } else { + r.internalRedirect("@s3"); + } +} + +/** + * Create HTTP Authorization header for authenticating with an AWS compatible + * v2 API. + * + * @param r {Request} HTTP request object + * @param bucket {string} S3 bucket associated with request + * @param accessId {string} User access key credential + * @param secret {string} Secret access key + * @returns {string} HTTP Authorization header value + */ +function signatureV2(r, bucket, accessId, secret) { + var method = r.method; + var uri = r.uri; + var hmac = mod_hmac.createHmac('sha1', secret); + var httpDate = s3date(r); + var stringToSign = method + '\n\n\n' + httpDate + '\n' + '/' + bucket + uri; + + if (debug) { + r.log('AWS v2 Auth Signing String: [' + stringToSign + ']'); + } + + var s3signature = hmac.update(stringToSign).digest('base64'); + + return 'AWS '+accessId+':'+s3signature; +} + +/** + * Create HTTP Authorization header for authenticating with an AWS compatible + * v4 API. 
+ * + * @param r {Request} HTTP request object + * @param timestamp {Date} timestamp associated with request (must fall within a skew) + * @param bucket {string} S3 bucket associated with request + * @param accessId {string} User access key credential + * @param secret {string} Secret access key + * @param region {string} API region associated with request + * @returns {string} HTTP Authorization header value + */ +function signatureV4(r, timestamp, bucket, accessId, secret, region, server) { + var eightDigitDate = _eightDigitDate(timestamp); + var amzDatetime = _amzDatetime(timestamp, eightDigitDate); + var signature = _buildSignatureV4(r, amzDatetime, eightDigitDate, bucket, secret, region, server); + var authHeader = 'AWS4-HMAC-SHA256 Credential=' + .concat(accessId, '/', eightDigitDate, '/', region, '/', service, '/aws4_request,', + 'SignedHeaders=', signedHeaders, ',Signature=', signature); + + if (debug) { + r.log('AWS v4 Auth header: [' + authHeader + ']') + } + + return authHeader; +} + +/** + * Creates a signature for use authenticating against an AWS compatible API. + * + * @see {@link https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html | AWS V4 Signing Process} + * @param r {Request} HTTP request object + * @param amzDatetime {string} ISO8601 timestamp string to sign request with + * @param eightDigitDate {string} date in the form of 'YYYYMMDD' + * @param bucket {string} S3 bucket associated with request + * @param secret {string} Secret access key + * @param region {string} API region associated with request + * @returns {string} hex encoded hash of signature HMAC value + * @private + */ +function _buildSignatureV4(r, amzDatetime, eightDigitDate, bucket, secret, region, server) { + var host = bucket + '.' + server; + var method = r.method; + var uri = _escapeURIPath(r.uri); + var canonicalRequest = _buildCanonicalRequest(method, uri, host, amzDatetime); + + if (debug) { + r.log('AWS v4 Auth Canonical Request: [' + canonicalRequest + ']'); + } + + var canonicalRequestHash = mod_hmac.createHash('sha256') + .update(canonicalRequest) + .digest('hex'); + + if (debug) { + r.log('AWS v4 Auth Canonical Request Hash: [' + canonicalRequestHash + ']'); + } + + var stringToSign = _buildStringToSign(amzDatetime, eightDigitDate, region, canonicalRequestHash) + + if (debug) { + r.log('AWS v4 Auth Signing String: [' + stringToSign + ']'); + } + + var kSigningHash = _buildSigningKeyHash(secret, eightDigitDate, service, region); + + if (debug) { + r.log('AWS v4 Signing Key Hash: [' + kSigningHash.toString('hex') + ']'); + } + + var signature = mod_hmac.createHmac('sha256', kSigningHash) + .update(stringToSign).digest('hex'); + + if (debug) { + r.log('AWS v4 Authorization Header: [' + signature + ']'); + } + + return signature; +} + +/** + * Creates a string to sign by concatenating together multiple parameters required + * by the signatures algorithm. 
+ * + * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html | String to Sign} + * @param amzDatetime {string} ISO8601 timestamp string to sign request with + * @param eightDigitDate {string} date in the form of 'YYYYMMDD' + * @param region {string} region associated with server API + * @param canonicalRequestHash {string} hex encoded hash of canonical request string + * @returns {string} a concatenated string of the passed parameters formatted for signatures + * @private + */ +function _buildStringToSign(amzDatetime, eightDigitDate, region, canonicalRequestHash) { + return 'AWS4-HMAC-SHA256\n' + + amzDatetime + '\n' + + eightDigitDate + '/' + region + '/s3/aws4_request\n' + + canonicalRequestHash; +} + +/** + * Creates a canonical request that will later be signed + * + * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html | Creating a Canonical Request} + * @param method {string} HTTP method + * @param uri {string} URI associated with request + * @param host {string} HTTP Host header value + * @param amzDatetime {string} ISO8601 timestamp string to sign request with + * @returns {string} string with concatenated request parameters + * @private + */ +function _buildCanonicalRequest(method, uri, host, amzDatetime) { + var canonicalHeaders = 'host:' + host + '\n' + + 'x-amz-content-sha256:' + emptyPayloadHash + '\n' + + 'x-amz-date:' + amzDatetime + '\n'; + + // We hard code query parameters as empty because we don't want to forward + // query parameters to S3 proxied requests. + var emptyQueryParams = ''; + + var canonicalRequest = method+'\n'; + canonicalRequest += uri+'\n'; + canonicalRequest += emptyQueryParams+'\n'; + canonicalRequest += canonicalHeaders+'\n'; + canonicalRequest += signedHeaders+'\n'; + canonicalRequest += emptyPayloadHash; + + return canonicalRequest; +} + +/** + * Creates a signing key HMAC. This value is used to sign the request made to + * the API. + * + * @param kSecret {string} secret access key + * @param eightDigitDate {string} date in the form of 'YYYYMMDD' + * @param service {string} name of service that request is for e.g. s3, iam, etc + * @param region {string} region associated with server API + * @returns {ArrayBuffer} signing HMAC + * @private + */ +function _buildSigningKeyHash(kSecret, eightDigitDate, service, region) { + var kDate = mod_hmac.createHmac('sha256', 'AWS4'.concat(kSecret)) + .update(eightDigitDate).digest(); + var kRegion = mod_hmac.createHmac('sha256', kDate) + .update(region).digest(); + var kService = mod_hmac.createHmac('sha256', kRegion) + .update(service).digest(); + var kSigning = mod_hmac.createHmac('sha256', kService) + .update('aws4_request').digest(); + + return kSigning; +} + +/** + * Formats a timestamp into a date string in the format 'YYYYMMDD'. + * + * @param timestamp {Date} timestamp used in signature + * @returns {string} a formatted date string based on the input timestamp + * @private + */ +function _eightDigitDate(timestamp) { + var year = timestamp.getUTCFullYear(); + var month = timestamp.getUTCMonth() + 1; + var day = timestamp.getUTCDate(); + + return ''.concat(_padWithLeadingZeros(year, 4), _padWithLeadingZeros(month,2), _padWithLeadingZeros(day,2)); +} + +/** + * Creates a string in the ISO601 date format (YYYYMMDD'T'HHMMSS'Z') based on + * the supplied timestamp and date. The date is not extracted from the timestamp + * because that operation is already done once during the signing process. 
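
As a cross-check of the `_buildStringToSign()` and `_buildSigningKeyHash()` logic above, the same Signature Version 4 derivation can be reproduced from a shell with `openssl`. This is only an illustrative sketch: the date, region, and canonical request hash below are placeholder values, and it assumes `openssl` and `xxd` are available on the host.

```
# Inputs - illustrative values only; substitute real ones when debugging.
secret="$S3_SECRET_KEY"
eight_digit_date="20200918"
amz_datetime="20200918T204314Z"
region="us-east-1"
canonical_request_hash="<hex sha256 of the canonical request>"

hex() { xxd -p -c 256; }
hmac_hex() { openssl dgst -sha256 -binary -mac HMAC -macopt hexkey:"$1" | hex; }

# String to sign, mirroring _buildStringToSign()
string_to_sign="$(printf 'AWS4-HMAC-SHA256\n%s\n%s/%s/s3/aws4_request\n%s' \
    "$amz_datetime" "$eight_digit_date" "$region" "$canonical_request_hash")"

# Signing key chain, mirroring _buildSigningKeyHash()
k_date=$(printf '%s' "$eight_digit_date" | openssl dgst -sha256 -binary -hmac "AWS4${secret}" | hex)
k_region=$(printf '%s' "$region" | hmac_hex "$k_date")
k_service=$(printf '%s' "s3" | hmac_hex "$k_region")
k_signing=$(printf '%s' "aws4_request" | hmac_hex "$k_service")

# Final signature, mirroring the last step of _buildSignatureV4()
printf '%s' "$string_to_sign" | hmac_hex "$k_signing"
```

The resulting hex string should match the signature that `_buildSignatureV4()` logs when `S3_DEBUG` is enabled, which makes this a convenient way to track down signing mismatches.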
+ * + * @param timestamp {Date} timestamp to extract date from + * @param eightDigitDate {string} 'YYYYMMDD' format date string that was already extracted from timestamp + * @returns {string} string in the format of YYYYMMDD'T'HHMMSS'Z' + * @private + */ +function _amzDatetime(timestamp, eightDigitDate) { + var hours = timestamp.getUTCHours(); + var minutes = timestamp.getUTCMinutes(); + var seconds = timestamp.getUTCSeconds(); + + return ''.concat( + eightDigitDate, + 'T', _padWithLeadingZeros(hours, 2), + _padWithLeadingZeros(minutes, 2), + _padWithLeadingZeros(seconds, 2), + 'Z'); +} + +/** + * Pads the supplied number with leading zeros. + * + * @param num {number|string} number to pad + * @param size number of leading zeros to pad + * @returns {string} a string with leading zeros + * @private + */ +function _padWithLeadingZeros(num, size) { + var s = "0" + num; + return s.substr(s.length-size); +} + +/** + * Escapes the path portion of a URI without escaping the path separator + * characters (/). + * + * @param uri {string} unescaped URI + * @returns {string} URI with each path component separately escaped + * @private + */ +function _escapeURIPath(uri) { + var components = []; + + uri.split('/').forEach(function (item, i) { + components[i] = encodeURIComponent(item); + }); + + return components.join('/'); +} + +/** + * Parses a string to and returns a boolean value based on its value. If the + * string can't be parsed, this method returns null. + * + * @param string {*} value representing a boolean + * @returns {boolean} + * @private + */ +function _parseBoolean(string) { + switch(string) { + case "TRUE": + case "true": + case "True": + case "YES": + case "yes": + case "Yes": + case "1": + return true; + default: + return false; + } +} + +export default { + awsHeaderDate, + s3date, + s3auth, + redirectToS3, + + // These functions do not need to be exposed, but they are exposed so that + // unit tests can run against them. 
+ _padWithLeadingZeros, + _eightDigitDate, + _amzDatetime, + _buildSigningKeyHash, + _buildSignatureV4, +}; diff --git a/common/etc/nginx/nginx.conf b/common/etc/nginx/nginx.conf new file mode 100644 index 00000000..2a6a1a86 --- /dev/null +++ b/common/etc/nginx/nginx.conf @@ -0,0 +1,52 @@ +user nginx; +worker_processes 1; + +error_log /dev/stdout info; +pid /var/run/nginx.pid; + +# NJS module used for implementing S3 authentication +load_module modules/ngx_http_js_module.so; +load_module modules/ngx_http_headers_more_filter_module.so; + +# Preserve S3 environment variables for worker threads +env S3_ACCESS_KEY_ID; +env S3_SECRET_KEY; +env S3_BUCKET_NAME; +env S3_SERVER; +env S3_SERVER_PORT; +env S3_SERVER_PROTO; +env S3_REGION; +env AWS_SIGS_VERSION; +env S3_DEBUG; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + # Settings for S3 cache + proxy_cache_path /var/cache/nginx/s3_proxy + levels=1:2 + keys_zone=s3_cache:10m + max_size=10g + inactive=60m + use_temp_path=off; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/common/etc/nginx/templates/default.conf.template b/common/etc/nginx/templates/default.conf.template new file mode 100644 index 00000000..35f94c2f --- /dev/null +++ b/common/etc/nginx/templates/default.conf.template @@ -0,0 +1,113 @@ +js_import /etc/nginx/include/s3gateway.js; + +# We include only the variables needed for the authentication signatures that +# we plan to use. +include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_js_vars.conf; + +# This creates the HTTP authentication header to be sent to S3 +js_set $s3auth s3gateway.s3auth; + +# Extracts only the path from the requested URI. This strips out all query +# parameters and anchors in order to prevent extranous data from being sent to +# S3. +map $request_uri $uri_path { + "~^(?P.*?)(\?.*)*$" $path; +} + +server { + # Don't display the NGINX version number because we don't want to reveal + # information that could be used to find an exploit. + server_tokens off; + + # Uncomment this for a HTTP header that will let you know the cache status + # of an object. + # add_header X-Cache-Status $upstream_cache_status; + + # Proxy caching configuration. Customize this for your needs. + proxy_cache s3_cache; + proxy_cache_valid 200 302 1h; + proxy_cache_valid 404 1m; + proxy_cache_valid 403 30s; + proxy_cache_methods GET HEAD; + proxy_cache_convert_head off; + proxy_cache_revalidate on; + proxy_cache_background_update on; + proxy_cache_lock on; + proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504; + proxy_cache_key "$request_method$host$uri"; + + # If you need to support proxying range request, refer to this article: + # https://www.nginx.com/blog/smart-efficient-byte-range-caching-nginx/ + + # Do not proxy for the site root because we do not want to make a proxy + # request to the root of the S3 API. + location = / { + return 404; + } + + # Do not proxy requests for directories because we are not providing + # directory listings nor do we want to reveal the application/x-directory + # content type which will leak to the requester that we are proxying S3. + location ~ ^\/(.*)\/$ { + return 404; + } + + # Do not proxy the S3 SOAP API. 
The S3 API has a less-documented feature + # where the object name "soap" is used for the SOAP API. We don't allow + # access to it. + location /soap { + return 404; + } + + location / { + # Redirect to the proper location based on the client request - either + # @s3 or @error405. + js_content s3gateway.redirectToS3; + } + + location @s3 { + # We include only the headers needed for the authentication signatures that + # we plan to use. + include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; + + # Don't allow any headers from the client - we don't want them messing + # with S3 at all. + proxy_pass_request_headers off; + + # Set the Authorization header to the AWS Signatures credentials + proxy_set_header Authorization $s3auth; + + # We set the host as the bucket name to inform the S3 API of the bucket + proxy_set_header Host '${S3_BUCKET_NAME}.${S3_SERVER}'; + + # Use keep alive connections in order to improve performance + proxy_http_version 1.1; + proxy_set_header Connection ''; + + # We strip off all of the AWS specific headers from the server so that + # there is nothing identifying the object as having originated in an + # object store. + more_clear_headers 'x-amz-*'; + + # Catch all errors from S3 and sanitize them so that the user can't + # gain intelligence about the S3 bucket being proxied. + proxy_intercept_errors on; + + # Comment out this line to receive the error messages returned by S3 + error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; + + # Change this to 'http' if you want to connect to the S3 API insecurely + proxy_pass ${S3_SERVER_PROTO}://storage_urls$uri_path; + } + + location @error404 { + return 404; + } + + # Provide a hint to the client on 405 errors of the acceptable request methods + error_page 405 @error405; + location @error405 { + add_header Allow "GET, HEAD" always; + return 405; + } +} diff --git a/common/etc/nginx/templates/gateway/v2_headers.conf.template b/common/etc/nginx/templates/gateway/v2_headers.conf.template new file mode 100644 index 00000000..5798b25b --- /dev/null +++ b/common/etc/nginx/templates/gateway/v2_headers.conf.template @@ -0,0 +1,3 @@ +# This header is needed when doing v2 signature authentication. It +# specifies the timestamp in which the signature was generated. +proxy_set_header Date $httpDate; diff --git a/common/etc/nginx/templates/gateway/v2_js_vars.conf.template b/common/etc/nginx/templates/gateway/v2_js_vars.conf.template new file mode 100644 index 00000000..04e9edae --- /dev/null +++ b/common/etc/nginx/templates/gateway/v2_js_vars.conf.template @@ -0,0 +1,4 @@ +# This header is needed when doing v2 signature authentication. It +# specifies the timestamp in which the signature was generated and is used with +# the HTTP Date header. +js_set $httpDate s3gateway.s3date; diff --git a/common/etc/nginx/templates/gateway/v4_headers.conf.template b/common/etc/nginx/templates/gateway/v4_headers.conf.template new file mode 100644 index 00000000..e6ca2daf --- /dev/null +++ b/common/etc/nginx/templates/gateway/v4_headers.conf.template @@ -0,0 +1,7 @@ +# This header is needed when doing v4 signature authentication. It +# specifies the timestamp in which the signature was generated. +proxy_set_header x-amz-date $awsDate; + +# All HTTP bodies are empty because we are only doing GET/HEAD requests, +# so we can hardcode the body checksum. 
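# (This constant is simply the SHA-256 digest of a zero-length payload; you can
# reproduce it with: printf '' | openssl dgst -sha256)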
+proxy_set_header x-amz-content-sha256 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'; diff --git a/common/etc/nginx/templates/gateway/v4_js_vars.conf.template b/common/etc/nginx/templates/gateway/v4_js_vars.conf.template new file mode 100644 index 00000000..b7baeee4 --- /dev/null +++ b/common/etc/nginx/templates/gateway/v4_js_vars.conf.template @@ -0,0 +1,4 @@ +# This header is needed when doing v4 signature authentication. It +# specifies the timestamp in which the signature was generated and is used with +# the x-amz-date header. +js_set $awsDate s3gateway.awsHeaderDate; diff --git a/oss/etc/nginx/templates/upstreams.conf.template b/oss/etc/nginx/templates/upstreams.conf.template new file mode 100644 index 00000000..ab80392f --- /dev/null +++ b/oss/etc/nginx/templates/upstreams.conf.template @@ -0,0 +1,8 @@ +upstream storage_urls { + # Upstreams are not refrshed until NGINX configuration is reloaded. + # NGINX Plus will dynamically reload upstreams when DNS records are changed. + + # Be sure to specify the port in the S3_SERVER and be sure that port + # corresponds to the https/http in the proxy_pass directive. + server ${S3_SERVER}:${S3_SERVER_PORT}; +} diff --git a/plus/docker-entrypoint.d/10-listen-on-ipv6-by-default.sh b/plus/docker-entrypoint.d/10-listen-on-ipv6-by-default.sh new file mode 100644 index 00000000..70f4da0e --- /dev/null +++ b/plus/docker-entrypoint.d/10-listen-on-ipv6-by-default.sh @@ -0,0 +1,77 @@ +#!/bin/sh +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# vim:sw=4:ts=4:et + +set -e + +ME=$(basename $0) +DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" + +# check if we have ipv6 available +if [ ! -f "/proc/net/if_inet6" ]; then + echo >&3 "$ME: error: ipv6 not available" + exit 0 +fi + +if [ ! -f "/$DEFAULT_CONF_FILE" ]; then + echo >&3 "$ME: error: /$DEFAULT_CONF_FILE is not a file or does not exist" + exit 0 +fi + +# check if the file can be modified, e.g. not on a r/o filesystem +touch /$DEFAULT_CONF_FILE 2>/dev/null || { echo >&3 "$ME: error: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } + +# check if the file is already modified, e.g. on a container restart +grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { echo >&3 "$ME: error: IPv6 listen already enabled"; exit 0; } + +if [ -f "/etc/os-release" ]; then + . 
/etc/os-release +else + echo >&3 "$ME: error: can not guess the operating system" + exit 0 +fi + +echo >&3 "$ME: Getting the checksum of /$DEFAULT_CONF_FILE" + +case "$ID" in + "debian") + CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) + echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { + echo >&3 "$ME: error: /$DEFAULT_CONF_FILE differs from the packaged version" + exit 0 + } + ;; + "alpine") + CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) + echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { + echo >&3 "$ME: error: /$DEFAULT_CONF_FILE differs from the packages version" + exit 0 + } + ;; + *) + echo >&3 "$ME: error: Unsupported distribution" + exit 0 + ;; +esac + +# enable ipv6 on default.conf listen sockets +sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE + +echo >&3 "$ME: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" + +exit 0 diff --git a/plus/docker-entrypoint.d/20-envsubst-on-templates.sh b/plus/docker-entrypoint.d/20-envsubst-on-templates.sh new file mode 100644 index 00000000..c57faed6 --- /dev/null +++ b/plus/docker-entrypoint.d/20-envsubst-on-templates.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -e + +ME=$(basename $0) + +auto_envsubst() { + local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" + local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" + local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" + + local template defined_envs relative_path output_path subdir + defined_envs=$(printf '${%s} ' $(env | cut -d= -f1)) + [ -d "$template_dir" ] || return 0 + if [ ! -w "$output_dir" ]; then + echo >&3 "$ME: ERROR: $template_dir exists, but $output_dir is not writable" + return 0 + fi + find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do + relative_path="${template#$template_dir/}" + output_path="$output_dir/${relative_path%$suffix}" + subdir=$(dirname "$relative_path") + # create a subdirectory where the template file exists + mkdir -p "$output_dir/$subdir" + echo >&3 "$ME: Running envsubst on $template to $output_path" + envsubst "$defined_envs" < "$template" > "$output_path" + done +} + +auto_envsubst + +exit 0 diff --git a/plus/docker-entrypoint.sh b/plus/docker-entrypoint.sh new file mode 100644 index 00000000..749e7c31 --- /dev/null +++ b/plus/docker-entrypoint.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# vim:sw=4:ts=4:et + +set -e + +if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then + exec 3>&1 +else + exec 3>/dev/null +fi + +if [ "$1" = "nginx" -o "$1" = "nginx-debug" ]; then + if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then + echo >&3 "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" + + echo >&3 "$0: Looking for shell scripts in /docker-entrypoint.d/" + find "/docker-entrypoint.d/" -follow -type f -print | sort -n | while read -r f; do + case "$f" in + *.sh) + if [ -x "$f" ]; then + echo >&3 "$0: Launching $f"; + "$f" + else + # warn on shell scripts without exec bit + echo >&3 "$0: Ignoring $f, not executable"; + fi + ;; + *) echo >&3 "$0: Ignoring $f";; + esac + done + + echo >&3 "$0: Configuration complete; ready for start up" + else + echo >&3 "$0: No files found in /docker-entrypoint.d/, skipping configuration" + fi +fi + +exec "$@" diff --git a/plus/etc/nginx/templates/upstreams.conf.template b/plus/etc/nginx/templates/upstreams.conf.template new file mode 100644 index 00000000..7a6cde94 --- /dev/null +++ b/plus/etc/nginx/templates/upstreams.conf.template @@ -0,0 +1,16 @@ +# This configuration with NGINX Plus should dynamically reload S3 backends +# as they change in DNS. + +# Docker DNS server IP +resolver 127.0.0.11; + +# Public DNS +#resolver 1.1.1.1 8.8.8.8; + +upstream storage_urls { + zone s3_backends 64k; + + # Be sure to specify the port in the S3_SERVER and be sure that port + # corresponds to the https/http in the proxy_pass directive. + server ${S3_SERVER}:${S3_SERVER_PORT} resolve; +} diff --git a/plus/etc/ssl/nginx/.gitignore b/plus/etc/ssl/nginx/.gitignore new file mode 100644 index 00000000..1855abcc --- /dev/null +++ b/plus/etc/ssl/nginx/.gitignore @@ -0,0 +1,2 @@ +nginx-repo.crt +nginx-repo.key diff --git a/plus/usr/local/bin/add_nginx_plus_repo.sh b/plus/usr/local/bin/add_nginx_plus_repo.sh new file mode 100644 index 00000000..32b3a1c5 --- /dev/null +++ b/plus/usr/local/bin/add_nginx_plus_repo.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env sh + +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +if [ -z "${NGINX_GPGKEY+x}" ]; then + >&2 echo "NGINX_GPGKEY environment variable containing NGINX+ GPG key was not found" + env + exit 1 +fi + +if [ ! -f "/etc/ssl/nginx/nginx-repo.crt" ]; then + >&2 echo "NGINX Plus repository certificate file not found at path: /etc/ssl/nginx/nginx-repo.crt" + exit 1 +fi + +if [ ! 
-f "/etc/ssl/nginx/nginx-repo.key" ]; then + >&2 echo "NGINX Plus repository key file not found at path: /etc/ssl/nginx/nginx-repo.key" + exit 1 +fi + +apt-get -qq install --no-install-recommends --no-install-suggests -y apt-transport-https gnupg1 ca-certificates + +found='' +for server in \ + ha.pool.sks-keyservers.net \ + hkp://keyserver.ubuntu.com:80 \ + hkp://p80.pool.sks-keyservers.net:80 \ + pgp.mit.edu \ +; do + echo "Fetching GPG key $NGINX_GPGKEY from $server" + apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break +done + test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1 + apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* + echo "Acquire::https::plus-pkgs.nginx.com::Verify-Peer \"true\";" >> /etc/apt/apt.conf.d/90nginx + echo "Acquire::https::plus-pkgs.nginx.com::Verify-Host \"true\";" >> /etc/apt/apt.conf.d/90nginx + echo "Acquire::https::plus-pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";" >> /etc/apt/apt.conf.d/90nginx + echo "Acquire::https::plus-pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";" >> /etc/apt/apt.conf.d/90nginx + echo "deb https://plus-pkgs.nginx.com/debian buster nginx-plus" >> /etc/apt/sources.list.d/nginx-plus.list diff --git a/settings.example b/settings.example new file mode 100644 index 00000000..3454226e --- /dev/null +++ b/settings.example @@ -0,0 +1,9 @@ +S3_BUCKET_NAME=my-bucket +S3_ACCESS_KEY_ID=ZZZZZZZZZZZZZZZZZZZZ +S3_SECRET_KEY=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +S3_SERVER=s3-us-east-1.amazonaws.com +S3_SERVER_PORT=443 +S3_SERVER_PROTO=https +S3_REGION=us-east-1 +S3_DEBUG=false +AWS_SIGS_VERSION=4 diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..df9f9cf4 --- /dev/null +++ b/test.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +# +# Copyright 2020 F5 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -o errexit # abort on nonzero exit status +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +nginx_server_proto="http" +nginx_server_host="localhost" +nginx_server_port="8989" +minio_server="http://localhost:9090" +test_server="${nginx_server_proto}://${nginx_server_host}:${nginx_server_port}" +test_fail_exit_code=2 +no_dep_exit_code=3 +test_dir="$(pwd)/test" +test_compose_config="${test_dir}/docker-compose.yaml" +test_compose_project="ngt" + +p() { + printf "\033[34;1m▶\033[0m " + echo "$1" +} + +e() { + >&2 echo "$1" +} + +if [ $# -eq 0 ]; then + nginx_type="oss" + p "No argument specified - defaulting to NGINX OSS. Valid arguments: oss, plus" +elif [ "$1" = "plus" ]; then + nginx_type="plus" + p "Testing with NGINX Plus" +else + nginx_type="oss" + p "Testing with NGINX OSS" +fi + +docker_cmd="$(command -v docker)" +if ! [ -x "${docker_cmd}" ]; then + e "required dependency not found: docker not found in the path or not executable" + exit ${no_dep_exit_code} +fi + +docker_compose_cmd="$(command -v docker-compose)" +if ! 
[ -x "${docker_compose_cmd}" ]; then
+    e "required dependency not found: docker-compose not found in the path or not executable"
+    exit ${no_dep_exit_code}
+fi
+
+curl_cmd="$(command -v curl)"
+if ! [ -x "${curl_cmd}" ]; then
+    e "required dependency not found: curl not found in the path or not executable"
+    exit ${no_dep_exit_code}
+fi
+
+wait_for_it_cmd="$(command -v wait-for-it)"
+if [ -x "${wait_for_it_cmd}" ]; then
+    wait_for_it_installed=1
+else
+    e "wait-for-it command not available, consider installing to prevent race conditions"
+    wait_for_it_installed=0
+fi
+
+if [ "${nginx_type}" = "plus" ]; then
+    if [ -z "${NGINX_GPGKEY+x}" ] && [ -z "${2:-}" ]; then
+        e "NGINX_GPGKEY environment variable containing NGINX+ GPG key was not found"
+        exit ${no_dep_exit_code}
+    fi
+
+    if [ -z "${NGINX_GPGKEY+x}" ] && [ -n "${2:-}" ]; then
+        p "Using GPG key from script parameter"
+        export NGINX_GPGKEY="$2"
+    fi
+
+    if [ ! -f "./plus/etc/ssl/nginx/nginx-repo.crt" ]; then
+        e "NGINX Plus certificate file not found: $(pwd)/plus/etc/ssl/nginx/nginx-repo.crt"
+        exit ${no_dep_exit_code}
+    fi
+
+    if [ ! -f "./plus/etc/ssl/nginx/nginx-repo.key" ]; then
+        e "NGINX Plus key file not found: $(pwd)/plus/etc/ssl/nginx/nginx-repo.key"
+        exit ${no_dep_exit_code}
+    fi
+fi
+
+compose() {
+    ${docker_compose_cmd} -f "${test_compose_config}" -p "${test_compose_project}" "$@"
+}
+
+integration_test() {
+    printf "\033[34;1m▶\033[0m"
+    printf "\e[1m Integration test suite for v%s signatures\e[22m\n" "$1"
+
+    # If Minio isn't already running, create the environment and load the test data
+    if [ -z "$(docker ps -q -f name=${test_compose_project}_minio_1)" ]; then
+        p "Building Docker Compose environment"
+        AWS_SIGS_VERSION=$1 compose up --no-start
+
+        p "Adding test data to container"
+        echo "Copying contents of ${test_dir}/data to Docker container ${test_compose_project}_minio_1:/"
+        ${docker_cmd} cp "${test_dir}/data" ${test_compose_project}_minio_1:/
+        echo "Docker diff output:"
+        ${docker_cmd} diff ${test_compose_project}_minio_1
+    fi
+
+    p "Starting Docker Compose environment"
+    AWS_SIGS_VERSION=$1 compose up -d
+
+    if [ ${wait_for_it_installed} -eq 1 ]; then
+        # Hit Minio's health check endpoint to see if it has started up
+        for (( i=1; i<=3; i++ ))
+        do
+            echo "Querying minio server to see if it is ready"
+            minio_is_up="$(${curl_cmd} -s -o /dev/null -w '%{http_code}' ${minio_server}/minio/health/cluster)"
+            if [ "${minio_is_up}" = "200" ]; then
+                break
+            else
+                sleep 2
+            fi
+        done
+
+        $wait_for_it_cmd -h ${nginx_server_host} -p ${nginx_server_port}
+    fi
+
+    p "Starting HTTP API tests (v$1 signatures)"
+    bash "${test_dir}/integration/test_api.sh" "$test_server" "$test_dir"
+
+    # We check to see if NGINX is in fact using the correct version of AWS
+    # signatures as it was configured to do.
+    sig_versions_found_count=$(compose logs nginx-s3-gateway | grep -c "AWS Signatures Version: v$1\|AWS v$1 Auth")
+
+    if [ "${sig_versions_found_count}" -lt 3 ]; then
+        e "NGINX was not detected as using the correct signatures version - examine logs"
+        compose logs nginx-s3-gateway
+        exit "$test_fail_exit_code"
+    fi
+}
+
+finish() {
+    result=$? 
+ + if [ $result -ne 0 ]; then + e "Error running tests - outputting container logs" + compose logs + fi + + p "Cleaning up Docker compose environment" + compose stop + compose rm -f + + exit ${result} +} +trap finish EXIT ERR SIGTERM SIGINT + +p "Building NGINX S3 gateway Docker image" +if [ "${nginx_type}" = "plus" ]; then + docker build -f Dockerfile.${nginx_type} -t nginx-s3-gateway --build-arg NGINX_GPGKEY . +else + docker build -f Dockerfile.${nginx_type} -t nginx-s3-gateway . +fi + +### UNIT TESTS + +p "Running unit tests in Docker image" +${docker_cmd} run \ + --rm \ + -v "$(pwd)/test/unit:/var/tmp" \ + --workdir /var/tmp \ + -e "S3_DEBUG=true" \ + --entrypoint /usr/bin/njs \ + nginx-s3-gateway -t module -p '/etc/nginx' /var/tmp/s3gateway_test.js + +### INTEGRATION TESTS + +# Test API with AWS Signature V2 +integration_test 2 + +# Stop NGINX container, so it can be restarted with a different AWS +# signatures version +compose stop nginx-s3-gateway + +# Test API with AWS Signature V4 +integration_test 4 + +p "All tests complete" diff --git a/test/data/bucket-1/a.txt b/test/data/bucket-1/a.txt new file mode 100644 index 00000000..272166ed --- /dev/null +++ b/test/data/bucket-1/a.txt @@ -0,0 +1 @@ +Let go, or be dragged. diff --git a/test/data/bucket-1/b/c/= b/test/data/bucket-1/b/c/= new file mode 100644 index 00000000..582ad56d --- /dev/null +++ b/test/data/bucket-1/b/c/= @@ -0,0 +1,2 @@ +This is an awful filename. +このフィール名を選ばないでください diff --git a/test/data/bucket-1/b/c/@ b/test/data/bucket-1/b/c/@ new file mode 100644 index 00000000..e69de29b diff --git a/test/data/bucket-1/b/c/d.txt b/test/data/bucket-1/b/c/d.txt new file mode 100644 index 00000000..0665a64e --- /dev/null +++ b/test/data/bucket-1/b/c/d.txt @@ -0,0 +1 @@ +When thoughts arise, then do all things arise. When thoughts vanish, then do all things vanish. diff --git a/test/data/bucket-1/b/e.txt b/test/data/bucket-1/b/e.txt new file mode 100644 index 00000000..1d3546db --- /dev/null +++ b/test/data/bucket-1/b/e.txt @@ -0,0 +1 @@ +If only you could hear the sound of snow. diff --git "a/test/data/bucket-1/b/\343\203\226\343\203\204\343\203\226\343\203\204.txt" "b/test/data/bucket-1/b/\343\203\226\343\203\204\343\203\226\343\203\204.txt" new file mode 100644 index 00000000..4e42dfdb --- /dev/null +++ "b/test/data/bucket-1/b/\343\203\226\343\203\204\343\203\226\343\203\204.txt" @@ -0,0 +1 @@ +Relax. Nothing is under control. diff --git a/test/docker-compose.yaml b/test/docker-compose.yaml new file mode 100644 index 00000000..ae7d6ffb --- /dev/null +++ b/test/docker-compose.yaml @@ -0,0 +1,45 @@ +version: "3" + +services: + nginx-s3-gateway: + # If minio client is up and running properly, we are reasonably sure that + # minio has properly started. That's why we depend on it here. 
+    depends_on:
+      - "minio-client"
+    image: "nginx-s3-gateway"
+    ports:
+      - "8989:80/tcp"
+    links:
+      - "minio"
+    restart: "no"
+    environment:
+      S3_BUCKET_NAME: "bucket-1"
+      S3_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE"
+      S3_SECRET_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+      S3_SERVER: "minio"
+      S3_SERVER_PORT: "9000"
+      S3_SERVER_PROTO: "http"
+      S3_REGION: "us-east-1"
+      S3_DEBUG: "true"
+      AWS_SIGS_VERSION:
+  minio:
+    image: "minio/minio:RELEASE.2020-09-17T04-49-20Z"
+    ports:
+      - "9090:9000/tcp"
+    restart: "no"
+    command: "server --address :9000 /data"
+    environment:
+      MINIO_ACCESS_KEY: "AKIAIOSFODNN7EXAMPLE"
+      MINIO_SECRET_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+      MINIO_REGION_NAME: "us-east-1"
+      MINIO_DOMAIN: "minio"
+      MINIO_BROWSER: "off"
+  minio-client:
+    depends_on:
+      - "minio"
+    image: "minio/mc"
+    restart: "no"
+    command: "admin trace --verbose nginx-test-gateway"
+    environment:
+      MC_HOST_nginx-test-gateway: "http://AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY@minio:9000"
+
diff --git a/test/integration/test_api.sh b/test/integration/test_api.sh
new file mode 100644
index 00000000..7da3b601
--- /dev/null
+++ b/test/integration/test_api.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+
+#
+# Copyright 2020 F5 Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -o errexit   # abort on nonzero exit status
+set -o nounset   # abort on unbound variable
+set -o pipefail  # don't hide errors within pipes
+
+test_server=$1
+test_dir=$2
+test_fail_exit_code=2
+no_dep_exit_code=3
+checksum_length=32
+
+e() {
+    >&2 echo "$1"
+}
+
+if [ -z "${test_server}" ]; then
+    e "missing first parameter: test server location (eg http://localhost:80)"
+fi
+
+if [ -z "${test_dir}" ]; then
+    e "missing second parameter: path to test data directory"
+fi
+
+curl_cmd="$(command -v curl)"
+if ! [ -x "${curl_cmd}" ]; then
+    e "required dependency not found: curl not found in the path or not executable"
+    exit ${no_dep_exit_code}
+fi
+
+checksum_cmd="$(command -v md5sum)"
+if ! [ -x "${checksum_cmd}" ]; then
+    e "required dependency not found: md5sum not found in the path or not executable"
+    exit ${no_dep_exit_code}
+fi
+
+assertHttpRequestEquals() {
+    method="$1"
+    path="$2"
+    uri="${test_server}/${path}"
+
+    printf "  \033[36;1m▲\033[0m "
+    echo "Testing object: ${method} ${path}"
+
+    if [ "${method}" = "HEAD" ]; then
+        expected_response_code="$3"
+        actual_response_code="$(${curl_cmd} -s -o /dev/null -w '%{http_code}' --head "${uri}")"
+
+        if [ "${expected_response_code}" != "${actual_response_code}" ]; then
+            e "Response code didn't match expectation. 
Request [${method} ${uri}] Expected [${expected_response_code}] Actual [${actual_response_code}]" + exit ${test_fail_exit_code} + fi + elif [ "${method}" = "GET" ]; then + body_data_path="${test_dir}/$3" + checksum_output="$(${checksum_cmd} "${body_data_path}")" + expected_checksum="${checksum_output:0:${checksum_length}}" + + curl_checksum_output="$(${curl_cmd} -s -X "${method}" "${uri}" | ${checksum_cmd})" + s3_file_checksum="${curl_checksum_output:0:${checksum_length}}" + + if [ "${expected_checksum}" != "${s3_file_checksum}" ]; then + e "Checksum doesn't match expectation. Request [${method} ${uri}] Expected [${expected_checksum}] Actual [${s3_file_checksum}]" + exit ${test_fail_exit_code} + fi + else + e "Method unsupported: [${method}]" + fi +} + +# Ordinary filenames + +assertHttpRequestEquals "HEAD" "a.txt" "200" +assertHttpRequestEquals "HEAD" "a.txt?some=param&that=should&be=stripped#aaah" "200" +assertHttpRequestEquals "HEAD" "b/c/d.txt" "200" +assertHttpRequestEquals "HEAD" "b/c/../e.txt" "200" +assertHttpRequestEquals "HEAD" "b/e.txt" "200" +assertHttpRequestEquals "HEAD" "b//e.txt" "200" +assertHttpRequestEquals "HEAD" "b/ブツブツ.txt" "200" + +# Weird filenames +assertHttpRequestEquals "HEAD" "b/c/=" "200" +assertHttpRequestEquals "HEAD" "b/c/@" "200" + +# Expected 404s +assertHttpRequestEquals "HEAD" "not found" "404" +assertHttpRequestEquals "HEAD" "b/" "404" +assertHttpRequestEquals "HEAD" "b/c" "404" +assertHttpRequestEquals "HEAD" "/b/c/" "404" +assertHttpRequestEquals "HEAD" "b//c" "404" +assertHttpRequestEquals "HEAD" "/" "404" +assertHttpRequestEquals "HEAD" "/soap" "404" + +# Verify GET is working +assertHttpRequestEquals "GET" "a.txt" "data/bucket-1/a.txt" +assertHttpRequestEquals "GET" "a.txt?some=param&that=should&be=stripped#aaah" "data/bucket-1/a.txt" +assertHttpRequestEquals "GET" "b/c/d.txt" "data/bucket-1/b/c/d.txt" +assertHttpRequestEquals "GET" "b/c/=" "data/bucket-1/b/c/=" +assertHttpRequestEquals "GET" "b/e.txt" "data/bucket-1/b/e.txt" +assertHttpRequestEquals "GET" "b/ブツブツ.txt" "data/bucket-1/b/ブツブツ.txt" diff --git a/test/unit/s3gateway_test.js b/test/unit/s3gateway_test.js new file mode 100755 index 00000000..fe37fa78 --- /dev/null +++ b/test/unit/s3gateway_test.js @@ -0,0 +1,154 @@ +#!env njs + +/* + * Copyright 2020 F5 Networks + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import s3gateway from "include/s3gateway.js"; + +var fakeRequest = { + "remoteAddress" : "172.17.0.1", + "headersIn" : { + "Connection" : "keep-alive", + "Accept-Encoding" : "gzip, deflate", + "Accept-Language" : "en-US,en;q=0.7,ja;q=0.3", + "Host" : "localhost:8999", + "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0", + "DNT" : "1", + "Cache-Control" : "max-age=0", + "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + "Upgrade-Insecure-Requests" : "1" + }, + "uri" : "/a/c/ramen.jpg", + "method" : "GET", + "httpVersion" : "1.1", + "headersOut" : {}, + "args" : { + "foo" : "bar" + }, + "status" : 0 +}; + +fakeRequest.log = function(msg) { + console.log(msg); +} + +function testPad() { + var padSingleDigit = s3gateway._padWithLeadingZeros(3, 2); + var expected = '03'; + + if (padSingleDigit !== expected) { + throw 'Single digit 3 was not padded with leading zero.\n' + + 'Actual: ' + padSingleDigit + '\n' + + 'Expected: ' + expected; + } +} + +function testEightDigitDate() { + var timestamp = new Date('2020-08-03T02:01:09.004Z'); + var eightDigitDate = s3gateway._eightDigitDate(timestamp); + var expected = '20200803'; + + if (eightDigitDate !== expected) { + throw 'Eight digit date was not created correctly.\n' + + 'Actual: ' + eightDigitDate + '\n' + + 'Expected: ' + expected; + } +} + +function testAmzDatetime() { + var timestamp = new Date('2020-08-03T02:01:09.004Z'); + var eightDigitDate = s3gateway._eightDigitDate(timestamp); + var amzDatetime = s3gateway._amzDatetime(timestamp, eightDigitDate); + var expected = '20200803T020109Z'; + + if (amzDatetime !== expected) { + throw 'Amazon date time was not created correctly.\n' + + 'Actual: [' + amzDatetime + ']\n' + + 'Expected: [' + expected + ']'; + } +} + +function testBuildSigningKeyHash() { + var kSecret = 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY'; + var date = '20150830'; + var service = 'iam'; + var region = 'us-east-1'; + var expected = 'c4afb1cc5771d871763a393e44b703571b55cc28424d1a5e86da6ed3c154a4b9'; + var signingKeyHash = s3gateway._buildSigningKeyHash(kSecret, date, service, region).toString('hex'); + + if (signingKeyHash !== expected) { + throw 'Signing key hash was not created correctly.\n' + + 'Actual: [' + signingKeyHash + ']\n' + + 'Expected: [' + expected + ']'; + } +} + +function testSignatureV4() { + // Note: since this is a read-only gateway, host, query parameters and all + // client headers will be ignored. 
+ var r = { + "remoteAddress" : "172.17.0.1", + "headersIn" : { + "Connection" : "keep-alive", + "Accept-Encoding" : "gzip, deflate", + "Accept-Language" : "en-US,en;q=0.7,ja;q=0.3", + "Host" : "localhost:8999", + "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0", + "DNT" : "1", + "Cache-Control" : "max-age=0", + "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + "Upgrade-Insecure-Requests" : "1" + }, + "uri" : "/a/c/ramen.jpg", + "method" : "GET", + "httpVersion" : "1.1", + "headersOut" : {}, + "args" : { + "foo" : "bar" + }, + "status" : 0 + }; + + r.log = function(msg) { + console.log(msg); + } + var timestamp = new Date('2020-08-11T19:42:14Z'); + var eightDigitDate = s3gateway._eightDigitDate(timestamp); + var amzDatetime = s3gateway._amzDatetime(timestamp, eightDigitDate); + var bucket = 'ez-test-bucket-1' + var secret = 'pvgoBEA1z7zZKqN9RoKVksKh31AtNou+pspn+iyb' + var region = 'us-west-2'; + var server = 's3-us-west-2.amazonaws.com'; + + var expected = 'cf4dd9e1d28c74e2284f938011efc8230d0c20704f56f67e4a3bfc2212026bec'; + var signature = s3gateway._buildSignatureV4(r, amzDatetime, eightDigitDate, bucket, secret, region, server); + + if (signature !== expected) { + throw 'V4 signature hash was not created correctly.\n' + + 'Actual: [' + signature + ']\n' + + 'Expected: [' + expected + ']'; + } +} + +function test() { + testPad(); + testEightDigitDate(); + testAmzDatetime(); + testBuildSigningKeyHash(); + testSignatureV4(); +} + +test();
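A note on the expected values in testBuildSigningKeyHash above: the secret key, date (20150830), region (us-east-1) and service (iam) appear to be the example inputs from the AWS Signature Version 4 documentation, so the expected hash can be re-derived independently of the gateway code. The sketch below is a minimal, standalone illustration of the standard SigV4 signing-key derivation (the HMAC-SHA256 chain); the file and function names are made up for this example, it targets the Node.js crypto module rather than s3gateway._buildSigningKeyHash, and it is not part of the test suite.

// sigv4_key_sketch.js - illustrative only; not part of the gateway or its tests.
var crypto = require('crypto');

// One derivation step: HMAC-SHA256(key, message), returned as raw bytes.
function hmac(key, msg) {
    return crypto.createHmac('sha256', key).update(msg).digest();
}

// Standard AWS SigV4 signing key:
// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, yyyymmdd), region), service), "aws4_request")
function buildSigningKey(secret, eightDigitDate, region, service) {
    var kDate = hmac('AWS4' + secret, eightDigitDate);
    var kRegion = hmac(kDate, region);
    var kService = hmac(kRegion, service);
    return hmac(kService, 'aws4_request');
}

// Same inputs as testBuildSigningKeyHash; the hex digest printed here should
// match the expected value asserted in that test.
var key = buildSigningKey('wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY',
    '20150830', 'us-east-1', 'iam');
console.log(key.toString('hex'));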