diff --git a/.gitignore b/.gitignore index d6352a2a1..66c303089 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ node_modules/ +data/ .DS_Store dist +*.iml *.log coverage .idea @@ -16,5 +18,7 @@ configurations/* !configurations/default # Secret config files +.env env.yml -env.yml-original \ No newline at end of file +env.yml-original +server.yml diff --git a/.travis.yml b/.travis.yml index e4ed1053a..368b8c6e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,29 +3,42 @@ jdk: - oraclejdk8 install: true sudo: false +# Install mongoDB to perform persistence tests +services: mongodb cache: directories: - - "$HOME/.m2/repository" + - "$HOME/.m2" before_install: -- sed -i.bak -e 's|https://nexus.codehaus.org/snapshots/|https://oss.sonatype.org/content/repositories/codehaus-snapshots/|g' - ~/.m2/settings.xml +#- sed -i.bak -e 's|https://nexus.codehaus.org/snapshots/|https://oss.sonatype.org/content/repositories/codehaus-snapshots/|g' ~/.m2/settings.xml +# set region in AWS config for S3 setup +- mkdir ~/.aws && printf '%s\n' '[default]' 'aws_access_key_id=foo' 'aws_secret_access_key=bar' 'region=us-east-1' > ~/.aws/config +- cp configurations/default/server.yml.tmp configurations/default/server.yml script: -- mvn package -DskipTests +# package jar +- mvn package +after_success: +# Upload coverage reports to codecov.io +- bash <(curl -s https://codecov.io/bash) +# notify slack channel of build status notifications: slack: conveyal:WQxmWiu8PdmujwLw4ziW72Gc before_deploy: +# get branch name of current branch for use in jar name: https://graysonkoonce.com/getting-the-current-branch-name-during-a-pull-request-in-travis-ci/ +- export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo $TRAVIS_PULL_REQUEST_BRANCH; fi) +# copy packaged jars over to deploy dir - mkdir deploy - cp target/dt-*.jar deploy/ -- cp "target/dt-$(git describe --always).jar" "deploy/dt-latest-$(git rev-parse --abbrev-ref HEAD).jar" +- cp "target/dt-$(git describe --always).jar" "deploy/dt-latest-${BRANCH}.jar" deploy: - skip_cleanup: true provider: s3 + skip_cleanup: true access_key_id: AKIAJISY76KTZBNHS4SA secret_access_key: - secure: txyT+nwgYM+JKM9m+yj+VmOzu3B8PZle12bucZN7lD/uy25y7qVSAO+hqdj5mHR2j3EZ57BYzPbrma0DnFe7pKJKYPss3C7LeU/NEsVRsJp0cpsMwZnnJGdxHYLa5Q8/RsW0LPhO6LWk2XRswJfv4/oCjPBKJOGip/IYr03rFu7+QWNmQ8CAlAdIHrgoGayfX99x/6GZ6fZ37IweUY9/YGdjT8D7IWF0XQ5kkkyaWBw/nDZAKbqZrpfPxykwJhnKzLa1SpAuFjzAU6QkGilptod/ZmF1NdT9XZMqy1Dgwvs84lGoI2T5KtZ12UCKKZa3e5masyrfQjgmnr14nDmWNq1dZswhTw8nacq9f5FSu56fuw5dA0CggZAn+BrHuEkbL+0Kp5Bdj7eOIkB3RDERO6jW8SHSGB/XiAG27j4MSsjA1FvipMYYSo8LTsik3YfFjNHapkLHcbqwzpMqW8E6pe2towcNR1Hnn+EbdIUJabxUrYAF71+TxlewbTqOF3ZcAvW6Qwn8A06aCyNvZO0+Yk6xQItuf/PWXAYp1RI5B4w6X+ylXvoDeFtB+KJK3CiJYp7UKT32Kj6YP+r1m3KJ8PJ4Spqzfll21jTLa9CYS+7ZtXHxwJunNRzTQ3h90c810KhWlgcreIT/OX5NhyafcwlnPQ1ijKa/i7o+4JjCRKI= + secure: a2PNYiv7kzgKxfSx6IhZxSCFBZTCjrbIAK/vmCB1KcpnlV4vTt/IL13i3u6XC8wAbUxhd6iJMtVRm4diIwmy0K7nnpp0h3cQDxYqWCmf1dHZWBJXkpurDpbfxW5G6IlL14i+EsTSCpmwalov+atOBDVyJWVGqfEYaj9c6Q1E0fiYNP3QwZQcsVuD1CRw91xzckfERwqYcz70p/hmTEPOgUwDHuyHsjFafJx+krY3mnBdRdDRLcnPavjcEtprjGkdiVbNETe3CHVNQrAVfqm187OoDA2tHTPjTFmlAdUedp4rYqLmF/WWbHZLzUkQb95FJkklx30vlwC0bIutP1TwIlr3ma5aCRFc58x3SzG07AeM+vbt/nh5A52cpdRjBnhctC2kL++QvwkJhwRy2xptl/WEd5AUagoN4ngnGzyDS4kk/taQFL0IAav5C2WH668kGyH17KNeWG/bCDd55oCvwNlppAYXH+WdbtylqiVb9Fllvs1wcIYWqqyX5zdYiyFEI8LyEQsNF/D5ekuAtLXcF25uwjNtHMjdAxQxHbAbBOeaaLwJd29os9GrKFI/2C0TVXZo2zaFLZyFaIsDHqAC+MXDBDtktimC9Uuozz7bXENCrOUBfsDEQXb46tkXLGaQNXeOhe3KwVKxlGDCsLb7iHIcdDyBm19hqUWhU3uA+dU= + # upload jars in deploy dir to bucket bucket: datatools-builds local-dir: deploy acl: public_read on: - repo: conveyal/datatools-server + repo: catalogueglobal/datatools-server all_branches: true diff --git a/README.md b/README.md index 54d219a65..862ad0585 100644 --- a/README.md +++ b/README.md @@ -5,3 +5,5 @@ The core application for Conveyal's transit data tools suite. ## Documentation View the [latest documentation](http://conveyal-data-tools.readthedocs.org/en/latest/) at ReadTheDocs. + +Note: `dev` branch docs can be found [here](http://conveyal-data-tools.readthedocs.org/en/dev/). \ No newline at end of file diff --git a/configurations/default/env.yml.tmp b/configurations/default/env.yml.tmp index f7b912083..e33b09933 100644 --- a/configurations/default/env.yml.tmp +++ b/configurations/default/env.yml.tmp @@ -1,7 +1,16 @@ AUTH0_CLIENT_ID: your-auth0-client-id AUTH0_DOMAIN: your-auth0-domain -AUTH0_SECRET: your-auth0-secret +# Note: One of AUTH0_SECRET or AUTH0_PUBLIC_KEY should be used depending on the signing algorithm set on the client. +# It seems that newer Auth0 accounts (2017 and later) might default to RS256 (public key). +AUTH0_SECRET: your-auth0-secret # uses HS256 signing algorithm +# AUTH0_PUBLIC_KEY: /path/to/auth0.pem # uses RS256 signing algorithm AUTH0_TOKEN: your-auth0-token +DISABLE_AUTH: false OSM_VEX: http://localhost:1000 SPARKPOST_KEY: your-sparkpost-key SPARKPOST_EMAIL: email@example.com +GTFS_DATABASE_URL: jdbc:postgresql://localhost/catalogue +# GTFS_DATABASE_USER: +# GTFS_DATABASE_PASSWORD: +#MONGO_URI: mongodb://mongo-host:27017 +MONGO_DB_NAME: catalogue diff --git a/configurations/default/server.yml b/configurations/default/server.yml.tmp similarity index 58% rename from configurations/default/server.yml rename to configurations/default/server.yml.tmp index c1f92449d..d95317b46 100644 --- a/configurations/default/server.yml +++ b/configurations/default/server.yml.tmp @@ -1,36 +1,26 @@ application: - assets_bucket: bucket-name - gtfs_s3_bucket: bucket-name - public_url: http://localhost:9000 + assets_bucket: datatools-staging # dist directory + public_url: http://localhost:9966 + notifications_enabled: false port: 4000 data: - mapdb: /tmp gtfs: /tmp - editor_mapdb: /tmp - regions: /tmp use_s3_storage: false + s3_region: us-east-1 + gtfs_s3_bucket: bucket-name modules: enterprise: enabled: false editor: - enabled: true - alerts: - enabled: false - use_extension: xyz - sign_config: enabled: false user_admin: enabled: true - validator: - enabled: true - deployment: - enabled: false gtfsapi: enabled: true load_on_fetch: false load_on_startup: false use_extension: xyz - update_frequency: 3600 # in seconds +# update_frequency: 3600 # in seconds extensions: transitland: enabled: true diff --git a/jmeter/.gitignore b/jmeter/.gitignore new file mode 100644 index 000000000..bec3034bb --- /dev/null +++ b/jmeter/.gitignore @@ -0,0 +1,6 @@ +# jmeter stuff +apache-jmeter* +*.log + +# test output +output/* diff --git a/jmeter/README.md b/jmeter/README.md new file mode 100644 index 000000000..db8f693b2 --- /dev/null +++ b/jmeter/README.md @@ -0,0 +1,143 @@ +# datatools-server jmeter tests + +This folder contains various items that to run jmeter load tests on datatools-server. + +## Installation + +Install jmeter with this nifty script: + +```sh +./install-jmeter.sh +``` + +## Running + +The jmeter test plan can be ran from the jmeter GUI or it can be ran without a GUI. In each of these cases, it is assumed that a datatools-server instance can be queried at http://localhost:4000. + +### Starting jmeter GUI + +This script starts the jmeter gui and loads the test script. + +```sh +./run-gui.sh +``` + +### Running test plan without GUI + +The test plan can be ran straight from the command line. A helper script is provided to assist in running jmeter from the command line. This script has 3 required and 1 optional positional arguments: + +| # | argument | possible values | description | +| ---- | ---- | ---- | ---- | +| 1 | test plan mode | `batch`, `fetch`, `query` or `upload` | which test plan mode to use when running the jmeter script. (see notes below for more explanation of these test plan modes) | +| 2 | number of threads | an integer greater than 0 | The number of simultaneous threads to run at a time. The threads will have staggered start times 1 second apart. | +| 3 | number of loops | an integer greater than 0 | the number of loops to run. This is combined with the number of threads, so if the number of threads is 10 and the number of loops is 8, the total number of test plans to run will be 80. | +| 4 | project name or batch csv file | string of the project name or string of file path to batch csv file | This argument is required if running the script with the `batch` test plan mode, otherwise, this argument is optional. The jmeter script will create new projects with a project name plus the current iteration number. The default name is "test project #". Also, if the s3 bucket argument is also provided, the output folder will be tarred up and with this name. | +| 5 | s3 bucket | string of an s3 bucket | OPTIONAL. If provided, the script will tar up the output folder and attempt to upload to the specified s3 bucket. This assumes that aws credentials have been setup for use by the `aws` command line tool. | + +Examples: + +_Run the test plan in upload mode 1 total times in 1 thread running 1 loop._ +```sh +./run-tests.sh upload 1 1 +``` + +_Run the test plan in query mode 80 total times in 10 threads each completing 8 loops._ +```sh +./run-tests.sh query 10 8 my-project-name my-s3-bucket +``` + +_Run in batch mode. Note that all feeds in the csv file will be processed in each loop. So in the following command, each feed in the batch.csv file would be processed 6 times. See the section below for documentation on the csv file and also see the fixtures folder for an example file._ +```sh +./run-tests.sh query 3 2 batch.csv my-s3-bucket +``` + +### Running the upload test on multiple gtfs files + +As noted above, the jmeter script can be run in `batch` mode. The provded csv file must contain the following headers and data: + +| header | description | +| ---- | ---- | +| project name | name of project to be created | +| mode | Must be either `fetch` or `upload` | +| location | The path to the file if the mode is `upload` or the http address if the mode is `fetch` | + +There is also a helper python script that can be used to run the jmeter script in `batch` mode using all files stored within an s3 bucket. This script requires that aws credentials have been setup for use by the aws command line tool. + +| # | argument | possible values | description | +| ---- | ---- | ---- | ---- | +| 1 | test plan mode | `fetch` or `upload` | The test plan mode to use. This will be written to each row of the csv file described above. | +| 2 | s3 bucket of gtfs feeds | the string of an s3 bucket | An s3 bucket that is accessbile with the credentials setup for the aws cli. Place zip files within the bucket. Each zip file will be downloaded to the local machine and the jmeter test plan will be ran in upload mode for each gtfs zip file. | +| 3 | s3 bucket for output reports | the string of an s3 bucket | OPTIONAL. After each test run, the script will tar up the output folder and attempt to upload to the specified s3 bucket. | + +Example: + +```sh +python run-upload-tests.py fetch gtfs-test-feeds datatools-jmeter-results +``` + + +## Test Plan + +A single test plan file is used for maintainablility. By default, the test plan runs 1 thread in 1 loop and will upload a feed and then perform various checks on the uploaded feed version. As noted in the above section, it is possible to run different variations of the test plan. There are 4 types of test plans that can be initiated: `batch`, `fetch`, `query` or `upload`. + +### Batch Test Plan Mode Script Steps + +When the test plan is run in batch mode, a csv file must be provided that contains rows of test plans of either `fetch` or `upload` types. Each row is then ran the with specified number of threads and loops. + +1. For Each Row: Run either the `fetch` or `upload` test plan according to the configuration in the row. + +### Upload Test Plan Mode Script Steps + +This section is run under the `upload` test plan mode or for a feed marked for uploading in the batch csv file. + +1. Create Project +1. Create Feedsource +1. Upload zip to create new Feed Version +1. Loop until job to upload feed is complete (making http requests to job status) +1. Save a record of the amount of time it took from the completion of the feed upload until receiving a status update that the feed version processing has completed +1. Continue to API Integrity Script Steps + +### Fetch Test Plan Mode Script Steps + +This section is run under the `fetch` test plan mode or for a feed marked for fetching in the batch csv file. + +1. Create Project +1. Create Feedsource +1. Create new Feed Version (which initiates a download of a feed from datatools-server) +1. Loop until job to fetch and process the feed is complete (making http requests to job status) +1. Save a record of the amount of time it took from the completion of the feed version creation request until receiving a status update that the feed version processing has completed +1. Continue to API Integrity Script Steps + +### Query Test Plan Mode Script Steps + +This section is run under the `query` test plan mode. This script assumes that each project has a feed source that has a valid feed version. + +1. Fetch all projects +1. Pick a random project +1. Fetch all feed sources from the selected project +1. Pick a random feed source +1. Fetch all feed versions from the selected feed source +1. Pick a random feed version +1. Continue to API Integrity Script Steps + +### API Integrity Script Steps + +This section is run in all test plan modes. + +1. Fetch all routes +1. Pick a random route +1. Fetch all trips on selected route +1. Check that all trips have same route_id as route +1. Fetch all patterns on selected route +1. Check that all patterns have same route_id +1. Fetch embedded stop_times from trips from a random pattern +1. Check that all stop_times have proper trip_id +1. Check that all stop_times in trips on pattern have same stop sequence as pattern + +## Reporting + +If running this script in GUI mode, it is possible to see all results in real-time by viewing the various listeners at the end of the thread group. + +When running the test plan from the command line in non-gui mode, reports will be saved to the `output` folder. The outputs will contain a csv file of all requests made and an html report summarizing the results. If the test plan mode was `batch`, `fetch` or `upload` than another csv file will be written that contains a list of the elapsed time for processing the creation of a new gtfs feed version. + +The csv files can be loaded into a jmeter GUI listener to view more details. diff --git a/jmeter/amazon-linux-startup-script.sh b/jmeter/amazon-linux-startup-script.sh new file mode 100644 index 000000000..81891a195 --- /dev/null +++ b/jmeter/amazon-linux-startup-script.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# install java 8 +yum install java-1.8.0 -y +yum remove java-1.7.0-openjdk -y + +# install jmeter +./install-jmeter.sh + +# TODO: update jmeter.properties file +# http://www.testingdiaries.com/jmeter-on-aws/ + +# start up jmeter server +apache-jmeter-3.3/bin/jmeter-server diff --git a/jmeter/fixtures/create-feedsource.json b/jmeter/fixtures/create-feedsource.json new file mode 100644 index 000000000..a22e8fafe --- /dev/null +++ b/jmeter/fixtures/create-feedsource.json @@ -0,0 +1,4 @@ +{ + "name": "test-feedsource", + "projectId": "${projectId}" +} diff --git a/jmeter/fixtures/create-project.json b/jmeter/fixtures/create-project.json new file mode 100644 index 000000000..692c0c854 --- /dev/null +++ b/jmeter/fixtures/create-project.json @@ -0,0 +1,3 @@ +{ + "name": "tester 1" +} diff --git a/jmeter/fixtures/feed_route_pattern_stops_and_trips_graphql.js b/jmeter/fixtures/feed_route_pattern_stops_and_trips_graphql.js new file mode 100644 index 000000000..88d0c1b21 --- /dev/null +++ b/jmeter/fixtures/feed_route_pattern_stops_and_trips_graphql.js @@ -0,0 +1,32 @@ +console.log( + JSON.stringify({ + query: ` + query ($namespace: String, $pattern_id: String) { + feed(namespace: $namespace) { + feed_id + feed_version + filename + patterns (pattern_id: [$pattern_id]) { + pattern_id + route_id + stops { + stop_id + } + trips { + trip_id + pattern_id + stop_times { + stop_id + trip_id + } + } + } + } + } + `, + variables: JSON.stringify({ + namespace: "${namespace}", + pattern_id: "${randomPatternId}" + }) + }) +) diff --git a/jmeter/fixtures/feed_route_pattern_trips_graphql.js b/jmeter/fixtures/feed_route_pattern_trips_graphql.js new file mode 100644 index 000000000..4c86fe6c6 --- /dev/null +++ b/jmeter/fixtures/feed_route_pattern_trips_graphql.js @@ -0,0 +1,29 @@ +console.log( + JSON.stringify({ + query: ` + query ($namespace: String, $route_id: String) { + feed(namespace: $namespace) { + feed_id + feed_version + filename + routes (route_id: [$route_id]) { + route_id + route_type + patterns { + pattern_id + route_id + trips { + trip_id + pattern_id + } + } + } + } + } + `, + variables: JSON.stringify({ + namespace: "${namespace}", + route_id: "${randomRouteId}" + }) + }) +) diff --git a/jmeter/fixtures/feed_route_trips_graphql.js b/jmeter/fixtures/feed_route_trips_graphql.js new file mode 100644 index 000000000..3f5de6821 --- /dev/null +++ b/jmeter/fixtures/feed_route_trips_graphql.js @@ -0,0 +1,25 @@ +console.log( + JSON.stringify({ + query: ` + query ($namespace: String, $route_id: String) { + feed(namespace: $namespace) { + feed_id + feed_version + filename + routes (route_id: [$route_id]) { + route_id + route_type + trips { + trip_id + route_id + } + } + } + } + `, + variables: JSON.stringify({ + namespace: "${namespace}", + route_id: "${randomRouteId}" + }) + }) +) diff --git a/jmeter/fixtures/feed_routes_graphql.js b/jmeter/fixtures/feed_routes_graphql.js new file mode 100644 index 000000000..b8f36146e --- /dev/null +++ b/jmeter/fixtures/feed_routes_graphql.js @@ -0,0 +1,20 @@ +console.log( + JSON.stringify({ + query: ` + query ($namespace: String) { + feed(namespace: $namespace) { + feed_id + feed_version + filename + routes { + route_id + route_type + } + } + } + `, + variables: JSON.stringify({ + namespace: "${namespace}" + }) + }) +) diff --git a/jmeter/fixtures/gtfs.zip b/jmeter/fixtures/gtfs.zip new file mode 100644 index 000000000..41b6c4434 Binary files /dev/null and b/jmeter/fixtures/gtfs.zip differ diff --git a/jmeter/fixtures/sample-batch.csv b/jmeter/fixtures/sample-batch.csv new file mode 100644 index 000000000..d791767c7 --- /dev/null +++ b/jmeter/fixtures/sample-batch.csv @@ -0,0 +1,3 @@ +project name,fetch or upload,file or http address +ASC-fetch,fetch,http://documents.atlantaregional.com/transitdata/gtfs_ASC.zip +ASC-upload,upload,fixtures/gtfs.zip \ No newline at end of file diff --git a/jmeter/fixtures/stops_graphql.js b/jmeter/fixtures/stops_graphql.js new file mode 100644 index 000000000..85402b402 --- /dev/null +++ b/jmeter/fixtures/stops_graphql.js @@ -0,0 +1,26 @@ +console.log( + JSON.stringify({ + query: ` + query stops($namespace: String) { + feed(namespace: $namespace) { + namespace + feed_id + feed_version + filename + row_counts { + stops + } + stops { + stop_id + stop_name + stop_lat + stop_lon + } + } + } + `, + variables: JSON.stringify({ + namespace: "${namespace}" + }) + }) +) diff --git a/jmeter/install-jmeter.sh b/jmeter/install-jmeter.sh new file mode 100755 index 000000000..793c91122 --- /dev/null +++ b/jmeter/install-jmeter.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# install jmeter +wget https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-3.3.zip +unzip apache-jmeter-3.3.zip +rm -rf apache-jmeter-3.3.zip + +# install jmeter plugin manager +wget -O apache-jmeter-3.3/lib/ext/jmeter-plugins-manager-0.16.jar https://jmeter-plugins.org/get/ + +# install command line runner +wget -O apache-jmeter-3.3/lib/cmdrunner-2.0.jar http://search.maven.org/remotecontent?filepath=kg/apc/cmdrunner/2.0/cmdrunner-2.0.jar + +# run jmeter to generate command line script +java -cp apache-jmeter-3.3/lib/ext/jmeter-plugins-manager-0.16.jar org.jmeterplugins.repository.PluginManagerCMDInstaller + +# install jpgc-json-2 +apache-jmeter-3.3/bin/PluginsManagerCMD.sh install jpgc-json + +# install jar file for commons csv +wget -O apache-jmeter-3.3/lib/ext/commons-csv-1.5.jar http://central.maven.org/maven2/org/apache/commons/commons-csv/1.5/commons-csv-1.5.jar diff --git a/jmeter/run-gui.sh b/jmeter/run-gui.sh new file mode 100755 index 000000000..bdf62144b --- /dev/null +++ b/jmeter/run-gui.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +apache-jmeter-3.3/bin/jmeter.sh -t test-script.jmx diff --git a/jmeter/run-tests.sh b/jmeter/run-tests.sh new file mode 100755 index 000000000..5239d85c1 --- /dev/null +++ b/jmeter/run-tests.sh @@ -0,0 +1,83 @@ +#!/bin/sh + +if [ -z $1 ] +then + >&2 echo 'Must supply "batch", "fetch", "query" or "upload" as first argument' + exit 1 +fi + +if [ -z $2 ] +then + >&2 echo 'Must supply second argument (number of threads)' + exit 1 +fi + +if [ -z $3 ] +then + >&2 echo 'Must supply third argument (number of loops)' + exit 1 +fi + +if [ -z $4 ] +then + if [ "$1" == "batch" ] + then + >&2 echo 'Must supply fourth argument (csv file) in batch mode' + exit 1 + else + echo 'WARNING: name of project not supplied. In upload mode, all projects will be named "test project #"' + fi +fi + +if [ -z $5 ] +then + echo 'WARNING: s3 bucket not supplied, results will not be uploaded to s3' +fi + +# clean up old output +rm -rf output +mkdir output +mkdir output/result +mkdir output/report + +echo "starting jmeter script" + +jmeter_cmd="apache-jmeter-3.3/bin/jmeter.sh -n -t test-script.jmx -l output/result/result.csv -e -o output/report -Jmode=$1 -Jthreads=$2 -Jloops=$3" + +if [ -n "$4" ] +then + if [ "$1" == "batch" ] + then + jmeter_cmd="$jmeter_cmd -Jbatchfile=$4" + else + jmeter_cmd="$jmeter_cmd -Jproject=$4" + fi +fi + +echo "$jmeter_cmd" +eval "$jmeter_cmd" + +tar_file="output.tar.gz" +if [ -n "$4" ] && [ "$1" != "batch" ] +then + tar_file="$4.tar.gz" +fi +tar -czf $tar_file output + +if [ -z $5 ] +then + echo 'WARNING: s3 bucket not supplied, results will not be uploaded to s3' +else + s3location="s3://$5/dt_jmeter_run_" + if [ -n "$4" ] + then + s3location=${s3location}${tar_file}_ + fi + s3location="$s3location$(date +%Y-%m-%dT%H-%M-%S-%Z).tar.gz" + + echo "Uploading to $s3location" + aws s3 cp $tar_file $s3location + echo "Uploaded to $s3location" +fi + +echo "done" diff --git a/jmeter/run_upload_tests.py b/jmeter/run_upload_tests.py new file mode 100644 index 000000000..f5b11343c --- /dev/null +++ b/jmeter/run_upload_tests.py @@ -0,0 +1,99 @@ +# a script to download all files from an s3 bucket and then run the upload jmeter test plan +# to reduce needed dependencies, this script calls the aws cli tool instead of using +# the python library for aws + +import csv +import os +import re +import subprocess +import sys + +# parse args +if len(sys.argv) < 3: + print '''Usage: + run_upload_tests.py test-plan-mode gtfs-feeds-s3-bucket [results-s3-bucket] + + test-plan-mode + must be `upload` or `fetch` in terms of what datatools-server does. + if `upload` this script will download all zip files from the s3 bucket + and upload them to datatools-server. if `fetch` this script will instruct + datatools-server to create a feed version by fetching from a url + + gtfs-feeds-s3-bucket + bucket to grab feeds from. + If using fetch mode, these feeds must be publicly accessible from s3. + must be a s3 bucket that is accessbile via your aws credentials + + resuts-s3-bucket + Optional. + bucket to upload results to + ''' + sys.exit() + +test_plan_mode = sys.argv[1] +gtfs_feeds_s3_bucket = sys.argv[2] + +if test_plan_mode != 'upload' and test_plan_mode != 'fetch': + print 'Invalid test plan mode' + sys.exit() + +# download a list of all files from an s3 bucket +output = subprocess.check_output(['aws', 's3', 'ls', gtfs_feeds_s3_bucket]) + +# prepare csv file headers +rows = [['project name', 'fetch or upload', 'file or http address']] + +# loop through output +num_feeds_found = 0 +for line in output.split('\n'): + # determine if file in bucket is a zip file + match = re.search('\d\s([\w-]*\.zip)', line) + if match: + num_feeds_found += 1 + zipfile = match.group(1) + project_name = zipfile.rsplit('.', 1)[0] + + # download zip file if running in upload mode + if test_plan_mode == 'upload': + # create feed download dir if it doesn't exist + try: + os.makedirs('fixtures/feeds') + except: + pass + + # dl gtfs file + file_or_location = 'fixtures/feeds/{0}'.format(zipfile) + dl_args = [ + 'aws', + 's3', + 'cp', + 's3://{0}/{1}'.format(gtfs_feeds_s3_bucket, zipfile), + file_or_location + ] + print ' '.join(dl_args) + subprocess.check_output(dl_args) + else: + file_or_location = 'https://{0}.s3.amazonaws.com/{1}'.format(gtfs_feeds_s3_bucket, zipfile) + + # append row + rows.append([ + project_name, + test_plan_mode, + file_or_location + ]) + +print 'Found {0} feeds in this bucket'.format(num_feeds_found) + +# write csv file +csv_filename = 'fixtures/s3-batch.csv' +with open(csv_filename, 'w') as f: + writer = csv.writer(f) + writer.writerows(rows) + +# run jmeter +jmeter_args = ['./run-tests.sh', 'batch', '1', '1', csv_filename] +if len(sys.argv) == 4: + jmeter_args.append(sys.argv[3]) + +print ' '.join(jmeter_args) +subprocess.check_output(jmeter_args) diff --git a/jmeter/setup-instances-and-tunnels.py b/jmeter/setup-instances-and-tunnels.py new file mode 100644 index 000000000..46c3dbfa7 --- /dev/null +++ b/jmeter/setup-instances-and-tunnels.py @@ -0,0 +1,14 @@ +import sys + +# start ec2 instances + +# for each instance + # install jmeter script with custom port number + + # run jmeter script on instance + +# setup ssh tunnel on localhost for each ec2 instace + +# ssh -L 24001:127.0.0.1:24001 \ +# -R 25000:127.0.0.1:25000 \ +# -L 26001:127.0.0.1:26001 -N -f @ diff --git a/jmeter/test-script.jmx b/jmeter/test-script.jmx new file mode 100644 index 000000000..60e890cae --- /dev/null +++ b/jmeter/test-script.jmx @@ -0,0 +1,1496 @@ + + + + + + false + false + + + + + + + + startnextloop + + false + ${__P(loops, 1)} + + ${__P(threads, 1)} + ${__P(threads, 1)} + 1508372483000 + 1508372483000 + false + + + + + + 1 + + 1 + runNumber + + false + + + + + + batchCsvFile + ${__P(batchfile, fixtures/sample-batch.csv)} + = + + + continueBatchLoop + true + = + + + feedUrl + http://documents.atlantaregional.com/transitdata/gtfs_ASC.zip + = + + + processingFeedVersionJob + true + = + + + projectName + ${__P(project,test project)} + = + + + testPlanMode + ${__P(mode,upload)} + = + + + uploadPath + fixtures/gtfs.zip + = + + + + + + + + + localhost + 4000 + + + + Assumes that datatools-server is running on port 4000 + 6 + + + + + + + + Accept-Encoding + gzip + + + + + + ${continueBatchLoop} + + + + 1 + + 1 + batchFeedIdx + + true + true + + + + "${testPlanMode}"=="batch" || "${testPlanMode}"=="fetch" || "${testPlanMode}"=="upload" + false + + + + + + + /* + * This is sort of a pre-everything processing thing. It needs to be under this sampler because otherwise it applies to all samplers. + */ + +import org.apache.commons.csv.CSVFormat; +import org.apache.commons.csv.CSVRecord; + +import java.io.*; +import java.util.*; + +/** + * ---------------------------------------------------------------------------------------------------- + * create directory for output results if needed + * ---------------------------------------------------------------------------------------------------- + */ +try { + + // returns pathnames for files and directory + File f = new File("output/result"); + + // create + boolean bool = f.mkdirs(); + + // print + if (f.mkdirs()) { + System.out.println("Output directory created"); + } + +} catch(Exception e) { + + // if any error occurs + e.printStackTrace(); +} + +// if file already exists, don't overwrite it +File f = new File("output/result/feed-processing-job-times.csv"); +if (!f.exists()) { + // write new file with headers + FileOutputStream out = new FileOutputStream("output/result/feed-processing-job-times.csv"); + String headers = "timeStamp,elapsed,label,threadName,success,failureMessage"; + out.write(headers.getBytes("UTF-8")); + out.write(System.getProperty("line.separator").getBytes("UTF-8")); + out.flush(); + out.close(); +} + +/** + * ---------------------------------------------------------------------------------------------------- + * set variables for each thread loop, cause jmeter doesn't update it??? + * ---------------------------------------------------------------------------------------------------- + */ +vars.put("processingFeedVersionJob", "true"); + +/** + * ---------------------------------------------------------------------------------------------------- + * read csv file for batch processing if needed + * ---------------------------------------------------------------------------------------------------- + */ +if (vars.get("testPlanMode").equals("batch")) { + int feedIdx = Integer.parseInt(vars.get("batchFeedIdx")); + + // read csv file + Reader rdr = null; + try { + rdr = new FileReader(vars.get("batchCsvFile")); + Iterable<CSVRecord> records = CSVFormat.RFC4180.withFirstRecordAsHeader().parse(rdr); + int curRowIdx = 0; + Iterator<CSVRecord> recordIterator = records.iterator(); + while (recordIterator.hasNext() || curRowIdx < feedIdx) { + curRowIdx++; + CSVRecord record = recordIterator.next(); + + // extract data from need row + if (curRowIdx == feedIdx) { + vars.put("projectName", record.get("project name")); + String testPlanMode = record.get("fetch or upload"); + vars.put("feedVersionCreationMode", testPlanMode); + if (testPlanMode.equals("upload")) { + vars.put("uploadPath", record.get("file or http address")); + } else { + vars.put("feedUrl", record.get("file or http address")); + } + + // check if loop should be exited + if (!recordIterator.hasNext()) { + vars.put("continueBatchLoop", "false"); + } + // data has been found, exit loop + break; + } + + // check if loop should be exited + if (!recordIterator.hasNext()) { + vars.put("continueBatchLoop", "false"); + } + } + } catch (Exception e) { + e.printStackTrace(); + } +} else { + // not in batch mode, set needed vars + vars.put("feedVersionCreationMode", vars.get("testPlanMode")); + vars.put("continueBatchLoop", "false"); +} + + groovy + + + + true + + + + false + { + "name": "${projectName} ${runNumber}" +} + = + + + + + + + + /api/manager/secure/project + POST + true + false + true + false + + + + + + + projectId + $.id + + + + + + + Content-Type + application/json + + + + + + + "${feedVersionCreationMode}"=="upload" + false + + + + true + + + + false + { + "name": "test-feedsource", + "projectId": "${projectId}" +} + = + + + + + + + + /api/manager/secure/feedsource + POST + true + false + true + false + + + + + + + 2000 + 1000.0 + + + + feedSourceId + $.id + + + + + + + Content-Type + application/json + + + + + + + + + + ${uploadPath} + + + + + + + + + + + + + /api/manager/secure/feedversion?feedSourceId=${feedSourceId}&lastModified=1508291519372 + POST + true + false + true + false + + + + + + + + + Content-Type + application/zip + + + + + + 2000 + 1000.0 + + + + Store time that upload completed + false + + + // get time that upload completed +long feedUploadCompleteTime = prev.getEndTime(); + +// store time to variable +vars.put("feedUploadCompleteTime", String.valueOf(feedUploadCompleteTime)); + + + + jobId + $.jobId + + + + + + + "${feedVersionCreationMode}"=="fetch" + false + + + + true + + + + false + { + "name": "test-feedsource", + "projectId": "${projectId}", + "url": "${feedUrl}" +} + = + + + + + + + + /api/manager/secure/feedsource + POST + true + false + true + false + + + + + + + 2000 + 1000.0 + + + + feedSourceId + $.id + + + + + + + Content-Type + application/json + + + + + + + + + + + + + + /api/manager/secure/feedsource/${feedSourceId}/fetch + POST + true + false + true + false + + + + + + + + + Content-Type + application/zip + + + + + + 2000 + 1000.0 + + + + Store time that upload completed + false + + + // get time that upload completed +long feedUploadCompleteTime = prev.getEndTime(); + +// store time to variable +vars.put("feedUploadCompleteTime", String.valueOf(feedUploadCompleteTime)); + + + + jobId + $.jobId + + + + + + + ${processingFeedVersionJob} + + + + + + + + + + + /api/manager/secure/status/jobs/${jobId} + GET + true + false + true + false + + + + + + + 2000 + + + + feedSourceJobComplete;feedVersionId;feedSourceJobError;feedSourceJobMessage + $.status.completed;$.feedVersionId;$.status.error;$.status.message + + false;false;false;no message + + + + + + false + if (vars.get("feedSourceJobComplete") == "true") { + // save variable to exit loop + vars.put("processingFeedVersionJob", "false"); + + // get time that job completed + long jobCompleteTime = prev.getEndTime(); + + // get HTTP Sampler 1 execution time from variable + long feedUploadCompleteTime = Long.parseLong(vars.get("feedUploadCompleteTime")); + + // calculate difference + long feedProcessingTime = (jobCompleteTime - feedUploadCompleteTime); + + // open file for writing in append mode + FileOutputStream out = new FileOutputStream("output/result/feed-processing-job-times.csv", true); + + // prepare row string + // timeStamp + String row = String.valueOf(jobCompleteTime) + ","; + + // elapsed + row += String.valueOf(feedProcessingTime) + ","; + + // label + row += "Feed Processing Time,"; + + // threadName + row += "Thread Group 1-" + (ctx.getThreadNum() + 1) + ","; + + // success and failureMessage + String error = vars.get("feedSourceJobError"); + if (error.equals("true")) { + String errorMessage = vars.get("feedSourceJobMessage"); + row += "false," + errorMessage; + } else { + row += "true,"; + } + + // write data + out.write(row.getBytes("UTF-8")); + out.write(System.getProperty("line.separator").getBytes("UTF-8")); + out.flush(); + out.close(); +} + Check if job is complete and if so, prepare to exit loop and calculate feed processing time + + + + + + + def failureMessage = ""; + +// see if job is complete +if (vars.get("processingFeedVersionJob") == "false") { + if (vars.get("feedSourceJobError") == "true") { + failureMessage += "an error occurred while processing the feed: " + failureMessage += vars.get("feedSourceJobMessage") + "\n" + } +} else { + // capture null response in event that job status returns null before it returns completion message + if (prev.getResponseDataAsString().equals("null")) { + failureMessage += "received null job status response before job status completion was set to true\n" + } +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + + + + "${testPlanMode}"=="query" + false + + + + false + true + false + + + + + + + + + + + /api/manager/secure/project + GET + true + false + true + false + + + + + + + false + + + // do not do batch loop in this case +vars.put("continueBatchLoop", "false") + + + + projectId + $[*].id + 0 + + + + + + + + + + + + /api/manager/secure/feedsource?projectId=${projectId} + GET + true + false + true + false + + + + + + + 2000 + 1000.0 + + + + feedSourceId + $[*].id + 0 + + + + + + + + + + + + /api/manager/secure/feedversion?feedSourceId=${feedSourceId} + GET + true + false + true + false + + + + + + + 2000 + 1000.0 + + + + feedVersionId + $[*].id + 0 + + + + + + + + + + + fixtures/gtfs_ASC.zip + file + application/zip + + + + + + + + + + + /api/manager/secure/feedversion/${feedVersionId} + GET + true + false + true + true + + + + + + + 2000 + 1000.0 + + + + namespace + $.namespace + + + + + + true + + + + false + {"query":"\n query stops($namespace: String) {\n feed(namespace: $namespace) {\n namespace\n feed_id\n feed_version\n filename\n row_counts {\n stops\n }\n stops {\n stop_id\n stop_name\n stop_lat\n stop_lon\n }\n }\n }\n ", "variables": {"namespace": "${namespace}" }} + = + + + + + + + + /api/manager/secure/gtfs/graphql + POST + true + false + true + false + + + + + + + 2000 + 1000.0 + + + + + + Content-Type + application/json + + + + + + + + + import groovy.json.JsonSlurper; + +def failureMessage = ""; +def jsonResponse = null; + +JsonSlurper JSON = new JsonSlurper (); + +// parse json +try { + jsonResponse = JSON.parseText(prev.getResponseDataAsString()); +} catch (Exception e) { + failureMessage += "Invalid JSON.\n" +} + +def totalNumStops = jsonResponse.data.feed.row_counts.stops +def lenQueriedStops = jsonResponse.data.feed.stops.size() + +if (totalNumStops == 0) { + failureMessage += "recieved stop row count of 0 stops\n" +} + + +if (lenQueriedStops == 0) { + failureMessage += "recieved 0 returned stops\n" +} + +if (lenQueriedStops > 50) { + failureMessage += "recieved more than 50 returned stops\n" +} + +if (totalNumStops < lenQueriedStops) { + failureMessage += "recieved more returned stops (" + lenQueriedStops + failureMessage += ") than amount listed in row count of stops (" + totalNumStops + ")\n" +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + + true + + + + false + {"query":"\n query ($namespace: String) {\n feed(namespace: $namespace) {\n feed_id\n feed_version\n filename\n routes {\n route_id\n route_type\n }\n }\n }\n ", "variables": {"namespace": "${namespace}"}} + = + + + + + + + + /api/manager/secure/gtfs/graphql + POST + true + false + true + false + + + + + + + 1000 + 1000.0 + + + + + + Content-Type + application/json + + + + + + randomRouteId + $.data.feed.routes[*].route_id + 0 + + + + + true + + + + false + {"query":"\n query ($namespace: String, $route_id: String) {\n feed(namespace: $namespace) {\n feed_id\n feed_version\n filename\n routes (route_id: [$route_id]) {\n route_id\n route_type\n trips {\n trip_id\n route_id\n }\n }\n }\n }\n ", "variables": {"namespace": "${namespace}", "route_id": "${randomRouteId}"}} + = + + + + + + + + /api/manager/secure/gtfs/graphql + POST + true + false + true + false + + + + + + + 1000 + 1000.0 + + + + + + Content-Type + application/json + + + + + + + + + import groovy.json.JsonSlurper; + +def failureMessage = ""; +def jsonResponse = null; + +JsonSlurper JSON = new JsonSlurper (); + +// parse json +try { + jsonResponse = JSON.parseText(prev.getResponseDataAsString()); +} catch (Exception e) { + failureMessage += "Invalid JSON.\n" +} + +def trips = jsonResponse.data.feed.routes[0].trips + +trips.each { + if (!it.route_id.equals(vars.get("randomRouteId"))) { + failureMessage += "route_id mismatch on trip: " + it.trip_id + "\n" + } +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + + true + + + + false + {"query":"\n query ($namespace: String, $route_id: String) {\n feed(namespace: $namespace) {\n feed_id\n feed_version\n filename\n routes (route_id: [$route_id]) {\n route_id\n route_type\n patterns {\n pattern_id\n route_id\n trips {\n trip_id\n pattern_id\n }\n }\n }\n }\n }\n ", "variables": {"namespace": "${namespace}", "route_id": "${randomRouteId}"}} + = + + + + + + + + /api/manager/secure/gtfs/graphql + POST + true + false + true + false + + + + + + + 1000 + 1000.0 + + + + + + Content-Type + application/json + + + + + + + + + import groovy.json.JsonSlurper; + +def failureMessage = ""; +def jsonResponse = null; + +JsonSlurper JSON = new JsonSlurper (); + +// parse json +try { + jsonResponse = JSON.parseText(prev.getResponseDataAsString()); +} catch (Exception e) { + failureMessage += "Invalid JSON.\n" +} + +def patterns = jsonResponse.data.feed.routes[0].patterns + +patterns.each { + if (!it.route_id.equals(vars.get("randomRouteId"))) { + failureMessage += "route_id mismatch on trip: " + it.trip_id + "\n" + } +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + randomPatternId + $.data.feed.routes[0].patterns[*].pattern_id + 0 + + + + + true + + + + false + {"query":"\n query ($namespace: String, $pattern_id: String) {\n feed(namespace: $namespace) {\n feed_id\n feed_version\n filename\n patterns (pattern_id: [$pattern_id]) {\n pattern_id\n route_id\n stops {\n stop_id\n }\n trips {\n trip_id\n pattern_id\n stop_times {\n stop_id\n trip_id\n }\n }\n }\n }\n }\n ", "variables": {"namespace": "${namespace}", "pattern_id": "${randomPatternId}"}} + = + + + + + + + + /api/manager/secure/gtfs/graphql + POST + true + false + true + false + + + + + + + 1000 + 1000.0 + + + + + + Content-Type + application/json + + + + + + + + + import groovy.json.JsonSlurper; + +def failureMessage = ""; +def jsonResponse = null; + +JsonSlurper JSON = new JsonSlurper (); + +// parse json +try { + jsonResponse = JSON.parseText(prev.getResponseDataAsString()); +} catch (Exception e) { + failureMessage += "Invalid JSON.\n" +} + +def trips = jsonResponse.data.feed.patterns[0].trips + +trips.each { trip -> + trip.stop_times.each { stop_time -> + if (!trip.trip_id.equals(stop_time.trip_id)) { + failureMessage += "trip_id mismatch." + failureMessage += "Parent trip has trip_id: " + trip.trip_id + failureMessage += " Stop Time has stop_id: " + stop_time.stop_id + failureMessage += " and trip_id: " + stop_time.trip_id + "\n" + } + } +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + + + + import groovy.json.JsonSlurper; + +def failureMessage = ""; +def jsonResponse = null; + +JsonSlurper JSON = new JsonSlurper (); + +// parse json +try { + jsonResponse = JSON.parseText(prev.getResponseDataAsString()); +} catch (Exception e) { + failureMessage += "Invalid JSON.\n" +} + +def numStopsInPattern = jsonResponse.data.feed.patterns[0].stops.size() +def trips = jsonResponse.data.feed.patterns[0].trips +def numStopTimesInTrip = 0 + +trips.each { trip -> + numStopTimesInTrip = trip.stop_times.size() + if (numStopTimesInTrip != numStopsInPattern) { + failureMessage += "mismatch in number of trip stops vs number of pattern stops." + failureMessage += "There are " + numStopsInPattern + " pattern stops" + failureMessage += ", but there are " + numStopTimesInTrip + " stop_times" + failureMessage += " in trip " + trip.trip_id + "\n" + } else { + trip.stop_times.eachWithIndex { stop_time, idx -> + if (!stop_time.stop_id.equals(trip.stop_times[idx].stop_id)) { + failureMessage += "stop_id mismatch." + failureMessage += "Pattern stop list stop_id: " + trip.stop_times[idx].stop_id + failureMessage += " at index: " + idx + failureMessage += " Stop Time of trip " + trip.trip_id + failureMessage += " at index: " + idx + failureMessage += " has stop_id: " + stop_time.stop_id + "\n" + } + } + } +} + +// set assertion result to fail if an error happened +if (failureMessage?.trim()) { + AssertionResult.setFailureMessage(failureMessage); + AssertionResult.setFailure(true); +} + groovy + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + true + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + true + true + true + true + true + false + true + true + true + false + true + 0 + true + true + true + true + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + true + true + true + true + true + true + true + true + true + false + true + 0 + true + true + true + true + true + true + true + true + true + + + + + + + + + true + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + + + + + + + + diff --git a/pom.xml b/pom.xml index e290ac2f9..88247d3f6 100644 --- a/pom.xml +++ b/pom.xml @@ -6,27 +6,65 @@ com.conveyal datatools-server - 1.0.0 + 3.2.0-SNAPSHOT + + + MIT License + https://opensource.org/licenses/MIT + + - + + + + Landon Reed + lreed@conveyal.com + Conveyal + http://conveyal.com/ + + + Andrew Byrd + abyrd@conveyal.com + Conveyal + http://conveyal.com/ + + + David Emory + demory@conveyal.com + Conveyal + http://conveyal.com/ + + + Evan Siroky + esiroky@conveyal.com + Conveyal + http://conveyal.com/ + + + + - scm:git:https://github.com/conveyal/datatools-server.git - scm:git:ssh://git@github.com/conveyal/datatools-server.git - https://github.com/conveyal/datatools-server.git + scm:git:https://github.com/catalogueglobal/datatools-server.git + scm:git:ssh://git@github.com/catalogueglobal/datatools-server.git + https://github.com/catalogueglobal/datatools-server.git - - - - - - conveyal-maven-repo - Conveyal Maven Repository - s3://maven.conveyal.com/ - - - + + 2.9.0 + + + + src/main/resources + true + + **/*.properties + gtfs/* + public/* + + + org.apache.maven.plugins @@ -39,12 +77,6 @@ dt-${git.commit.id.describe} - - - com.conveyal.datatools.manager.DataManager - - - @@ -68,6 +100,7 @@ org.apache.maven.plugins maven-compiler-plugin + 3.7.0 1.8 1.8 @@ -86,19 +119,62 @@ ${project.basedir}/.git + + true + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + com.conveyal.datatools.manager.DataManager + + + + + org.jacoco + jacoco-maven-plugin + 0.8.2 + + + + prepare-agent + + + + report + test + + report + + + + + + + maven-surefire-plugin + 2.22.0 + + + org.junit.platform + junit-platform-surefire-provider + 1.3.1 + + + org.junit.jupiter + junit-jupiter-engine + 5.3.1 + + + - - - - - org.kuali.maven.wagons - maven-s3-wagon - 1.2.1 - - - com.sparkjava spark-core 2.5 + org.slf4j slf4j-api @@ -149,124 +240,125 @@ 1.1.3 + org.mapdb mapdb 1.0.8 + org.eclipse.persistence javax.persistence 2.1.0 + com.conveyal gtfs-lib - 2.0.1 + 4.1.1 + org.mongodb mongodb-driver - 3.3.0 + 3.5.0 + com.google.guava guava - 15.0 - - - - com.conveyal - gtfs-api - 0.5-SNAPSHOT + 18.0 + - com.conveyal - gtfs-validator-json - 0.0.1-SNAPSHOT + com.fasterxml.jackson.core + jackson-core + ${jackson.version} - + - com.conveyal - r5 - 2.0.0-SNAPSHOT + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + ${jackson.version} - com.amazonaws - aws-java-sdk-s3 - 1.11.103 - - - com.amazonaws - aws-java-sdk - 1.11.103 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - 2.5.4 + com.fasterxml.jackson.core + jackson-databind + ${jackson.version} - org.geotools - gt-geojson - 14.3 + com.fasterxml.jackson.core + jackson-annotations + ${jackson.version} + com.sparkpost sparkpost-lib 0.15 + net.sf.trove4j trove4j 3.0.3 + org.geotools gt-shapefile - 14.0 + 19.2 + - com.conveyal - jackson2-geojson - 0.8 + org.junit.jupiter + junit-jupiter-api + 5.3.1 + test + - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - 2.6.1 + com.bugsnag + [3.0,4.0) + bugsnag + - commons-io - commons-io - 2.4 + com.auth0 + java-jwt + 2.3.0 + + - junit - junit - 4.12 + io.rest-assured + rest-assured + 3.1.1 test - - - - - - - + + + org.hamcrest + java-hamcrest + 2.0.0.0 + diff --git a/src/main/java/com/conveyal/datatools/common/persistence/DataStore.java b/src/main/java/com/conveyal/datatools/common/persistence/DataStore.java deleted file mode 100644 index dca0b9840..000000000 --- a/src/main/java/com/conveyal/datatools/common/persistence/DataStore.java +++ /dev/null @@ -1,7 +0,0 @@ -package com.conveyal.datatools.common.persistence; - -/** - * Created by landon on 11/3/16. - */ -public class DataStore { -} diff --git a/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java b/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java index e38ca288d..7853beea5 100644 --- a/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java +++ b/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java @@ -1,104 +1,223 @@ package com.conveyal.datatools.common.status; import com.conveyal.datatools.manager.DataManager; -import com.google.common.eventbus.EventBus; -import com.google.common.eventbus.Subscribe; -import jdk.nashorn.internal.scripts.JO; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.eclipse.jetty.util.ConcurrentHashSet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; -import java.util.*; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; /** * Created by landon on 6/13/16. */ public abstract class MonitorableJob implements Runnable { + private static final Logger LOG = LoggerFactory.getLogger(MonitorableJob.class); + protected final String owner; - protected String owner; - protected String name; - protected JobType type; - protected EventBus eventBus; + // Public fields will be serialized over HTTP API and visible to the web client + public final JobType type; + public String parentJobId; + public JobType parentJobType; + // Status is not final to allow some jobs to have extra status fields. + public Status status = new Status(); + // Name is not final in case it needs to be amended during job processing. + public String name; + public final String jobId = UUID.randomUUID().toString(); - public String jobId = UUID.randomUUID().toString(); - - protected List nextJobs = new ArrayList<>(); + /** + * Additional jobs that will be run after the main logic of this job has completed. + * This job is not considered entirely completed until its sub-jobs have all completed. + */ + protected List subJobs = new ArrayList<>(); public enum JobType { UNKNOWN_TYPE, BUILD_TRANSPORT_NETWORK, CREATE_FEEDVERSION_FROM_SNAPSHOT, - PROCESS_SNAPSHOT, + // **** Legacy snapshot jobs + PROCESS_SNAPSHOT_MERGE, + PROCESS_SNAPSHOT_EXPORT, + // **** + LOAD_FEED, VALIDATE_FEED, + DEPLOY_TO_OTP, FETCH_PROJECT_FEEDS, FETCH_SINGLE_FEED, - MAKE_PROJECT_PUBLIC + MAKE_PROJECT_PUBLIC, + PROCESS_FEED, + CREATE_SNAPSHOT, + EXPORT_SNAPSHOT_TO_GTFS, + CONVERT_EDITOR_MAPDB_TO_SQL, + VALIDATE_ALL_FEEDS, + MERGE_PROJECT_FEEDS } public MonitorableJob(String owner, String name, JobType type) { - // register job with eventBus - this.eventBus = new EventBus(); - eventBus.register(this); - this.owner = owner; this.name = name; this.type = type; - storeJob(); + registerJob(); } public MonitorableJob(String owner) { this(owner, "Unnamed Job", JobType.UNKNOWN_TYPE); } - public String getName() { - return name; - } - - public JobType getType() { - return type; - } - - public abstract Status getStatus(); - - protected void storeJob() { - Set userJobs = DataManager.userJobsMap.get(this.owner); + /** + * This method should never be called directly or overridden. + * It is a standard start-up stage for all monitorable jobs. + */ + private void registerJob() { + ConcurrentHashSet userJobs = DataManager.userJobsMap.get(this.owner); if (userJobs == null) { - userJobs = new HashSet<>(); + userJobs = new ConcurrentHashSet<>(); } userJobs.add(this); DataManager.userJobsMap.put(this.owner, userJobs); } - protected void jobFinished() { - // kick off any next jobs - for(Runnable job : nextJobs) { - new Thread(job).start(); - } - + /** + * This method should never be called directly or overridden. It is a standard clean up stage for all + * monitorable jobs. + */ + private void unRegisterJob () { // remove this job from the user-job map - Set userJobs = DataManager.userJobsMap.get(this.owner); + ConcurrentHashSet userJobs = DataManager.userJobsMap.get(this.owner); if (userJobs != null) userJobs.remove(this); } - public void addNextJob(Runnable job) { - nextJobs.add(job); + /** + * This method must be overridden by subclasses to perform the core steps of the job. + */ + public abstract void jobLogic() throws Exception; + + /** + * This method may be overridden in the event that you want to perform a special final step after this job and + * all sub-jobs have completed. + */ + public void jobFinished () { + // do nothing by default. } - @Subscribe - public abstract void handleStatusEvent (Map statusMap); + /** + * This implements Runnable. All monitorable jobs should use this exact sequence of steps. Don't override this method; + * override jobLogic and jobFinished method(s). + */ + public void run () { + boolean parentJobErrored = false; + boolean subTaskErrored = false; + String cancelMessage = ""; + long startTimeNanos = System.nanoTime(); + try { + // First execute the core logic of the specific MonitorableJob subclass + jobLogic(); + if (status.error) { + parentJobErrored = true; + cancelMessage = String.format("Task cancelled due to error in %s task", getClass().getSimpleName()); + } + // Immediately run any sub-jobs in sequence in the current thread. + // This hogs the current thread pool thread but makes execution order predictable. + int subJobNumber = 1; + int subJobsTotal = subJobs.size() + 1; + + for (MonitorableJob subJob : subJobs) { + if (!parentJobErrored && !subTaskErrored) { + // Run sub-task if no error has errored during parent job or previous sub-task execution. + // FIXME this will overwrite a message if message is set somewhere else. + // FIXME If a subtask fails, cancel the parent task and cancel or remove subsequent sub-tasks. +// status.message = String.format("Finished %d/%d sub-tasks", subJobNumber, subJobsTotal); + status.percentComplete = subJobNumber * 100D / subJobsTotal; + status.error = false; // FIXME: remove this error=false assignment + subJob.run(); + + // Record if there has been an error in the execution of the sub-task. (Note: this will not + // incorrectly overwrite a 'true' value with 'false' because the sub-task is only run if + // jobHasErrored is false. + if (subJob.status.error) { + subTaskErrored = true; + cancelMessage = String.format("Task cancelled due to error in %s task", subJob.getClass().getSimpleName()); + } + } else { + // Cancel (fail) next sub-task and continue. + subJob.cancel(cancelMessage); + } + subJobNumber += 1; + } + // FIXME: should jobFinished be run if task or any sub-task fails? + if (subTaskErrored) { + // Cancel parent job completion if an error was encountered in task/sub-task. No need to cancel sub-task + // because the error presumably already occurred and has a better error message. + cancel(cancelMessage); + } + + // Run final steps of job pending completion or error. Note: any tasks that depend on job success should + // check job status to determine if final step should be executed (e.g., storing feed version in MongoDB). + // TODO: should we add separate hooks depending on state of job/sub-tasks (e.g., success, catch, finally) + jobFinished(); + + status.completed = true; + + // We retain finished or errored jobs on the server until they are fetched via the API, which implies they + // could be displayed by the client. + } catch (Exception ex) { + // Set job status to failed + // Note that when an exception occurs during job execution we do not call unRegisterJob, + // so the job continues to exist in the failed state and the user can see it. + LOG.error("Job failed", ex); + status.update(true, ex.getMessage(), 100, true); + } + status.startTime = TimeUnit.NANOSECONDS.toMillis(startTimeNanos); + status.duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNanos); + LOG.info("{} {} {} in {} ms", type, jobId, status.error ? "errored" : "completed", status.duration); + } + + /** + * An alternative method to run(), this method updates job status with error and should contain any other + * clean up steps needed to complete job in an errored state (generally due to failure in a previous task in + * the chain). + */ + private void cancel(String message) { + // Updating the job status with error is all we need to do in order to move the job into completion. Once the + // user fetches the errored job, it will be automatically removed from the system. + status.update(true, message, 100); + status.completed = true; + // FIXME: Do we need to run any clean up here? + } + + /** + * Enqueues a sub-job to be run when the main logic of this job has finished. + */ + public void addNextJob(MonitorableJob ...jobs) { + for (MonitorableJob job : jobs) { + job.parentJobId = this.jobId; + job.parentJobType = this.type; + subJobs.add(job); + } + } /** * Represents the current status of this job. */ - public static class Status implements Cloneable { + public static class Status { /** What message (defined in messages.) should be displayed to the user? */ public String message; + /** Detailed exception method to display to user (to help with support requests) */ + public String exceptionType; + public String exceptionDetails; + /** Is this deployment completed (successfully or unsuccessfully) */ - public boolean completed; + public boolean completed = false; /** What was the error (null if no error)? */ - public boolean error; + public boolean error = false; /** Is the item currently being uploaded to the server? */ public boolean uploading; @@ -109,35 +228,51 @@ public static class Status implements Cloneable { /** How much of task is complete? */ public double percentComplete; + public long startTime; + public long duration; + // When was the job initialized? - public String initialized; + public String initialized = LocalDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME); // When was the job last modified? - public String modified; + public String modified = LocalDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME); // Name of file/item once completed public String completedName; - public Status() { - this.error = false; - this.completed = false; - this.initialized = LocalDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME); - this.modified= LocalDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME); - this.percentComplete = 0; + public void update (String message, double percentComplete) { + this.message = message; + this.percentComplete = percentComplete; + } + + public void update (boolean isError, String message, double percentComplete) { + this.error = isError; + this.message = message; + this.percentComplete = percentComplete; } - public Status clone () { - Status ret = new Status(); - ret.message = message; - ret.completed = completed; - ret.error = error; - ret.uploading = uploading; - ret.name = name; - ret.percentComplete = percentComplete; - ret.initialized = initialized; - ret.modified = modified; - ret.completedName = completedName; - return ret; + public void update (boolean isError, String message, double percentComplete, boolean isComplete) { + this.error = isError; + this.message = message; + this.percentComplete = percentComplete; + this.completed = isComplete; } + + public void fail (String message, Exception e) { + this.error = true; + this.percentComplete = 100; + this.completed = true; + this.message = message; + this.exceptionDetails = ExceptionUtils.getStackTrace(e); + this.exceptionType = e.getMessage(); + } + + public void fail (String message) { + this.error = true; + this.percentComplete = 100; + this.completed = true; + this.message = message; + } + } } diff --git a/src/main/java/com/conveyal/datatools/common/utils/Consts.java b/src/main/java/com/conveyal/datatools/common/utils/Consts.java new file mode 100644 index 000000000..eee448005 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/common/utils/Consts.java @@ -0,0 +1,8 @@ +package com.conveyal.datatools.common.utils; + +/** + * Created by landon on 4/14/17. + */ +public final class Consts { + public static final String COLUMN_SPLIT = ",(?=([^\"]*\"[^\"]*\")*[^\"]*$)"; +} diff --git a/src/main/java/com/conveyal/datatools/manager/utils/CorsFilter.java b/src/main/java/com/conveyal/datatools/common/utils/CorsFilter.java similarity index 59% rename from src/main/java/com/conveyal/datatools/manager/utils/CorsFilter.java rename to src/main/java/com/conveyal/datatools/common/utils/CorsFilter.java index 5e72ce81f..bcfecc044 100644 --- a/src/main/java/com/conveyal/datatools/manager/utils/CorsFilter.java +++ b/src/main/java/com/conveyal/datatools/common/utils/CorsFilter.java @@ -1,17 +1,17 @@ -package com.conveyal.datatools.manager.utils; +package com.conveyal.datatools.common.utils; /** - * Created by landon on 4/21/16. + * Created by demory on 9/2/16. */ -import java.util.HashMap; -import spark.Filter; -import spark.Request; -import spark.Response; + import spark.Spark; +import java.util.HashMap; + /** * Really simple helper for enabling CORS in a spark application; */ + public final class CorsFilter { private static final HashMap corsHeaders = new HashMap(); @@ -24,14 +24,6 @@ public final class CorsFilter { } public final static void apply() { - Filter filter = new Filter() { - @Override - public void handle(Request request, Response response) throws Exception { - corsHeaders.forEach((key, value) -> { - response.header(key, value); - }); - } - }; - Spark.after(filter); + Spark.after((request, response) -> corsHeaders.forEach((key, value) -> response.header(key, value))); } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java b/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java new file mode 100644 index 000000000..ecbda5f1f --- /dev/null +++ b/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java @@ -0,0 +1,112 @@ +package com.conveyal.datatools.common.utils; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.HttpMethod; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.CannedAccessControlList; +import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.persistence.FeedStore; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.Request; +import spark.Response; + +import javax.servlet.MultipartConfigElement; +import javax.servlet.ServletException; +import javax.servlet.http.Part; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.util.Date; + +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; + +/** + * Created by landon on 8/2/16. + */ +public class S3Utils { + + private static final Logger LOG = LoggerFactory.getLogger(S3Utils.class); + private static final int REQUEST_TIMEOUT_MSEC = 30 * 1000; + + public static String uploadBranding(Request req, String key) throws IOException, ServletException { + String url; + + String s3Bucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket"); + if (s3Bucket == null) { + haltWithMessage(req, 400, "s3bucket is incorrectly configured on server"); + } + + // Get file from request + if (req.raw().getAttribute("org.eclipse.jetty.multipartConfig") == null) { + MultipartConfigElement multipartConfigElement = new MultipartConfigElement(System.getProperty("java.io.tmpdir")); + req.raw().setAttribute("org.eclipse.jetty.multipartConfig", multipartConfigElement); + } + Part part = req.raw().getPart("file"); + String extension = "." + part.getContentType().split("/", 0)[1]; + File tempFile = File.createTempFile(key + "_branding", extension); + InputStream inputStream; + try { + inputStream = part.getInputStream(); + FileOutputStream out = new FileOutputStream(tempFile); + IOUtils.copy(inputStream, out); + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Unable to read uploaded file"); + } + + try { + String keyName = "branding/" + key + extension; + url = "https://s3.amazonaws.com/" + s3Bucket + "/" + keyName; + // FIXME: This may need to change during feed store refactor + AmazonS3 s3client = FeedStore.s3Client; + s3client.putObject(new PutObjectRequest( + s3Bucket, keyName, tempFile) + // grant public read + .withCannedAcl(CannedAccessControlList.PublicRead)); + return url; + } catch (AmazonServiceException ase) { + ase.printStackTrace(); + haltWithMessage(req, 400, "Error uploading file to S3"); + return null; + } finally { + boolean deleted = tempFile.delete(); + if (!deleted) { + LOG.error("Could not delete s3 upload file."); + } + } + } + + /** + * Download an object in the selected format from S3, using presigned URLs. + * @param s3 + * @param bucket name of the bucket + * @param filename both the key and the format + * @param redirect + * @param res + * @return + */ + public static String downloadFromS3(AmazonS3 s3, String bucket, String filename, boolean redirect, Response res){ + Date expiration = new Date(); + expiration.setTime(expiration.getTime() + REQUEST_TIMEOUT_MSEC); + + GeneratePresignedUrlRequest presigned = new GeneratePresignedUrlRequest(bucket, filename); + presigned.setExpiration(expiration); + presigned.setMethod(HttpMethod.GET); + URL url = s3.generatePresignedUrl(presigned); + + if (redirect) { + res.type("text/plain"); // override application/json + res.redirect(url.toString()); + res.status(302); // temporary redirect, this URL will soon expire + return null; + } else { + return SparkUtils.formatJSON("url", url.toString()); + } + } +} diff --git a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java index 8645919d0..89a2ba3c9 100644 --- a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java +++ b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java @@ -1,45 +1,214 @@ package com.conveyal.datatools.common.utils; +import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.io.ByteStreams; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.HaltException; +import spark.Request; import spark.Response; -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; +import javax.servlet.ServletOutputStream; +import javax.servlet.http.HttpServletResponse; import java.io.File; import java.io.FileInputStream; +import java.io.IOException; +import java.util.Arrays; +import static com.conveyal.datatools.manager.DataManager.getConfigPropertyAsText; import static spark.Spark.halt; /** - * Created by landon on 12/15/16. + * Contains a collection of utility methods used in conjunction with the Spark HTTP requests and responses. */ public class SparkUtils { + private static final Logger LOG = LoggerFactory.getLogger(SparkUtils.class); + private static final ObjectMapper mapper = new ObjectMapper(); + private static final String BASE_URL = getConfigPropertyAsText("application.public_url"); + private static final int DEFAULT_LINES_TO_PRINT = 10; - public static Object downloadFile(File file, Response res) { - if(file == null) halt(404, "File is null"); + /** + * Write out the supplied file to the Spark response as an octet-stream. + */ + public static HttpServletResponse downloadFile(File file, String filename, Request req, Response res) { + if (file == null) haltWithMessage(req, 404, "File is null"); + HttpServletResponse raw = res.raw(); + raw.setContentType("application/octet-stream"); + raw.setHeader("Content-Disposition", "attachment; filename=" + filename); + // Override the gzip content encoding applied to standard API responses. + res.header("Content-Encoding", "identity"); + try ( + FileInputStream fileInputStream = new FileInputStream(file); + ServletOutputStream outputStream = raw.getOutputStream() + ) { + // Write the file input stream to the response's output stream. + ByteStreams.copy(fileInputStream, outputStream); + // TODO: Is flushing the stream necessary? + outputStream.flush(); + } catch (Exception e) { + LOG.error("Could not write file to output stream", e); + e.printStackTrace(); + haltWithMessage(req, 500, "Error serving GTFS file", e); + } + return raw; + } + + /** + * Constructs a JSON string containing the provided key/value pair. + */ + public static String formatJSON (String key, String value) { + return mapper.createObjectNode() + .put(key, value) + .toString(); + } + + /** + * Constructs an object node with a result (i.e., OK or ERR), message, code, and if the exception argument is + * supplied details about the exception encountered. + */ + public static ObjectNode getObjectNode(String message, int code, Exception e) { + String detail = e != null ? e.getMessage() : null; + return mapper.createObjectNode() + .put("result", code >= 400 ? "ERR" : "OK") + .put("message", message) + .put("code", code) + .put("detail", detail); + } - res.raw().setContentType("application/octet-stream"); - res.raw().setHeader("Content-Disposition", "attachment; filename=" + file.getName()); + /** + * Constructs a JSON string with a result (i.e., OK or ERR), message, code, and if the exception argument is + * supplied details about the exception encountered. + */ + public static String formatJSON(String message, int code, Exception e) { + return getObjectNode(message, code, e).toString(); + } + + /** + * Wrapper around Spark halt method that formats message as JSON using {@link SparkUtils#formatJSON}. + */ + public static void haltWithMessage(Request request, int statusCode, String message) throws HaltException { + haltWithMessage(request, statusCode, message, null); + } + /** + * Wrapper around Spark halt method that formats message as JSON using {@link SparkUtils#formatJSON}. Exception + */ + public static void haltWithMessage( + Request request, + int statusCode, + String message, + Exception e + ) throws HaltException { + JsonNode json = getObjectNode(message, statusCode, e); + String logString = null; try { - BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(res.raw().getOutputStream()); - BufferedInputStream bufferedInputStream = new BufferedInputStream(new FileInputStream(file)); + logString = "\n" + mapper.writerWithDefaultPrettyPrinter().writeValueAsString(json); + } catch (JsonProcessingException jpe) { + logString = message; + } + logRequestOrResponse(false, request, logString, statusCode); + halt(statusCode, json.toString()); + } - byte[] buffer = new byte[1024]; - int len; - while ((len = bufferedInputStream.read(buffer)) > 0) { - bufferedOutputStream.write(buffer, 0, len); - } + /** + * Convenience wrapper around formatJSON that excludes the exception argument. + */ + public static String formatJSON(String message, int code) { + return formatJSON(message, code, null); + } - bufferedOutputStream.flush(); - bufferedOutputStream.close(); - } catch (Exception e) { - halt(500, "Error serving GTFS file"); + /** + * Construct JSON string response that contains message and jobId fields. + */ + public static String formatJobMessage (String jobId, String message) { + return mapper.createObjectNode() + .put("jobId", jobId) + .put("message", message) + .toString(); + } + + /** + * Log Spark requests. + */ + public static void logRequest(Request request, Response response) { + logRequestOrResponse(true, request, response); + } + + /** + * Log Spark responses. + */ + public static void logResponse(Request request, Response response) { + logRequestOrResponse(false, request, response); + } + + /** + * Log request/response. Pretty print JSON if the content-type is JSON. + */ + public static void logRequestOrResponse(boolean logRequest, Request request, Response response) { + // NOTE: Do not attempt to read the body into a string until it has been determined that the content-type is + // JSON. + HttpServletResponse raw = response.raw(); + String bodyString = ""; + try { + String contentType; + if (logRequest) { + contentType = request.contentType(); + } else { + contentType = raw.getHeader("content-type"); + } + if ("application/json".equals(contentType)) { + bodyString = logRequest ? request.body() : response.body(); + if (bodyString != null) { + // Pretty print JSON if ContentType is JSON and body is not empty + JsonNode jsonNode = mapper.readTree(bodyString); + // Add new line for legibility when printing + bodyString = "\n" + mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode); + } else { + bodyString = "{body content is null}"; + } + } else if (contentType != null) { + bodyString = String.format("\nnon-JSON body type: %s", contentType); + } + } catch (IOException e) { + LOG.warn("Could not parse JSON", e); + bodyString = "\nBad JSON:\n" + bodyString; } + logRequestOrResponse(logRequest, request, bodyString, raw.getStatus()); + } - return res.raw(); + public static void logRequestOrResponse( + boolean logRequest, + Request request, + String bodyString, + int statusCode + ) { + Auth0UserProfile userProfile = request.attribute("user"); + String userEmail = userProfile != null ? userProfile.getEmail() : "no-auth"; + String queryString = request.queryParams().size() > 0 ? "?" + request.queryString() : ""; + LOG.info( + "{} {} {}: {}{}{}{}", + logRequest ? "req" : String.format("res (%s)", statusCode), + userEmail, + request.requestMethod(), + BASE_URL, + request.pathInfo(), + queryString, + trimLines(bodyString) + ); } - public static String formatJSON(String message, int code) { - return String.format("{\"result\":\"ERR\",\"message\":\"%s\",\"code\":%d}", message, code); + private static String trimLines(String str) { + if (str == null) return ""; + String[] lines = str.split("\n"); + if (lines.length <= DEFAULT_LINES_TO_PRINT) return str; + return String.format( + "%s \n...and %d more lines", + String.join("\n", Arrays.copyOfRange(lines, 0, DEFAULT_LINES_TO_PRINT - 1)), + lines.length - DEFAULT_LINES_TO_PRINT + ); } } diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/Base.java b/src/main/java/com/conveyal/datatools/editor/controllers/Base.java deleted file mode 100755 index 1a7c5450b..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/Base.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.conveyal.datatools.editor.controllers; - -import com.conveyal.datatools.editor.models.transit.GtfsRouteType; -import com.conveyal.datatools.editor.models.transit.TripDirection; -import com.conveyal.geojson.GeoJsonModule; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.module.SimpleModule; -import java.time.LocalDate; - -import com.conveyal.datatools.editor.utils.JacksonSerializers; - -import java.io.IOException; -import java.io.StringWriter; - -public class Base { - public static ObjectMapper mapper = new ObjectMapper(); - private static JsonFactory jf = new JsonFactory(); - - static { - SimpleModule mod = new SimpleModule(); - mod.addDeserializer(LocalDate.class, new JacksonSerializers.LocalDateDeserializer()); - mod.addSerializer(LocalDate.class, new JacksonSerializers.LocalDateSerializer()); - mod.addDeserializer(GtfsRouteType.class, new JacksonSerializers.GtfsRouteTypeDeserializer()); - mod.addSerializer(GtfsRouteType.class, new JacksonSerializers.GtfsRouteTypeSerializer()); - mod.addDeserializer(TripDirection.class, new JacksonSerializers.TripDirectionDeserializer()); - mod.addSerializer(TripDirection.class, new JacksonSerializers.TripDirectionSerializer()); - mapper.registerModule(mod); - mapper.registerModule(new GeoJsonModule()); - } - - public static String toJson(Object pojo, boolean prettyPrint) - throws IOException { - StringWriter sw = new StringWriter(); - JsonGenerator jg = jf.createGenerator(sw); - if (prettyPrint) { - jg.useDefaultPrettyPrinter(); - } - mapper.writeValue(jg, pojo); - return sw.toString(); - } -} \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java b/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java new file mode 100644 index 000000000..cb6fe1b73 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java @@ -0,0 +1,211 @@ +package com.conveyal.datatools.editor.controllers; + +import com.conveyal.datatools.common.utils.SparkUtils; +import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.utils.json.JsonManager; +import com.google.gson.JsonObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.Request; +import spark.Response; +import spark.Session; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static spark.Spark.delete; +import static spark.Spark.post; +import static spark.Spark.put; + +/** + * Handles the locking of feed sources being edited to prevent concurrent feed editing. In addition to locking against + * concurrent editing by more than one user, this restricts editing by a single user on multiple sessions/tabs. + */ +public class EditorLockController { + + private static final JsonManager json = new JsonManager<>(EditorLockController.class, JsonViews.UserInterface.class); + private static final Logger LOG = LoggerFactory.getLogger(EditorLockController.class); + public static final Map sessionsForFeedIds = new HashMap<>(); + private static final long SESSION_LENGTH_IN_SECONDS = 10 * 60; // Ten minutes + + + private static String lockFeed (Request req, Response res) { + // FIXME: why is content type not being set in before()/after()? + res.type("application/json"); + Auth0UserProfile userProfile = req.attribute("user"); + String feedId = req.queryParams("feedId"); + EditorSession currentSession = sessionsForFeedIds.get(feedId); + if (currentSession == null) { + // If there is no active session for the feed ID, create a new one, which allows only the current user + + // session to edit. + // Create new session + String newSessionId = invalidateAndCreateNewSession(req); + EditorSession newEditorSession = new EditorSession(feedId, newSessionId, userProfile); + sessionsForFeedIds.put(feedId, newEditorSession); + LOG.info("Locking feed {} for editing session {} by user {}", feedId, newSessionId, userProfile.getEmail()); + return formatJSON("Locking editor feed for user " + newEditorSession.userEmail, + 200, + feedId, + newSessionId); + } + + long secondsSinceLastCheckIn = TimeUnit.MILLISECONDS.toSeconds (System.currentTimeMillis() - currentSession.lastCheckIn); + long minutesSinceLastCheckIn = TimeUnit.SECONDS.toMinutes(secondsSinceLastCheckIn); + long minutesUntilExpiration = TimeUnit.SECONDS.toMinutes(SESSION_LENGTH_IN_SECONDS - secondsSinceLastCheckIn); + if (secondsSinceLastCheckIn > SESSION_LENGTH_IN_SECONDS) { + // There is an active session, but the user with active session has not checked in for some time. Booting + // the current session in favor of new session. + // Create new session + String newSessionId = invalidateAndCreateNewSession(req); + LOG.info("User {} (session ID: {}) has not maintained lock for {} minutes. Booting.", currentSession.userEmail, currentSession.sessionId, minutesSinceLastCheckIn); + EditorSession newEditorSession = new EditorSession(feedId, newSessionId, userProfile); + sessionsForFeedIds.put(feedId, newEditorSession); + return formatJSON("Locking editor feed for user " + newEditorSession.userEmail, 200, feedId, newSessionId); + } else if (!currentSession.userId.equals(userProfile.getUser_id())) { + // If the session has not expired, and another user has the active session. + LOG.warn("Edit session {} for user {} in progress for feed {}. User {} not permitted to lock feed for {} minutes.", currentSession.sessionId, currentSession.userEmail, currentSession.feedId, userProfile.getEmail(), minutesUntilExpiration); + haltWithMessage(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration)); + return null; + } else { + String sessionId = req.session().id(); + LOG.warn("User {} is editing feed {} in another session {}. Cannot create lock for session {}", userProfile.getEmail(), feedId, currentSession.sessionId, sessionId); + haltWithMessage(req, 400, "Warning! You are editing this feed in another session/browser tab!"); + return null; + } + } + + private static String getLockedFeedMessage(EditorSession session, long minutesUntilExpiration) { + String timestamp = session.lastEdit > 0 + ? SimpleDateFormat.getInstance().format(new Date(session.lastEdit)) + : null; + String lastEditMessage = timestamp == null ? "no edits since session began" : "last edit was " + timestamp; + return String.format( + "Warning! There is an editor session already in progress for user %s. " + + "Their session will expire after %d minutes of inactivity (%s).", + session.userEmail, + minutesUntilExpiration, + lastEditMessage); + } + + private static String invalidateAndCreateNewSession(Request req) { + req.session().invalidate(); + Session session = req.session(true); + String newSessionId = session.id(); + return newSessionId; + } + + private static String maintainLock(Request req, Response res) { + // FIXME: why is content type not being set in before()/after()? + res.type("application/json"); + String sessionId = req.params("id"); + String feedId = req.queryParams("feedId"); + Auth0UserProfile userProfile = req.attribute("user"); + EditorSession currentSession = sessionsForFeedIds.get(feedId); + if (currentSession == null) { + // If there is no current session to maintain, request that user reloads browser. + LOG.warn("No active editor session to maintain {}.", sessionId); + haltWithMessage(req, 400, "No active session for feedId. Please refresh your browser and try editing later."); + return null; + } else if (!currentSession.sessionId.equals(sessionId)) { + long secondsSinceLastCheckIn = TimeUnit.MILLISECONDS.toSeconds (System.currentTimeMillis() - currentSession.lastCheckIn); + long minutesUntilExpiration = TimeUnit.SECONDS.toMinutes(SESSION_LENGTH_IN_SECONDS - secondsSinceLastCheckIn); + // If there is an active session but it doesn't match the session, someone else (or the current user) is + // editing elsewhere. A user should only be trying to maintain a lock if it had an active session at one + // point. If we get to this point, it is because the user's session has expired and some other session took + // its place. + if (currentSession.userEmail.equals(userProfile.getEmail())) { + // If the new current session is held by this user, give them the option to evict the current session / + // unlock the feed. + LOG.warn("User {} already has an active editor session () for feed {}.", userProfile.getEmail(), currentSession.sessionId, currentSession.feedId); + haltWithMessage(req, 400, "Warning! You have an active editing session for this feed underway in a different browser tab."); + } else { + LOG.warn("User {} attempted editor session for feed {} while active session underway for user {}.", userProfile.getEmail(), currentSession.feedId, currentSession.userEmail); + haltWithMessage(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration)); + } + return null; + } else { + // Otherwise, the current session matches the session the user is attempting to maintain. Update the + // lastEdited time. + currentSession.lastCheckIn = System.currentTimeMillis(); +// LOG.info("Updating session {} check-in time to {} for user {}", currentSession.sessionId, currentSession.lastCheckIn, currentSession.userEmail); + return formatJSON("Updating time for user " + currentSession.userEmail, 200, feedId, null); + } + } + + private static String deleteFeedLock(Request req, Response res) { + // FIXME: why is content type not being set in before()/after()? + res.type("application/json"); + Auth0UserProfile userProfile = req.attribute("user"); + String feedId = req.queryParams("feedId"); + String sessionId = req.params("id"); + EditorSession currentSession = sessionsForFeedIds.get(feedId); + if (currentSession == null) { + // If there is no current session to delete/overwrite, request that user reloads browser. + LOG.warn("No active session to overwrite/delete."); + return SparkUtils.formatJSON("No active session to take over. Please refresh your browser and try editing later.", 202); + } else if (!currentSession.sessionId.equals(sessionId)) { + // If there is a different active session for some user, allow deletion / overwrite. + // Note: There used to be a check here that the requesting user was the same as the user with an open + // session; however, this has been removed because in practice it became a nuisance. Respectful users with + // shared access to a feed can generally be trusted not to boot one another out in a combative manner. + boolean overwrite = Boolean.valueOf(req.queryParams("overwrite")); + if (overwrite) { + sessionId = invalidateAndCreateNewSession(req); + EditorSession newEditorSession = new EditorSession(feedId, sessionId, userProfile); + sessionsForFeedIds.put(feedId, newEditorSession); + LOG.warn("Previously active session {} has been overwritten with new session {}.", currentSession.sessionId, newEditorSession.sessionId); + return formatJSON("Previous session lock has been overwritten with new session.", 200, feedId, sessionId); + } else { + LOG.warn("Not overwriting session {} for user {}.", currentSession.sessionId, currentSession.userEmail); + return SparkUtils.formatJSON("Not processing request to delete lock. There is already an active session for user " + currentSession.userEmail, 202); + } + } else { + LOG.info("Current session: {} {}; User session: {} {}", currentSession.userEmail, currentSession.sessionId, userProfile.getEmail(), sessionId); + // Otherwise, the current session matches the session from which the delete request came. This indicates that + // the user's editing session has been closed (by either exiting the editor or closing the browser tab). + LOG.info("Closed session {} for feed {} successfully.", currentSession.sessionId, currentSession.feedId); + sessionsForFeedIds.remove(feedId); + return formatJSON("Session has been closed successfully.", 200, feedId, sessionId); + } + } + + public static void register(String apiPrefix) { + post(apiPrefix + "secure/lock", EditorLockController::lockFeed, json::write); + delete(apiPrefix + "secure/lock/:id", EditorLockController::deleteFeedLock, json::write); + put(apiPrefix + "secure/lock/:id", EditorLockController::maintainLock, json::write); + } + + private static String formatJSON(String message, int code, String feedId, String sessionId) { + JsonObject object = new JsonObject(); + object.addProperty("result", code >= 400 ? "ERR" : "OK"); + object.addProperty("message", message); + object.addProperty("code", code); + if (sessionId != null) { + object.addProperty("sessionId", sessionId); + } + object.addProperty("feedId", feedId); + return object.toString(); + } + + public static class EditorSession { + public final String feedId; + public final String sessionId; + public final String userId; + public final String userEmail; + public long lastCheckIn; + public long lastEdit; + + EditorSession (String feedId, String sessionId, Auth0UserProfile userProfile) { + this.feedId = feedId; + this.sessionId = sessionId; + this.userId = userProfile != null ? userProfile.getUser_id() : "no_user_id"; + this.userEmail = userProfile != null ? userProfile.getEmail() : "no_user_email"; + lastCheckIn = System.currentTimeMillis(); + } + } +} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/AgencyController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/AgencyController.java deleted file mode 100644 index 462b6210d..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/AgencyController.java +++ /dev/null @@ -1,197 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.Agency; -import com.conveyal.datatools.editor.utils.S3Utils; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - -public class AgencyController { - public static JsonManager json = - new JsonManager<>(Agency.class, JsonViews.UserInterface.class); - private static Logger LOG = LoggerFactory.getLogger(AgencyController.class); - public static Object getAgency(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - Object json = null; - try { - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - if(id != null) { - if (!tx.agencies.containsKey(id)) { - tx.rollback(); - halt(404); - } - - json = Base.toJson(tx.agencies.get(id), false); - } - else { - json = Base.toJson(tx.agencies.values(), false); - } - - tx.rollback(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return json; - } - - public static Object createAgency(Request req, Response res) { - Agency agency; - String feedId = req.queryParams("feedId"); - if (feedId == null) - halt(400, "You must provide a valid feedId"); - - try { - agency = Base.mapper.readValue(req.body(), Agency.class); - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - if (tx.agencies.containsKey(agency.id)) { - tx.rollback(); - halt(400, "Agency " + agency.id + " already exists"); - } - - tx.agencies.put(agency.id, agency); - tx.commit(); - - return agency; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - - public static Object updateAgency(Request req, Response res) { - Agency agency; - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - try { - agency = Base.mapper.readValue(req.body(), Agency.class); - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - if(!tx.agencies.containsKey(agency.id)) { - tx.rollback(); - halt(400); - } - - tx.agencies.put(agency.id, agency); - tx.commit(); - - return Base.toJson(agency, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object uploadAgencyBranding(Request req, Response res) { - Agency agency; - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - try { - if (feedId == null) { - halt(400); - } - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - if (!tx.agencies.containsKey(id)) { - tx.rollback(); - halt(404); - } - - agency = tx.agencies.get(id); - - String url = S3Utils.uploadBranding(req, id); - System.out.println(url); - // set agencyBrandingUrl to s3 location - agency.agencyBrandingUrl = url; - - tx.agencies.put(id, agency); - tx.commit(); - - return agency; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - public static Object deleteAgency(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - if(id == null) { - halt(400); - } - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - if(!tx.agencies.containsKey(id)) { - tx.rollback(); - halt(400); - } - - tx.agencies.remove(id); - tx.commit(); - - return true; // ok(); - } - - /** duplicate an agency */ - public static Object duplicateAgency(Request req, Response res) { - - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - // make sure the agency exists -// GlobalTx gtx = VersionedDataStore.getGlobalTx(); - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - if (!tx.agencies.containsKey(id)) { - tx.rollback(); - halt(404); - } - tx.rollback(); - - FeedTx.duplicate(id); - return true; // ok(); - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/agency/:id", AgencyController::getAgency, json::write); - options(apiPrefix + "secure/agency", (q, s) -> ""); - get(apiPrefix + "secure/agency", AgencyController::getAgency, json::write); - post(apiPrefix + "secure/agency", AgencyController::createAgency, json::write); - put(apiPrefix + "secure/agency/:id", AgencyController::updateAgency, json::write); - post(apiPrefix + "secure/agency/:id/duplicate", AgencyController::duplicateAgency, json::write); - post(apiPrefix + "secure/agency/:id/uploadbranding", AgencyController::uploadAgencyBranding, json::write); - delete(apiPrefix + "secure/agency/:id", AgencyController::deleteAgency, json::write); - - // Public routes -// get(apiPrefix + "public/agency/:id", AgencyController::getFeedSource, json::write); -// get(apiPrefix + "public/agency", AgencyController::getAllFeedSources, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/CalendarController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/CalendarController.java deleted file mode 100644 index 809aafa06..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/CalendarController.java +++ /dev/null @@ -1,240 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.common.utils.SparkUtils; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.google.common.base.Function; -import com.google.common.collect.Collections2; -import com.google.common.collect.Sets; -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.ScheduleException; -import com.conveyal.datatools.editor.models.transit.ServiceCalendar; -import com.conveyal.datatools.editor.models.transit.ServiceCalendar.ServiceCalendarForPattern; -import com.conveyal.datatools.editor.models.transit.Trip; -import org.mapdb.Fun; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static com.conveyal.datatools.common.utils.SparkUtils.formatJSON; -import static spark.Spark.*; - -import java.util.Calendar; -import java.util.Collection; -import java.util.Set; - - -public class CalendarController { - public static JsonManager json = - new JsonManager<>(Calendar.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(CalendarController.class); - public static Object getCalendar(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - String patternId = req.queryParams("patternId"); - - if (feedId == null) { - feedId = req.session().attribute("feedId"); - } - - if (feedId == null) { - halt(400); - } - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - if (id != null) { - if (!tx.calendars.containsKey(id)) { - halt(404); - tx.rollback(); - } - - else { - ServiceCalendar c = tx.calendars.get(id); - c.addDerivedInfo(tx); - return c; - } - } - else if (patternId != null) { - if (!tx.tripPatterns.containsKey(patternId)) { - tx.rollback(); - halt(404); - } - - Set serviceCalendarIds = Sets.newHashSet(); - for (Trip trip : tx.getTripsByPattern(patternId)) { - serviceCalendarIds.add(trip.calendarId); - } - - Collection ret = - Collections2.transform(serviceCalendarIds, new Function() { - - @Override - public ServiceCalendarForPattern apply(String input) { - ServiceCalendar cal = tx.calendars.get(input); - - Long count = tx.tripCountByPatternAndCalendar.get(new Fun.Tuple2(patternId, cal.id)); - - if (count == null) count = 0L; - - return new ServiceCalendarForPattern(cal, tx.tripPatterns.get(patternId), count); - } - - }); - - return ret; - } - else { - Collection cals = tx.calendars.values(); - for (ServiceCalendar c : cals) { - c.addDerivedInfo(tx); - } - return cals; - } - - tx.rollback(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object createCalendar(Request req, Response res) { - ServiceCalendar cal; - FeedTx tx = null; - - try { - cal = Base.mapper.readValue(req.body(), ServiceCalendar.class); - - if (!VersionedDataStore.feedExists(cal.feedId)) { - halt(400); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(cal.feedId)) - halt(400); - - tx = VersionedDataStore.getFeedTx(cal.feedId); - - if (tx.calendars.containsKey(cal.id)) { - tx.rollback(); - halt(400); - } - - // check if gtfsServiceId is specified, if not create from DB id - if(cal.gtfsServiceId == null) { - cal.gtfsServiceId = "CAL_" + cal.id.toString(); - } - - cal.addDerivedInfo(tx); - - tx.calendars.put(cal.id, cal); - tx.commit(); - - return cal; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object updateCalendar(Request req, Response res) { - ServiceCalendar cal; - FeedTx tx = null; - - try { - cal = Base.mapper.readValue(req.body(), ServiceCalendar.class); - - if (!VersionedDataStore.feedExists(cal.feedId)) { - halt(400); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(cal.feedId)) - halt(400); - - tx = VersionedDataStore.getFeedTx(cal.feedId); - - if (!tx.calendars.containsKey(cal.id)) { - tx.rollback(); - halt(400); - } - - // check if gtfsServiceId is specified, if not create from DB id - if(cal.gtfsServiceId == null) { - cal.gtfsServiceId = "CAL_" + cal.id.toString(); - } - - cal.addDerivedInfo(tx); - - tx.calendars.put(cal.id, cal); - - Object json = Base.toJson(cal, false); - - tx.commit(); - - return json; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object deleteCalendar(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - if (id == null || !tx.calendars.containsKey(id)) { - tx.rollback(); - halt(404); - } - - // we just don't let you delete calendars unless there are no trips on them - Long count = tx.tripCountByCalendar.get(id); - if (count != null && count > 0) { - tx.rollback(); - halt(400, formatJSON("Cannot delete calendar that is referenced by trips.", 400)); - } - - // drop this calendar from any schedule exceptions - for (ScheduleException ex : tx.getExceptionsByCalendar(id)) { - ex.customSchedule.remove(id); - tx.exceptions.put(ex.id, ex); - } - - tx.calendars.remove(id); - - tx.commit(); - - return true; // ok(); - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/calendar/:id", CalendarController::getCalendar, json::write); - options(apiPrefix + "secure/calendar", (q, s) -> ""); - get(apiPrefix + "secure/calendar", CalendarController::getCalendar, json::write); - post(apiPrefix + "secure/calendar", CalendarController::createCalendar, json::write); - put(apiPrefix + "secure/calendar/:id", CalendarController::updateCalendar, json::write); - delete(apiPrefix + "secure/calendar/:id", CalendarController::deleteCalendar, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java new file mode 100644 index 000000000..5c6f4a922 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java @@ -0,0 +1,308 @@ +package com.conveyal.datatools.editor.controllers.api; + +import com.conveyal.datatools.common.utils.S3Utils; +import com.conveyal.datatools.editor.controllers.EditorLockController; +import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.conveyal.datatools.manager.utils.json.JsonManager; +import com.conveyal.gtfs.loader.JdbcTableWriter; +import com.conveyal.gtfs.loader.Table; +import com.conveyal.gtfs.model.Entity; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.dbutils.DbUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.HaltException; +import spark.Request; +import spark.Response; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import static com.conveyal.datatools.common.utils.SparkUtils.formatJSON; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.conveyal.datatools.editor.controllers.EditorLockController.sessionsForFeedIds; +import static spark.Spark.delete; +import static spark.Spark.options; +import static spark.Spark.post; +import static spark.Spark.put; + +/** + * Abstract controller that sets HTTP endpoints for editing GTFS entities. This class contains methods that can be + * overridden that serve as hooks to perform custom logic on + */ +public abstract class EditorController { + private static final String ID_PARAM = "/:id"; + private final String ROOT_ROUTE; + private static final String SECURE = "secure/"; + private static final Logger LOG = LoggerFactory.getLogger(EditorController.class); + private DataSource datasource; + private final String classToLowercase; + private static final ObjectMapper mapper = new ObjectMapper(); + public static final JsonManager json = new JsonManager<>(Entity.class, JsonViews.UserInterface.class); + private final Table table; + + EditorController(String apiPrefix, Table table, DataSource datasource) { + this.table = table; + this.datasource = datasource; + this.classToLowercase = table.getEntityClass().getSimpleName().toLowerCase(); + this.ROOT_ROUTE = apiPrefix + SECURE + classToLowercase; + registerRoutes(); + } + + /** + * Add static HTTP endpoints to Spark static instance. + */ + private void registerRoutes() { + LOG.info("Registering editor routes for {}", ROOT_ROUTE); + // Note, get single and multiple entity methods are handled by GraphQLGtfsSchema class. Only create, update, and + // delete HTTP endpoints are handled as REST. TODO: migrate these REST endpoints to GraphQL mutations. + // Options response for CORS + options(ROOT_ROUTE, (q, s) -> ""); + // Create entity request + post(ROOT_ROUTE, this::createOrUpdate, json::write); + // Update entity request + put(ROOT_ROUTE + ID_PARAM, this::createOrUpdate, json::write); + // Handle uploading agency and route branding to s3 + // TODO: Merge as a hook into createOrUpdate? + if ("agency".equals(classToLowercase) || "route".equals(classToLowercase)) { + post(ROOT_ROUTE + ID_PARAM + "/uploadbranding", this::uploadEntityBranding, json::write); + } + // Delete entity request + delete(ROOT_ROUTE + ID_PARAM, this::deleteOne, json::write); + + // Handle special multiple delete method for trip endpoint + if ("trip".equals(classToLowercase)) { + delete(ROOT_ROUTE, this::deleteMultipleTrips, json::write); + } + + // Handle update useFrequency field. Hitting this endpoint will delete all trips for a pattern and update the + // useFrequency field. + if ("pattern".equals(classToLowercase)) { + delete(ROOT_ROUTE + ID_PARAM + "/trips", this::deleteTripsForPattern, json::write); + } + } + + /** + * HTTP endpoint to delete all trips for a given string pattern_id (i.e., not the integer ID field). + */ + private String deleteTripsForPattern(Request req, Response res) { + long startTime = System.currentTimeMillis(); + String namespace = getNamespaceAndValidateSession(req); + // NOTE: This is a string pattern ID, not the integer ID that all other HTTP endpoints use. + String patternId = req.params("id"); + if (patternId == null) { + haltWithMessage(req, 400, "Must provide valid pattern_id"); + } + try { + JdbcTableWriter tableWriter = new JdbcTableWriter(Table.TRIPS, datasource, namespace); + int deletedCount = tableWriter.deleteWhere("pattern_id", patternId, true); + return formatJSON(String.format("Deleted %d.", deletedCount), 200); + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Error deleting entity", e); + return null; + } finally { + LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime); + } + } + + /** + * Currently designed to delete multiple trips in a single transaction. Trip IDs should be comma-separated in a query + * parameter. TODO: Implement this for other entity types? + */ + private String deleteMultipleTrips(Request req, Response res) { + long startTime = System.currentTimeMillis(); + String namespace = getNamespaceAndValidateSession(req); + String[] tripIds = req.queryParams("tripIds").split(","); + try { + JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace); + for (String tripId: tripIds) { + // Delete each trip ID found in query param WITHOUT auto-committing. + int result = tableWriter.delete(Integer.parseInt(tripId), false); + if (result != 1) { + // If exactly one entity was not deleted, throw an error. + String message = String.format("Could not delete trip %s. Result: %d", tripId, result); + throw new SQLException(message); + } + } + // Commit the transaction after iterating over trip IDs (because the deletes where made without autocommit). + tableWriter.commit(); + LOG.info("Deleted {} trips", tripIds.length); + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Error deleting entity", e); + } finally { + LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime); + } + return formatJSON(String.format("Deleted %d.", tripIds.length), 200); + } + + /** + * HTTP endpoint to delete one GTFS editor entity specified by the integer ID field. + */ + private String deleteOne(Request req, Response res) { + long startTime = System.currentTimeMillis(); + String namespace = getNamespaceAndValidateSession(req); + Integer id = getIdFromRequest(req); + try { + JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace); + if (tableWriter.delete(id, true) == 1) { + // FIXME: change return message based on result value + return formatJSON(String.valueOf("Deleted one."), 200); + } + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Error deleting entity", e); + } finally { + LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime); + } + return null; + } + + /** + * HTTP endpoint to upload branding image to S3 for either agency or route entities. The endpoint also handles + * updating the branding URL field to match the S3 URL. + */ + private String uploadEntityBranding (Request req, Response res) { + int id = getIdFromRequest(req); + String url = null; + try { + // FIXME: remove cast to string. + String idAsString = String.valueOf(id); + url = S3Utils.uploadBranding(req, String.join("_", classToLowercase, idAsString)); + } catch (HaltException e) { + // Do not re-catch halts thrown for exceptions that have already been caught. + LOG.error("Halt encountered", e); + throw e; + } catch (Exception e) { + String message = String.format("Could not upload branding for %s id=%d", classToLowercase, id); + LOG.error(message); + e.printStackTrace(); + haltWithMessage(req, 400, message, e); + } + String namespace = getNamespaceAndValidateSession(req); + // Prepare json object for response. (Note: this is not the full entity object, but just the URL field). + ObjectNode jsonObject = mapper.createObjectNode(); + jsonObject.put(String.format("%s_branding_url", classToLowercase), url); + Connection connection = null; + // Update URL in GTFS entity with simple SQL update. Note: the request object only contains an image file, so + // the standard JdbcTableWriter update method that requires a complete JSON string cannot be used. + try { + connection = datasource.getConnection(); + String updateSql = String.format("update %s.%s set %s_branding_url = ?", namespace, table.name, classToLowercase); + PreparedStatement preparedStatement = connection.prepareStatement(updateSql); + preparedStatement.setString(1, url); + preparedStatement.executeUpdate(); + connection.commit(); + return jsonObject.toString(); + } catch (SQLException e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Could not update branding url", e); + return null; + } finally { + DbUtils.closeQuietly(connection); + } + } + + /** + * HTTP endpoint to create or update a single GTFS editor entity. If the ID param is supplied and the HTTP method is + * PUT, an update operation will be applied to the specified entity using the JSON body. Otherwise, a new entity will + * be created. + */ + private String createOrUpdate(Request req, Response res) { + long startTime = System.currentTimeMillis(); + // Check if an update or create operation depending on presence of id param + // This needs to be final because it is used in a lambda operation below. + if (req.params("id") == null && req.requestMethod().equals("PUT")) { + haltWithMessage(req, 400, "Must provide id"); + } + final boolean isCreating = req.params("id") == null; + String namespace = getNamespaceAndValidateSession(req); + Integer id = getIdFromRequest(req); + // Get the JsonObject + try { + JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace); + if (isCreating) { + return tableWriter.create(req.body(), true); + } else { + return tableWriter.update(id, req.body(), true); + } + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Operation failed.", e); + } finally { + String operation = isCreating ? "Create" : "Update"; + LOG.info("{} operation took {} msec", operation, System.currentTimeMillis() - startTime); + } + return null; + } + + /** + * Get the namespace for the feed ID found in the request. Also, check that the user has an active editing session + * for the provided feed ID. + */ + private static String getNamespaceAndValidateSession(Request req) { + String feedId = req.queryParams("feedId"); + String sessionId = req.queryParams("sessionId"); + FeedSource feedSource = Persistence.feedSources.getById(feedId); + if (feedSource == null) { + haltWithMessage(req, 400, "Feed ID is invalid"); + } + // FIXME: Switch to using spark session IDs rather than query parameter? +// String sessionId = req.session().id(); + EditorLockController.EditorSession currentSession = sessionsForFeedIds.get(feedId); + if (currentSession == null) { + haltWithMessage(req, 400, "There is no active editing session for user."); + } + if (!currentSession.sessionId.equals(sessionId)) { + // This session does not match the current active session for the feed. + Auth0UserProfile userProfile = req.attribute("user"); + if (currentSession.userEmail.equals(userProfile.getEmail())) { + LOG.warn("User {} already has editor session {} for feed {}. Same user cannot make edits on session {}.", currentSession.userEmail, currentSession.sessionId, feedId, req.session().id()); + haltWithMessage(req, 400, "You have another editing session open for " + feedSource.name); + } else { + LOG.warn("User {} already has editor session {} for feed {}. User {} cannot make edits on session {}.", currentSession.userEmail, currentSession.sessionId, feedId, userProfile.getEmail(), req.session().id()); + haltWithMessage(req, 400, "Somebody else is editing the " + feedSource.name + " feed."); + } + } else { + currentSession.lastEdit = System.currentTimeMillis(); + LOG.info("Updating session {} last edit time to {}", sessionId, currentSession.lastEdit); + } + String namespace = feedSource.editorNamespace; + if (namespace == null) { + haltWithMessage(req, 400, "Cannot edit feed that has not been snapshotted (namespace is null)."); + } + return namespace; + } + + /** + * Get integer entity ID from request. + */ + private Integer getIdFromRequest(Request req) { + Integer id = null; + // FIXME: what if a null value is specified in id param + if (req.params("id") != null) { + // If an update, parse the id param + try { + // If we cannot parse the integer, the ID is not valid + id = Integer.valueOf(req.params("id")); + } catch (NumberFormatException e) { + LOG.error("ID provided must be an integer", e); + haltWithMessage(req, 400, "ID provided is not a number"); + } + } + return id; + } + + // TODO add hooks + abstract void getEntityHook(T entity); + abstract void createEntityHook(T entity); + abstract void updateEntityHook(T entity); + abstract void deleteEntityHook(T entity); +} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java new file mode 100644 index 000000000..fa796e887 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java @@ -0,0 +1,32 @@ +package com.conveyal.datatools.editor.controllers.api; + +import com.conveyal.gtfs.loader.Table; +import com.conveyal.gtfs.model.Entity; + +import javax.sql.DataSource; + +public class EditorControllerImpl extends EditorController { + public EditorControllerImpl(String apiPrefix, Table table, DataSource dataSource){ + super(apiPrefix, table, dataSource); + } + + @Override + void getEntityHook(Entity entity) { + + } + + @Override + void createEntityHook(Entity entity) { + + } + + @Override + void updateEntityHook(Entity entity) { + + } + + @Override + void deleteEntityHook(Entity entity) { + + } +} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/FareController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/FareController.java deleted file mode 100644 index 20ae7a8f1..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/FareController.java +++ /dev/null @@ -1,188 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.Fare; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import java.util.Calendar; -import java.util.Collection; - -import static spark.Spark.*; -import static spark.Spark.delete; - -/** - * Created by landon on 6/22/16. - */ -public class FareController { - public static JsonManager json = - new JsonManager<>(Calendar.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(FareController.class); - public static Object getFare(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - if (feedId == null) { - feedId = req.session().attribute("feedId"); - } - - if (feedId == null) { - halt(400); - } - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - if (id != null) { - if (!tx.fares.containsKey(id)) { - halt(404); - tx.rollback(); - } - - else { - Fare fare = tx.fares.get(id); -// fare.addDerivedInfo(tx); - return fare; - } - } - else { - Collection fares = tx.fares.values(); - for (Fare fare : fares) { -// fare.addDerivedInfo(tx); - } - return fares; - } - - tx.rollback(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object createFare(Request req, Response res) { - Fare fare; - FeedTx tx = null; - - try { - fare = Base.mapper.readValue(req.body(), Fare.class); - - if (!VersionedDataStore.feedExists(fare.feedId)) { - halt(400); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(fare.feedId)) - halt(400); - - tx = VersionedDataStore.getFeedTx(fare.feedId); - - if (tx.fares.containsKey(fare.id)) { - tx.rollback(); - halt(400); - } - - // check if gtfsFareId is specified, if not create from DB id - if(fare.gtfsFareId == null) { - fare.gtfsFareId = "CAL_" + fare.id.toString(); - } - -// fare.addDerivedInfo(tx); - - tx.fares.put(fare.id, fare); - tx.commit(); - - return fare; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - } - return null; - } - - public static Object updateFare(Request req, Response res) { - Fare fare; - FeedTx tx = null; - - try { - fare = Base.mapper.readValue(req.body(), Fare.class); - - if (!VersionedDataStore.feedExists(fare.feedId)) { - halt(400); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(fare.feedId)) - halt(400); - - tx = VersionedDataStore.getFeedTx(fare.feedId); - - if (!tx.fares.containsKey(fare.id)) { - tx.rollback(); - halt(400); - } - - // check if gtfsFareId is specified, if not create from DB id - if(fare.gtfsFareId == null) { - fare.gtfsFareId = "CAL_" + fare.id.toString(); - } - -// fare.addDerivedInfo(tx); - - tx.fares.put(fare.id, fare); - - Object json = Base.toJson(fare, false); - - tx.commit(); - - return json; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object deleteFare(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - if (id == null || !tx.fares.containsKey(id)) { - tx.rollback(); - halt(404); - } - - tx.fares.remove(id); - - tx.commit(); - - return true; // ok(); - } - public static void register (String apiPrefix) { - get(apiPrefix + "secure/fare/:id", FareController::getFare, json::write); - options(apiPrefix + "secure/fare", (q, s) -> ""); - get(apiPrefix + "secure/fare", FareController::getFare, json::write); - post(apiPrefix + "secure/fare", FareController::createFare, json::write); - put(apiPrefix + "secure/fare/:id", FareController::updateFare, json::write); - delete(apiPrefix + "secure/fare/:id", FareController::deleteFare, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/FeedInfoController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/FeedInfoController.java deleted file mode 100644 index ee4067b1d..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/FeedInfoController.java +++ /dev/null @@ -1,253 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.EditorFeed; -import com.conveyal.datatools.editor.models.transit.GtfsRouteType; -import com.conveyal.datatools.manager.models.FeedSource; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.conveyal.gtfs.model.FeedInfo; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.time.LocalDate; -import java.util.Iterator; -import java.util.Map; - -import static spark.Spark.*; -import static spark.Spark.delete; -import static spark.Spark.put; - -/** - * Created by landon on 6/14/16. - */ -public class FeedInfoController { - public static JsonManager json = - new JsonManager<>(EditorFeed.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(FeedInfoController.class); - public static Object getFeedInfo(Request req, Response res) { - String id = req.params("id"); - - if (id == null) { - return null; - // TODO: return all feedInfos for project? - } - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - if (!gtx.feeds.containsKey(id)) { - // create new EditorFeed if id exists in manager - if (FeedSource.get(id) != null) { - EditorFeed fs = new EditorFeed(id); - gtx.feeds.put(fs.id, fs); - gtx.commit(); - try { - return Base.toJson(fs, false); - } catch (IOException e) { - e.printStackTrace(); - } - } - else { - gtx.rollback(); - halt(404, "Feed id does not exist"); - return null; - } - } - EditorFeed fs = gtx.feeds.get(id); - return fs; - } - - public static Object createFeedInfo(Request req, Response res) { - EditorFeed fs; - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - - try { - fs = Base.mapper.readValue(req.body(), EditorFeed.class); -// -// // check if gtfsAgencyId is specified, if not create from DB id -// if(fs.gtfsAgencyId == null) { -// fs.gtfsAgencyId = "AGENCY_" + fs.id; -// } - - if (gtx.feeds.containsKey(fs.id)) { - gtx.rollback(); - halt(404, "Feed id already exists in editor database"); - return null; - } - - gtx.feeds.put(fs.id, fs); - gtx.commit(); - - return Base.toJson(fs, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - gtx.rollbackIfOpen(); - e.printStackTrace(); - halt(404); - return null; - } - } - - - public static Object updateFeedInfo(Request req, Response res) throws IOException { - String id = req.params("id"); - - EditorFeed feed; - - try { - feed = Base.mapper.readValue(req.body(), EditorFeed.class); - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - - if(!gtx.feeds.containsKey(feed.id)) { - gtx.rollback(); - halt(400); - return null; - } - - gtx.feeds.put(feed.id, feed); - gtx.commit(); - - return Base.toJson(feed, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - public static void applyJsonToFeedInfo(EditorFeed source, String json) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - JsonNode node = mapper.readTree(json); - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - - if (entry.getValue().isNull()) { - continue; - } - - if(entry.getKey().equals("color")) { - source.color = entry.getValue().asText(); - } - - if(entry.getKey().equals("defaultLat")) { - source.defaultLat = entry.getValue().asDouble(); - } - - if(entry.getKey().equals("defaultLon")) { - source.defaultLon = entry.getValue().asDouble(); - } - - if(entry.getKey().equals("routeTypeId")) { - source.defaultRouteType = GtfsRouteType.fromGtfs(entry.getValue().asInt()); - } - - if(entry.getKey().equals("feedPublisherName")) { - source.feedPublisherName = entry.getValue().asText(); - } - - if(entry.getKey().equals("feedVersion")) { - source.feedVersion = entry.getValue().asText(); - } - - if(entry.getKey().equals("feedPublisherUrl")) { - String url = entry.getValue().asText(); - try { - source.feedPublisherUrl = new URL(url); - - } catch (MalformedURLException e) { - halt(400, "URL '" + url + "' not valid."); - } - source.feedPublisherUrl = new URL(entry.getValue().asText()); - } - - if(entry.getKey().equals("feedLang")) { - source.feedLang = entry.getValue().asText(); - } - - if(entry.getKey().equals("feedStartDate")) { - System.out.println(entry.getValue()); - Long seconds = entry.getValue().asLong(); - Long days = seconds / 60 / 60 / 24; -// System.out.println(days); - try { - LocalDate date = LocalDate.ofEpochDay(days); -// System.out.println(date.format(DateTimeFormatter.BASIC_ISO_DATE)); - source.feedStartDate = date; - } catch (Exception e) { - e.printStackTrace(); - halt(400, seconds + " is not a valid date"); - } - } - - if(entry.getKey().equals("feedEndDate")) { - Long seconds = entry.getValue().asLong(); - Long days = seconds / 60 / 60 / 24; - try { - LocalDate date = LocalDate.ofEpochDay(days); - source.feedEndDate = date; - } catch (Exception e) { - e.printStackTrace(); - halt(400, seconds + " is not a valid date"); - } - } - } - } - // TODO: deleting editor feed is handled in delete feed source? -// public static Object deleteFeedInfo(Request req, Response res) { -// String id = req.params("id"); -// String feedId = req.queryParams("feedId"); -// Object json = null; -// -// if (feedId == null) -// feedId = req.session().attribute("feedId"); -// -// if (feedId == null) { -// halt(400); -// } -// -// FeedTx tx = VersionedDataStore.getFeedTx(feedId); -// try { -// if (!tx.stops.containsKey(id)) { -// halt(404); -// } -// -// if (!tx.getTripPatternsByStop(id).isEmpty()) { -// halt(400); -// } -// -// FeedInfo s = tx.stops.remove(id); -// tx.commit(); -// json = Base.toJson(s, false); -// } catch (Exception e) { -// halt(400); -// e.printStackTrace(); -// } finally { -// tx.rollbackIfOpen(); -// } -// return json; -// } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/feedinfo/:id", FeedInfoController::getFeedInfo, json::write); - options(apiPrefix + "secure/feedinfo", (q, s) -> ""); - get(apiPrefix + "secure/feedinfo", FeedInfoController::getFeedInfo, json::write); - post(apiPrefix + "secure/feedinfo/:id", FeedInfoController::createFeedInfo, json::write); - put(apiPrefix + "secure/feedinfo/:id", FeedInfoController::updateFeedInfo, json::write); -// delete(apiPrefix + "secure/feedinfo/:id", FeedInfoController::deleteFeedInfo, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteController.java deleted file mode 100644 index b31f8bfcd..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteController.java +++ /dev/null @@ -1,380 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.utils.S3Utils; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.manager.auth.Auth0UserProfile; -import com.conveyal.datatools.manager.models.FeedSource; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.google.common.base.Function; -import com.google.common.collect.Collections2; -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.Route; -import com.conveyal.datatools.editor.models.transit.Trip; -import com.conveyal.datatools.editor.models.transit.TripPattern; -import org.mapdb.Fun; -import org.mapdb.Fun.Tuple2; - -import java.util.Collection; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class RouteController { - public static JsonManager json = - new JsonManager<>(Route.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(RouteController.class); - public static Object getRoute(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - Object json = null; - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - if (id != null) { - if (!tx.routes.containsKey(id)) { - tx.rollback(); - halt(400); - } - - Route route = tx.routes.get(id); - route.addDerivedInfo(tx); - - json = Base.toJson(route, false); - -// return route; - } - else { - Route[] ret = tx.routes.values().toArray(new Route[tx.routes.size()]); - - for (Route r : ret) { - r.addDerivedInfo(tx); - } - - json = Base.toJson(ret, false); - tx.rollback(); -// return json; - } - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - tx.rollbackIfOpen(); - e.printStackTrace(); - halt(400); - } finally { - tx.rollbackIfOpen(); - } - return json; - } - - public static Object createRoute(Request req, Response res) { - Route route; - - try { - route = Base.mapper.readValue(req.body(), Route.class); - - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - if (!gtx.feeds.containsKey(route.feedId)) { - gtx.rollback(); - halt(400, String.join("Feed %s does not exist in editor", route.feedId)); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(route.feedId)) - halt(400); - - gtx.rollback(); - - FeedTx tx = VersionedDataStore.getFeedTx(route.feedId); - - if (tx.routes.containsKey(route.id)) { - tx.rollback(); - halt(400, "Failed to create route with duplicate id"); - } - - // check if gtfsRouteId is specified, if not create from DB id - if(route.gtfsRouteId == null) { - route.gtfsRouteId = "ROUTE_" + route.id; - } - - tx.routes.put(route.id, route); - tx.commit(); - - return route; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object updateRoute(Request req, Response res) { - Route route; - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - try { - route = Base.mapper.readValue(req.body(), Route.class); - if (feedId == null) { - halt(400); - } - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - if (!tx.routes.containsKey(id)) { - tx.rollback(); - halt(404); - } - - - // check if gtfsRouteId is specified, if not create from DB id - if(route.gtfsRouteId == null) { - route.gtfsRouteId = "ROUTE_" + id; - } - - tx.routes.put(id, route); - tx.commit(); - - return route; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object uploadRouteBranding(Request req, Response res) { - Route route; - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - try { - if (feedId == null) { - halt(400); - } - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - if (!tx.routes.containsKey(id)) { - tx.rollback(); - halt(404); - } - - route = tx.routes.get(id); - - String url = S3Utils.uploadBranding(req, id); - - // set routeBrandingUrl to s3 location - route.routeBrandingUrl = url; - - tx.routes.put(id, route); - tx.commit(); - - return route; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object deleteRoute(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if(id == null || feedId == null) - halt(400); - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - - - try { - if (!tx.routes.containsKey(id)) { - tx.rollback(); - halt(404); - } - - Route r = tx.routes.get(id); - - // delete affected trips - Set> affectedTrips = tx.tripsByRoute.subSet(new Tuple2(r.id, null), new Tuple2(r.id, Fun.HI)); - for (Tuple2 trip : affectedTrips) { - tx.trips.remove(trip.b); - } - - // delete affected patterns - // note that all the trips on the patterns will have already been deleted above - Set> affectedPatts = tx.tripPatternsByRoute.subSet(new Tuple2(r.id, null), new Tuple2(r.id, Fun.HI)); - for (Tuple2 tp : affectedPatts) { - tx.tripPatterns.remove(tp.b); - } - - tx.routes.remove(id); - tx.commit(); - return true; // ok(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - tx.rollback(); - e.printStackTrace(); - halt(404, e.getMessage()); - } - return null; - } - - /** merge route from into route into, for the given agency ID */ - public static Object mergeRoutes (Request req, Response res) { - String from = req.queryParams("from"); - String into = req.queryParams("into"); - - String feedId = req.queryParams("feedId"); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null || from == null || into == null) - halt(400); - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - // ensure the routes exist - if (!tx.routes.containsKey(from) || !tx.routes.containsKey(into)) { - tx.rollback(); - halt(400); - } - - // get all the trip patterns for route from - // note that we clone them here so we can later modify them - Collection tps = Collections2.transform( - tx.tripPatternsByRoute.subSet(new Tuple2(from, null), new Tuple2(from, Fun.HI)), - new Function, TripPattern>() { - @Override - public TripPattern apply(Tuple2 input) { - try { - return tx.tripPatterns.get(input.b).clone(); - } catch (CloneNotSupportedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - throw new RuntimeException(e); - } - } - }); - - for (TripPattern tp : tps) { - tp.routeId = into; - tx.tripPatterns.put(tp.id, tp); - } - - // now move all the trips - Collection ts = Collections2.transform( - tx.tripsByRoute.subSet(new Tuple2(from, null), new Tuple2(from, Fun.HI)), - new Function, Trip>() { - @Override - public Trip apply(Tuple2 input) { - try { - return tx.trips.get(input.b).clone(); - } catch (CloneNotSupportedException e) { - e.printStackTrace(); - throw new RuntimeException(e); - } - } - }); - - for (Trip t : ts) { - t.routeId = into; - tx.trips.put(t.id, t); - } - - tx.routes.remove(from); - - tx.commit(); - return true; // ok(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - tx.rollback(); - throw e; - } - } -// public static FeedTx requestFeedTx(Request req, FeedSource s, String action) { -// Auth0UserProfile userProfile = req.attribute("user"); -// Boolean publicFilter = Boolean.valueOf(req.queryParams("public")); -// -// // check for null feedsource -// if (s == null) -// halt(400, "Feed source ID does not exist"); -// -// boolean authorized; -// switch (action) { -// case "manage": -// authorized = userProfile.canManageFeed(s.projectId, s.id); -// break; -// case "view": -// authorized = userProfile.canViewFeed(s.projectId, s.id); -// break; -// default: -// authorized = false; -// break; -// } -// -// // if requesting public sources -// if (publicFilter){ -// // if feed not public and user not authorized, halt -// if (!s.isPublic && !authorized) -// halt(403, "User not authorized to perform action on feed source"); -// // if feed is public, but action is managerial, halt (we shouldn't ever get here, but just in case) -// else if (s.isPublic && action.equals("manage")) -// halt(403, "User not authorized to perform action on feed source"); -// -// } -// else { -// if (!authorized) -// halt(403, "User not authorized to perform action on feed source"); -// } -// -// // if we make it here, user has permission and it's a valid feedsource -// return s; -// } - public static void register (String apiPrefix) { - get(apiPrefix + "secure/route/:id", RouteController::getRoute, json::write); - options(apiPrefix + "secure/route", (q, s) -> ""); - get(apiPrefix + "secure/route", RouteController::getRoute, json::write); - post(apiPrefix + "secure/route/merge", RouteController::mergeRoutes, json::write); - post(apiPrefix + "secure/route", RouteController::createRoute, json::write); - put(apiPrefix + "secure/route/:id", RouteController::updateRoute, json::write); - post(apiPrefix + "secure/route/:id/uploadbranding", RouteController::uploadRouteBranding, json::write); - delete(apiPrefix + "secure/route/:id", RouteController::deleteRoute, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteTypeController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteTypeController.java deleted file mode 100644 index 2aaf551be..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/RouteTypeController.java +++ /dev/null @@ -1,123 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.RouteType; - -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class RouteTypeController { - public static JsonManager json = - new JsonManager<>(RouteType.class, JsonViews.UserInterface.class); - public static Object getRouteType(Request req, Response res) { - String id = req.params("id"); - Object json = null; - try { - GlobalTx tx = VersionedDataStore.getGlobalTx(); - - if(id != null) { - if(tx.routeTypes.containsKey(id)) - json = Base.toJson(tx.routeTypes.get(id), false); - else - halt(404); - - tx.rollback(); - } - else { - json = Base.toJson(tx.routeTypes.values(), false); - tx.rollback(); - } - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return json; - } - - public static Object createRouteType(Request req, Response res) { - RouteType routeType; - - try { - routeType = Base.mapper.readValue(req.body(), RouteType.class); - - GlobalTx tx = VersionedDataStore.getGlobalTx(); - - if (tx.routeTypes.containsKey(routeType.id)) { - tx.rollback(); - halt(400); - } - - tx.routeTypes.put(routeType.id, routeType); - tx.commit(); - - return routeType; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - - public static Object updateRouteType(Request req, Response res) { - RouteType routeType; - - try { - routeType = Base.mapper.readValue(req.body(), RouteType.class); - - if(routeType.id == null) { - halt(400); - } - - GlobalTx tx = VersionedDataStore.getGlobalTx(); - if (!tx.routeTypes.containsKey(routeType.id)) { - tx.rollback(); - halt(404); - } - - tx.routeTypes.put(routeType.id, routeType); - tx.commit(); - - return routeType; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - // TODO: cascaded delete, etc. - public static Object deleteRouteType(Request req, Response res) { - String id = req.params("id"); - if (id == null) - halt(400); - - GlobalTx tx = VersionedDataStore.getGlobalTx(); - - if (!tx.routeTypes.containsKey(id)) { - tx.rollback(); - halt(400); - } - - tx.routeTypes.remove(id); - tx.commit(); - - return true; // ok(); - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/routetype/:id", RouteTypeController::getRouteType, json::write); - options(apiPrefix + "secure/routetype", (q, s) -> ""); - get(apiPrefix + "secure/routetype", RouteTypeController::getRouteType, json::write); - post(apiPrefix + "secure/routetype", RouteTypeController::createRouteType, json::write); - put(apiPrefix + "secure/routetype/:id", RouteTypeController::updateRouteType, json::write); - delete(apiPrefix + "secure/routetype/:id", RouteTypeController::deleteRouteType, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/ScheduleExceptionController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/ScheduleExceptionController.java deleted file mode 100644 index 1e7b242e1..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/ScheduleExceptionController.java +++ /dev/null @@ -1,217 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.ScheduleException; -import java.time.LocalDate; - -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class ScheduleExceptionController { - public static JsonManager json = - new JsonManager<>(ScheduleException.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(ScheduleExceptionController.class); - - /** Get all of the schedule exceptions for an agency */ - public static Object getScheduleException (Request req, Response res) { - String exceptionId = req.params("exceptionId"); - String feedId = req.queryParams("feedId"); - Object json = null; - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx tx = null; - - try { - tx = VersionedDataStore.getFeedTx(feedId); - - if (exceptionId != null) { - if (!tx.exceptions.containsKey(exceptionId)) - halt(400); - else - json = Base.toJson(tx.exceptions.get(exceptionId), false); - } - else { - json = Base.toJson(tx.exceptions.values(), false); - } - tx.rollback(); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return json; - } - - public static Object createScheduleException (Request req, Response res) { - FeedTx tx = null; - try { - ScheduleException ex = Base.mapper.readValue(req.body(), ScheduleException.class); - - if (!VersionedDataStore.feedExists(ex.feedId)) { - halt(400); - } - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(ex.feedId)) - halt(400); - - tx = VersionedDataStore.getFeedTx(ex.feedId); - - if (ex.customSchedule != null) { - for (String cal : ex.customSchedule) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - if (ex.addedService != null) { - for (String cal : ex.addedService) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - if (ex.removedService != null) { - for (String cal : ex.removedService) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - - if (tx.exceptions.containsKey(ex.id)) { - tx.rollback(); - halt(400); - } - if (ex.dates != null) { - for (LocalDate date : ex.dates) { - if (tx.scheduleExceptionCountByDate.containsKey(date) && tx.scheduleExceptionCountByDate.get(date) > 0) { - tx.rollback(); - halt(400); - } - } - } - - tx.exceptions.put(ex.id, ex); - - tx.commit(); - - return Base.toJson(ex, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object updateScheduleException (Request req, Response res) { - FeedTx tx = null; - try { - ScheduleException ex = Base.mapper.readValue(req.body(), ScheduleException.class); - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(ex.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(ex.feedId)) { - halt(400); - } - - tx = VersionedDataStore.getFeedTx(ex.feedId); - - if (ex.customSchedule != null) { - for (String cal : ex.customSchedule) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - if (ex.addedService != null) { - for (String cal : ex.addedService) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - if (ex.removedService != null) { - for (String cal : ex.removedService) { - if (!tx.calendars.containsKey(cal)) { - tx.rollback(); - halt(400); - } - } - } - - if (!tx.exceptions.containsKey(ex.id)) { - tx.rollback(); - halt(400); - } - - tx.exceptions.put(ex.id, ex); - - tx.commit(); - - return ex; - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object deleteScheduleException (Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - tx.exceptions.remove(id); - tx.commit(); - - return true; // ok(); - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/scheduleexception/:id", ScheduleExceptionController::getScheduleException, json::write); - options(apiPrefix + "secure/scheduleexception", (q, s) -> ""); - get(apiPrefix + "secure/scheduleexception", ScheduleExceptionController::getScheduleException, json::write); - post(apiPrefix + "secure/scheduleexception", ScheduleExceptionController::createScheduleException, json::write); - put(apiPrefix + "secure/scheduleexception/:id", ScheduleExceptionController::updateScheduleException, json::write); - delete(apiPrefix + "secure/scheduleexception/:id", ScheduleExceptionController::deleteScheduleException, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java index 81f880fc0..40bbe1f09 100644 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java +++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java @@ -1,323 +1,259 @@ package com.conveyal.datatools.editor.controllers.api; -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotExport; -import com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotMerge; -import com.conveyal.datatools.editor.models.Snapshot; -import com.conveyal.datatools.editor.models.transit.Stop; + +import com.conveyal.datatools.common.utils.SparkUtils; +import com.conveyal.datatools.editor.jobs.CreateSnapshotJob; +import com.conveyal.datatools.editor.jobs.ExportSnapshotToGTFSJob; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.controllers.api.FeedVersionController; import com.conveyal.datatools.manager.models.FeedDownloadToken; +import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import org.mapdb.Fun; -import org.mapdb.Fun.Tuple2; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.conveyal.datatools.editor.utils.JacksonSerializers; +import spark.Request; +import spark.Response; -import java.io.File; import java.io.IOException; import java.util.Collection; -import java.util.List; - -import spark.Request; -import spark.Response; +import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3; import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile; -import static spark.Spark.*; - - +import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static spark.Spark.delete; +import static spark.Spark.get; +import static spark.Spark.options; +import static spark.Spark.post; +import static spark.Spark.put; + +/** + * HTTP CRUD endpoints for managing snapshots, which are copies of GTFS feeds stored in the editor. + */ public class SnapshotController { public static final Logger LOG = LoggerFactory.getLogger(SnapshotController.class); public static JsonManager json = new JsonManager<>(Snapshot.class, JsonViews.UserInterface.class); - public static Object getSnapshot(Request req, Response res) throws IOException { - String id = req.params("id"); - String feedId= req.queryParams("feedId"); - - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - Object json = null; - try { - if (id != null) { - Tuple2 sid = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - if (gtx.snapshots.containsKey(sid)) - json = Base.toJson(gtx.snapshots.get(sid), false); - else - halt(404); - } - else { - if (feedId == null) - feedId = req.session().attribute("feedId"); - - Collection snapshots; - if (feedId == null) { - // if it's still null just give them everything - // this is used in GTFS Data Manager to get snapshots in bulk - // TODO this allows any authenticated user to fetch GTFS data for any agency - snapshots = gtx.snapshots.values(); - } - else { - snapshots = gtx.snapshots.subMap(new Tuple2(feedId, null), new Tuple2(feedId, Fun.HI)).values(); - } - - json = Base.toJson(snapshots, false); - } - } finally { - gtx.rollback(); - } - return json; + /** + * HTTP endpoint that returns a single snapshot for a specified ID. + */ + private static Snapshot getSnapshotById(Request req, Response res) { + return getSnapshotFromRequest(req); } - public static Object createSnapshot (Request req, Response res) { - GlobalTx gtx = null; - try { - // create a dummy snapshot from which to get values - Snapshot original = Base.mapper.readValue(req.body(), Snapshot.class); - Snapshot s = VersionedDataStore.takeSnapshot(original.feedId, original.name, original.comment); - s.validFrom = original.validFrom; - s.validTo = original.validTo; - gtx = VersionedDataStore.getGlobalTx(); - - // the snapshot we have just taken is now current; make the others not current - Collection snapshots = gtx.snapshots.subMap(new Tuple2(s.feedId, null), new Tuple2(s.feedId, Fun.HI)).values(); - for (Snapshot o : snapshots) { - if (o.id.equals(s.id)) - continue; - - Snapshot cloned = o.clone(); - cloned.current = false; - gtx.snapshots.put(o.id, cloned); - } - - gtx.commit(); + /** + * Wrapper method that checks requesting user's permissions on feed source (via feedId param) and returns snapshot + * for ID param if the permissions check is OK. + */ + private static Snapshot getSnapshotFromRequest(Request req) { + String id = req.params("id"); + if (id == null) haltWithMessage(req, 400, "Must provide valid snapshot ID"); + // Check user permissions on feed source. + FeedVersionController.requestFeedSourceById(req, "view", "feedId"); + return Persistence.snapshots.getById(id); + } - return Base.toJson(s, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - if (gtx != null) gtx.rollbackIfOpen(); - } - return null; + /** + * HTTP endpoint that returns the list of snapshots for a given feed source. + */ + private static Collection getSnapshots(Request req, Response res) { + // Get feed source and check user permissions. + FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "view", "feedId"); + // FIXME Do we need a way to return all snapshots? + // Is this used in GTFS Data Manager to retrieveById snapshots in bulk? + + // Return snapshots for feed source. + return feedSource.retrieveSnapshots(); } - public static Boolean importSnapshot (Request req, Response res) { + /** + * HTTP endpoint that makes a snapshot copy of the current data loaded in the editor for a given feed source. + */ + private static String createSnapshot (Request req, Response res) throws IOException { + Auth0UserProfile userProfile = req.attribute("user"); + FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId"); + // Take fields from request body for creating snapshot. + Snapshot snapshot = json.read(req.body()); + // Ensure feed source ID and snapshotOf namespace is correct + snapshot.feedSourceId = feedSource.id; + snapshot.snapshotOf = feedSource.editorNamespace; + snapshot.storeUser(userProfile); + // If there is no active buffer for feed source, set boolean to update it to the new snapshot namespace. + // Otherwise, creating a snapshot will just create a copy of the tables and leave the buffer untouched. + boolean bufferIsEmpty = feedSource.editorNamespace == null; + // Create new non-buffer snapshot. + CreateSnapshotJob createSnapshotJob = + new CreateSnapshotJob(snapshot, bufferIsEmpty, !bufferIsEmpty, false); + // Begin asynchronous execution. + DataManager.heavyExecutor.execute(createSnapshotJob); + return SparkUtils.formatJobMessage(createSnapshotJob.jobId, "Creating snapshot."); + } + /** + * Create snapshot from feedVersion and load/import into editor database. + */ + private static String importFeedVersionAsSnapshot(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); + // Get feed version from request (and check permissions). String feedVersionId = req.queryParams("feedVersionId"); - - if(feedVersionId == null) { - halt(400, "No FeedVersion ID specified"); - } - - FeedVersion feedVersion = FeedVersion.get(feedVersionId); - if(feedVersion == null) { - halt(404, "Could not find FeedVersion with ID " + feedVersionId); - } - - ProcessGtfsSnapshotMerge processGtfsSnapshotMergeJob = - new ProcessGtfsSnapshotMerge(feedVersion, userProfile.getUser_id()); - - new Thread(processGtfsSnapshotMergeJob).start(); - - halt(200, "{status: \"ok\"}"); - return null; + FeedVersion feedVersion = FeedVersionController.requestFeedVersion(req, "edit", feedVersionId); + FeedSource feedSource = feedVersion.parentFeedSource(); + // Create and run snapshot job + Snapshot snapshot = new Snapshot("Snapshot of " + feedVersion.name, feedSource.id, feedVersion.namespace); + snapshot.storeUser(userProfile); + // Only preserve buffer if there is already a namespace associated with the feed source and requester has + // explicitly asked for it. Otherwise, let go of the buffer. + boolean preserveBuffer = "true".equals(req.queryParams("preserveBuffer")) && feedSource.editorNamespace != null; + CreateSnapshotJob createSnapshotJob = + new CreateSnapshotJob(snapshot, true, false, preserveBuffer); + DataManager.heavyExecutor.execute(createSnapshotJob); + return formatJobMessage(createSnapshotJob.jobId, "Importing version as snapshot."); } - public static Object updateSnapshot (Request req, Response res) { - String id = req.params("id"); - GlobalTx gtx = null; - try { - Snapshot s = Base.mapper.readValue(req.body(), Snapshot.class); - - Tuple2 sid = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - - if (s == null || s.id == null || !s.id.equals(sid)) { - LOG.warn("snapshot ID not matched, not updating: {}, {}", s.id, id); - halt(400); - } - - gtx = VersionedDataStore.getGlobalTx(); - - if (!gtx.snapshots.containsKey(s.id)) { - gtx.rollback(); - halt(404); - } - - gtx.snapshots.put(s.id, s); - - gtx.commit(); - - return Base.toJson(s, false); - } catch (IOException e) { - e.printStackTrace(); - if (gtx != null) gtx.rollbackIfOpen(); - halt(400); - } + // FIXME: Is this method used anywhere? Can we delete? + private static Object updateSnapshot (Request req, Response res) { + // FIXME + haltWithMessage(req, 400, "Method not implemented"); return null; } - public static Object restoreSnapshot (Request req, Response res) { + /** + * HTTP API method to "restore" snapshot (specified by ID param) to the active editor buffer. This essentially makes + * a copy of the namespace to preserve the "save point" and then updates the working buffer to point to the newly + * created namespace. + */ + private static String restoreSnapshot (Request req, Response res) { + // Get the snapshot ID to restore (set the namespace pointer) String id = req.params("id"); - Object json = null; - Tuple2 decodedId = null; - try { - decodedId = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - } catch (IOException e1) { - halt(400); + // FIXME Ensure namespace id exists in database? + // Retrieve feed source. + FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId"); + Snapshot snapshotToRestore = Persistence.snapshots.getById(id); + if (snapshotToRestore == null) { + haltWithMessage(req, 400, "Must specify valid snapshot ID"); } - - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - Snapshot local; - try { - if (!gtx.snapshots.containsKey(decodedId)) { - halt(404); - } - - local = gtx.snapshots.get(decodedId); - - List stops = VersionedDataStore.restore(local); - - // the snapshot we have just restored is now current; make the others not current - // TODO: add this loop back in... taken out in order to compile - Collection snapshots = Snapshot.getSnapshots(local.feedId); - for (Snapshot o : snapshots) { - if (o.id.equals(local.id)) - continue; - - Snapshot cloned = o.clone(); - cloned.current = false; - gtx.snapshots.put(o.id, cloned); - } - - Snapshot clone = local.clone(); - clone.current = true; - gtx.snapshots.put(local.id, clone); - gtx.commit(); - - json = Base.toJson(stops, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } finally { - gtx.rollbackIfOpen(); + // Update editor namespace pointer. + if (snapshotToRestore.namespace == null) { + haltWithMessage(req, 400, "Failed to restore snapshot. No namespace found."); } - return json; + // Preserve existing editor buffer if requested. FIXME: should the request body also contain name and comments? + boolean preserveBuffer = "true".equals(req.queryParams("preserveBuffer")); + // Create and run snapshot job + Auth0UserProfile userProfile = req.attribute("user"); + // FIXME what if the snapshot has not had any edits made to it? In this case, we would just be making copy upon + // copy of a feed for no reason. + String name = "Restore snapshot " + snapshotToRestore.name; + Snapshot snapshot = new Snapshot(name, feedSource.id, snapshotToRestore.namespace); + snapshot.storeUser(userProfile); + CreateSnapshotJob createSnapshotJob = new CreateSnapshotJob(snapshot, true, false, preserveBuffer); + DataManager.heavyExecutor.execute(createSnapshotJob); + return formatJobMessage(createSnapshotJob.jobId, "Restoring snapshot..."); } - /** Export a snapshot as GTFS */ - public static Object exportSnapshot (Request req, Response res) { - String id = req.params("id"); - Tuple2 decodedId; - FeedDownloadToken token; - try { - decodedId = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - } catch (IOException e1) { - halt(400); - return null; - } - - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - Snapshot local; - try { - if (!gtx.snapshots.containsKey(decodedId)) { - halt(404); - return null; - } - - local = gtx.snapshots.get(decodedId); - token = new FeedDownloadToken(local); - token.save(); - } finally { - gtx.rollbackIfOpen(); - } - return token; + /** + * HTTP endpoint that triggers export of specified snapshot to GTFS. Returns a job ID with which to monitor the job + * status. At the completion of the job, requester should fetch a download token for the snapshot. + */ + private static String downloadSnapshotAsGTFS(Request req, Response res) { + Auth0UserProfile userProfile = req.attribute("user"); + String userId = userProfile.getUser_id(); + Snapshot snapshot = getSnapshotFromRequest(req); + // Create and kick off export job. + // FIXME: what if a snapshot is already written to S3? + ExportSnapshotToGTFSJob exportSnapshotToGTFSJob = new ExportSnapshotToGTFSJob(userId, snapshot); + DataManager.heavyExecutor.execute(exportSnapshotToGTFSJob); + return formatJobMessage(exportSnapshotToGTFSJob.jobId, "Exporting snapshot to GTFS."); } - /** Write snapshot to disk as GTFS */ - public static boolean writeSnapshotAsGtfs (Tuple2 decodedId, File outFile) { - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - Snapshot local; - try { - if (!gtx.snapshots.containsKey(decodedId)) { - return false; + /** + * Depending on the application's configuration, returns either temporary S3 credentials to download get an object + * at the specified key OR a single-use download token to download a snapshot's GTFS file. + */ + private static Object getSnapshotToken(Request req, Response res) { + Snapshot snapshot = getSnapshotFromRequest(req); + FeedDownloadToken token; + String key = "snapshots/" + snapshot.id + ".zip"; + // if storing feeds on S3, first write the snapshot to GTFS file and upload to S3 + // this needs to be completed before the credentials are delivered, so that the client has + // an actual object to download. + // FIXME: use new FeedStore. + if (DataManager.useS3) { + if (!FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, key)) { + haltWithMessage(req, 400, String.format("Error downloading snapshot from S3. Object %s does not exist.", key)); } - - local = gtx.snapshots.get(decodedId); - - new ProcessGtfsSnapshotExport(local, outFile).run(); - } finally { - gtx.rollbackIfOpen(); - } - - return true; - } - public static boolean writeSnapshotAsGtfs (String id, File outFile) { - Tuple2 decodedId; - try { - decodedId = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - } catch (IOException e1) { - return false; + // Return presigned download link if using S3. + return downloadFromS3(FeedStore.s3Client, DataManager.feedBucket, key, false, res); + } else { + // If not storing on s3, just use the token download method. + token = new FeedDownloadToken(snapshot); + Persistence.tokens.create(token); + return token; } - return writeSnapshotAsGtfs(decodedId, outFile); } - public static Object deleteSnapshot(Request req, Response res) { + /** + * HTTP endpoint to PERMANENTLY delete a snapshot given for the specified ID. + */ + private static Snapshot deleteSnapshot(Request req, Response res) { + // Get the snapshot ID to restore (set the namespace pointer) String id = req.params("id"); - Tuple2 decodedId; + // FIXME Ensure namespace id exists in database. + // Check feed source permissions. + FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId"); + // Retrieve snapshot + Snapshot snapshot = Persistence.snapshots.getById(id); + if (snapshot == null) haltWithMessage(req, 400, "Must provide valid snapshot ID."); try { - decodedId = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); - } catch (IOException e1) { - halt(400); + // Remove the snapshot and then renumber the snapshots + Persistence.snapshots.removeById(snapshot.id); + feedSource.renumberSnapshots(); + // FIXME Are there references that need to be removed? E.g., what if the active buffer snapshot is deleted? + // FIXME delete tables from database? + return snapshot; + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Unknown error deleting snapshot.", e); return null; } - - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - - gtx.snapshots.remove(decodedId); - gtx.commit(); - - return true; } + /** + * This method is used only when NOT storing feeds on S3. It will deliver a + * snapshot file from the local storage if a valid token is provided. + */ private static Object downloadSnapshotWithToken (Request req, Response res) { + // FIXME: Update for refactored FeedStore String id = req.params("token"); - FeedDownloadToken token = FeedDownloadToken.get(id); + FeedDownloadToken token = Persistence.tokens.getById(id); if(token == null || !token.isValid()) { - halt(400, "Feed download token not valid"); + haltWithMessage(req, 400, "Feed download token not valid"); } - Snapshot snapshot = token.getSnapshot(); - File file = null; - - try { - file = File.createTempFile("snapshot", ".zip"); - writeSnapshotAsGtfs(snapshot.id, file); - } catch (Exception e) { - e.printStackTrace(); - String message = "Unable to create temp file for snapshot"; - LOG.error(message); - } - token.delete(); - return downloadFile(file, res); + Snapshot snapshot = token.retrieveSnapshot(); + Persistence.tokens.removeById(token.id); + String fileName = snapshot.id + ".zip"; + return downloadFile(FeedVersion.feedStore.getFeed(fileName), fileName, req, res); } + public static void register (String apiPrefix) { - get(apiPrefix + "secure/snapshot/:id", SnapshotController::getSnapshot, json::write); + get(apiPrefix + "secure/snapshot/:id", SnapshotController::getSnapshotById, json::write); options(apiPrefix + "secure/snapshot", (q, s) -> ""); - get(apiPrefix + "secure/snapshot", SnapshotController::getSnapshot, json::write); + get(apiPrefix + "secure/snapshot", SnapshotController::getSnapshots, json::write); post(apiPrefix + "secure/snapshot", SnapshotController::createSnapshot, json::write); - post(apiPrefix + "secure/snapshot/import", SnapshotController::importSnapshot, json::write); + post(apiPrefix + "secure/snapshot/import", SnapshotController::importFeedVersionAsSnapshot, json::write); put(apiPrefix + "secure/snapshot/:id", SnapshotController::updateSnapshot, json::write); post(apiPrefix + "secure/snapshot/:id/restore", SnapshotController::restoreSnapshot, json::write); - get(apiPrefix + "secure/snapshot/:id/downloadtoken", SnapshotController::exportSnapshot, json::write); + get(apiPrefix + "secure/snapshot/:id/download", SnapshotController::downloadSnapshotAsGTFS, json::write); + get(apiPrefix + "secure/snapshot/:id/downloadtoken", SnapshotController::getSnapshotToken, json::write); delete(apiPrefix + "secure/snapshot/:id", SnapshotController::deleteSnapshot, json::write); get(apiPrefix + "downloadsnapshot/:token", SnapshotController::downloadSnapshotWithToken); diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/StopController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/StopController.java deleted file mode 100644 index 6a72ffa62..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/StopController.java +++ /dev/null @@ -1,369 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.google.common.base.Function; -import com.google.common.collect.Collections2; -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.*; -import org.geotools.referencing.GeodeticCalculator; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -import org.json.simple.JSONObject; -import org.mapdb.BTreeMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class StopController { - - public static JsonManager json = - new JsonManager<>(Stop.class, JsonViews.UserInterface.class); - private static final Logger LOG = LoggerFactory.getLogger(StopController.class); - public static Object getStop(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - String patternId = req.queryParams("patternId"); - Boolean majorStops = Boolean.valueOf(req.queryParams("majorStops")); - Double west = null; - if (req.queryParams("west") != null) - west = Double.valueOf(req.queryParams("west")); - Double east = null; - if (req.queryParams("east") != null) - east = Double.valueOf(req.queryParams("east")); - Double north = null; - if (req.queryParams("north") != null) - north = Double.valueOf(req.queryParams("north")); - Double south = null; - if (req.queryParams("south") != null) - south = Double.valueOf(req.queryParams("south")); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - if (id != null) { - if (!tx.stops.containsKey(id)) { - tx.rollback(); - halt(404); - } - - Object json = Base.toJson(tx.stops.get(id), false); - tx.rollback(); - return json; - } - else if (Boolean.TRUE.equals(majorStops)) { - // get the major stops for the agency - Collection stops = Collections2.transform(tx.majorStops, new Function() { - @Override - public Stop apply(String input) { - // TODO Auto-generated method stub - return tx.stops.get(input); - } - }); - - Object stopsJson = Base.toJson(stops, false); - tx.rollback(); - return stopsJson; - } - else if (west != null && east != null && south != null && north != null) { - Collection matchedStops = tx.getStopsWithinBoundingBox(north, east, south, west); - Object json = Base.toJson(matchedStops, false); - tx.rollback(); - return json; - } - else if (patternId != null) { - if (!tx.tripPatterns.containsKey(patternId)) { - halt(404); - } - - TripPattern p = tx.tripPatterns.get(patternId); - - Collection ret = Collections2.transform(p.patternStops, new Function() { - @Override - public Stop apply(TripPatternStop input) { - return tx.stops.get(input.stopId); - } - }); - - Object json = Base.toJson(ret, false); - tx.rollback(); - return json; - } - // return all - else { - BTreeMap stops; - try { - stops = tx.stops; - Collection matchedStops = stops.values(); - Object json = Base.toJson(matchedStops, false); - tx.rollback(); - return json; - } catch (IllegalAccessError e) { - return new ArrayList<>(); - } - - } - - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - tx.rollback(); - } - return null; - } - - public static Object createStop(Request req, Response res) { - FeedTx tx = null; - Object json = null; - try { - Stop stop = Base.mapper.readValue(req.body(), Stop.class); - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(stop.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(stop.feedId)) { - halt(400, "Stop must reference feed source ID"); - } - - tx = VersionedDataStore.getFeedTx(stop.feedId); - - if (tx.stops.containsKey(stop.id)) { - halt(400); - } - - tx.stops.put(stop.id, stop); - tx.commit(); - json = Base.toJson(stop, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return json; - } - - - public static Object updateStop(Request req, Response res) throws IOException { - FeedTx tx = null; - Object json; - Stop stop = Base.mapper.readValue(req.body(), Stop.class); - String feedId = req.queryParams("feedId"); - if (feedId == null) { - halt(400, "Must provide feed ID"); - } - - if (!VersionedDataStore.feedExists(feedId)) { - halt(400, "Feed ID ("+feedId+") does not exist"); - } - try { - tx = VersionedDataStore.getFeedTx(feedId); - - if (!tx.stops.containsKey(stop.id)) { - halt(400); - tx.rollback(); - } - - tx.stops.put(stop.id, stop); - tx.commit(); - json = Base.toJson(stop, false); - tx.rollbackIfOpen(); - return json; - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - public static Object deleteStop(Request req, Response res) throws IOException { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - Object json; - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx tx = null; - try { - tx = VersionedDataStore.getFeedTx(feedId); - if (!tx.stops.containsKey(id)) { - halt(404); - tx.rollback(); - } - - if (!tx.getTripPatternsByStop(id).isEmpty()) { - Set patterns = tx.getTripPatternsByStop(id).stream().map(tripPattern -> tripPattern.name).collect(Collectors.toSet()); - Set routes = tx.getTripPatternsByStop(id).stream().map(tripPattern -> tripPattern.routeId).collect(Collectors.toSet()); - halt(400, errorMessage("Trip patterns ("+patterns.toString()+") for routes "+routes.toString()+" reference stop ID" + id)); - tx.rollback(); - } - - Stop s = tx.stops.remove(id); - tx.commit(); - json = Base.toJson(s, false); - - tx.rollbackIfOpen(); - - return json; - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - private static String errorMessage(String s) { - JSONObject json = new JSONObject(); - json.put("error", true); - json.put("message", s); - return json.toString(); - } - - - public static Object findDuplicateStops(Request req, Response res) { - String feedId = req.queryParams("feedId"); - Object json = null; - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx atx = VersionedDataStore.getFeedTx(feedId); - - try { - List> ret = new ArrayList>(); - - for (Stop stop : atx.stops.values()) { - // find nearby stops, within 5m - // at the equator, 1 degree is 111 km - // everywhere else this will overestimate, which is why we have a distance check as well (below) - double thresholdDegrees = 5 / 111000d; - - Collection candidateStops = atx.getStopsWithinBoundingBox( - stop.getLat() + thresholdDegrees, - stop.getLon() + thresholdDegrees, - stop.getLat() - thresholdDegrees, - stop.getLon() - thresholdDegrees); - - // we will always find a single stop, this one. - if (candidateStops.size() <= 1) - continue; - - List duplicatesOfThis = new ArrayList(); - - // note: this stop will be added implicitly because it is distance zero from itself - GeodeticCalculator gc = new GeodeticCalculator(); - gc.setStartingGeographicPoint(stop.getLon(), stop.getLat()); - for (Stop other : candidateStops) { - gc.setDestinationGeographicPoint(other.getLon(), other.getLat()); - if (gc.getOrthodromicDistance() < 10) { - duplicatesOfThis.add(other); - } - } - - if (duplicatesOfThis.size() > 1) { - ret.add(duplicatesOfThis); - } - } - - json = Base.toJson(ret, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - finally { - atx.rollback(); - } - return json; - } - - public static Object mergeStops(Request req, Response res) { - List mergedStopIds = Arrays.asList(req.queryParams("mergedStopIds").split(",")); - String feedId = req.queryParams("feedId"); - - if (mergedStopIds.size() <= 1) { - halt(400); - } - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - Stop.merge(mergedStopIds, tx); - tx.commit(); - } finally { - tx.rollbackIfOpen(); - } - return true; - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/stop/:id", StopController::getStop, json::write); - options(apiPrefix + "secure/stop", (q, s) -> ""); - get(apiPrefix + "secure/stop", StopController::getStop, json::write); - get(apiPrefix + "secure/stop/mergeStops", StopController::mergeStops, json::write); - post(apiPrefix + "secure/stop", StopController::createStop, json::write); - put(apiPrefix + "secure/stop/:id", StopController::updateStop, json::write); - delete(apiPrefix + "secure/stop/:id", StopController::deleteStop, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/TripController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/TripController.java deleted file mode 100644 index c97a9de2a..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/TripController.java +++ /dev/null @@ -1,247 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.StopTime; -import com.conveyal.datatools.editor.models.transit.Trip; -import com.conveyal.datatools.editor.models.transit.TripPattern; -import com.conveyal.datatools.editor.models.transit.TripPatternStop; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.fasterxml.jackson.core.type.TypeReference; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class TripController { - public static final Logger LOG = LoggerFactory.getLogger(TripController.class); - - public static JsonManager json = - new JsonManager<>(Trip.class, JsonViews.UserInterface.class); - - public static Object getTrip(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - String patternId = req.queryParams("patternId"); - String calendarId = req.queryParams("calendarId"); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - FeedTx tx = null; - - try { - tx = VersionedDataStore.getFeedTx(feedId); - if (id != null) { - if (tx.trips.containsKey(id)) - return Base.toJson(tx.trips.get(id), false); - else - halt(404); - } - else if (patternId != null && calendarId != null) { - if (!tx.tripPatterns.containsKey(patternId) || !tx.calendars.containsKey(calendarId)) { - halt(404); - } - else { - LOG.info("requesting trips for pattern/cal"); - return Base.toJson(tx.getTripsByPatternAndCalendar(patternId, calendarId), false); - } - } - - else if(patternId != null) { - return Base.toJson(tx.getTripsByPattern(patternId), false); - } - else { - return Base.toJson(tx.trips.values(), false); - } - - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollbackIfOpen(); - e.printStackTrace(); - halt(400); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - public static Object createTrip(Request req, Response res) { - FeedTx tx = null; - String createMultiple = req.queryParams("multiple"); - try { - List trips = new ArrayList<>(); - if (createMultiple != null && createMultiple.equals("true")) { - trips = Base.mapper.readValue(req.body(), new TypeReference>(){}); - } else { - Trip trip = Base.mapper.readValue(req.body(), Trip.class); - trips.add(trip); - } - for (Trip trip : trips) { - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(trip.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(trip.feedId)) { - halt(400); - } - - tx = VersionedDataStore.getFeedTx(trip.feedId); - - if (tx.trips.containsKey(trip.id)) { - tx.rollback(); - halt(400); - } - - if (!tx.tripPatterns.containsKey(trip.patternId) || trip.stopTimes.size() != tx.tripPatterns.get(trip.patternId).patternStops.size()) { - tx.rollback(); - halt(400); - } - - tx.trips.put(trip.id, trip); - } - tx.commit(); - - return Base.toJson(trips, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollbackIfOpen(); - e.printStackTrace(); - halt(400); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - public static Object updateTrip(Request req, Response res) { - FeedTx tx = null; - - try { - Trip trip = Base.mapper.readValue(req.body(), Trip.class); - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(trip.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(trip.feedId)) { - halt(400); - } - - tx = VersionedDataStore.getFeedTx(trip.feedId); - - if (!tx.trips.containsKey(trip.id)) { - tx.rollback(); - halt(400); - } - - if (!tx.tripPatterns.containsKey(trip.patternId) || trip.stopTimes.size() != tx.tripPatterns.get(trip.patternId).patternStops.size()) { - tx.rollback(); - halt(400); - } - - TripPattern patt = tx.tripPatterns.get(trip.patternId); - - // confirm that each stop in the trip matches the stop in the pattern - - for (int i = 0; i < trip.stopTimes.size(); i++) { - TripPatternStop ps = patt.patternStops.get(i); - StopTime st = trip.stopTimes.get(i); - - if (st == null) - // skipped stop - continue; - - if (!st.stopId.equals(ps.stopId)) { - LOG.error("Mismatch between stop sequence in trip and pattern at position {}, pattern: {}, stop: {}", i, ps.stopId, st.stopId); - tx.rollback(); - halt(400); - } - } - - tx.trips.put(trip.id, trip); - tx.commit(); - - return Base.toJson(trip, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollbackIfOpen(); - e.printStackTrace(); - halt(400); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - public static Object deleteTrip(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - Object json = null; - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (id == null || feedId == null) { - halt(400); - } - - FeedTx tx = null; - try { - tx = VersionedDataStore.getFeedTx(feedId); - Trip trip = tx.trips.remove(id); - tx.commit(); - return Base.toJson(trip, false); - } catch (IOException e) { - e.printStackTrace(); - halt(400); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollbackIfOpen(); - e.printStackTrace(); - halt(400); - } finally { - if (tx != null) tx.rollbackIfOpen(); - } - return null; - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/trip/:id", TripController::getTrip, json::write); - options(apiPrefix + "secure/trip", (q, s) -> ""); - get(apiPrefix + "secure/trip", TripController::getTrip, json::write); - post(apiPrefix + "secure/trip", TripController::createTrip, json::write); - put(apiPrefix + "secure/trip/:id", TripController::updateTrip, json::write); - delete(apiPrefix + "secure/trip/:id", TripController::deleteTrip, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/TripPatternController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/TripPatternController.java deleted file mode 100644 index 1974ca81c..000000000 --- a/src/main/java/com/conveyal/datatools/editor/controllers/api/TripPatternController.java +++ /dev/null @@ -1,231 +0,0 @@ -package com.conveyal.datatools.editor.controllers.api; - -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.google.common.base.Function; -import com.google.common.collect.Collections2; -import com.conveyal.datatools.editor.controllers.Base; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; -import com.conveyal.datatools.editor.models.transit.Trip; -import com.conveyal.datatools.editor.models.transit.TripPattern; -import org.mapdb.Fun; -import org.mapdb.Fun.Tuple2; - -import java.util.Collection; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.HaltException; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; - - -public class TripPatternController { - private static final Logger LOG = LoggerFactory.getLogger(TripPatternController.class); - public static JsonManager json = - new JsonManager<>(TripPattern.class, JsonViews.UserInterface.class); - - public static Object getTripPattern(Request req, Response res) { - String id = req.params("id"); - String routeId = req.queryParams("routeId"); - String feedId = req.queryParams("feedId"); - Object json = null; - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if (feedId == null) { - halt(400); - } - - final FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - - if(id != null) { - if (!tx.tripPatterns.containsKey(id)) - halt(404); - else - json = Base.toJson(tx.tripPatterns.get(id), false); - } - else if (routeId != null) { - - if (!tx.routes.containsKey(routeId)) - halt(404, "routeId '" + routeId + "' does not exist"); - else { - Set> tpKeys = tx.tripPatternsByRoute.subSet(new Tuple2(routeId, null), new Tuple2(routeId, Fun.HI)); - - Collection patts = Collections2.transform(tpKeys, new Function, TripPattern>() { - - @Override - public TripPattern apply(Tuple2 input) { - return tx.tripPatterns.get(input.b); - } - }); - - json = Base.toJson(patts, false); - } - } - else { // get all patterns - json = Base.toJson(tx.tripPatterns, false); - } - - tx.rollback(); - - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - tx.rollback(); - e.printStackTrace(); - halt(400); - } - return json; - } - - public static Object createTripPattern(Request req, Response res) { - TripPattern tripPattern; - - try { - tripPattern = Base.mapper.readValue(req.body(), TripPattern.class); - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(tripPattern.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(tripPattern.feedId)) { - halt(400); - } - - FeedTx tx = VersionedDataStore.getFeedTx(tripPattern.feedId); - - if (tx.tripPatterns.containsKey(tripPattern.id)) { - tx.rollback(); - halt(400); - } - - tripPattern.calcShapeDistTraveled(); - - tx.tripPatterns.put(tripPattern.id, tripPattern); - tx.commit(); - - return Base.toJson(tripPattern, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - e.printStackTrace(); - halt(400); - } - return null; - } - - /** - * Update existing trip pattern. NOTE: function assumes only one stop has been - * changed (added, modified, removed) - * @param req - * @param res - * @return - */ - public static Object updateTripPattern(Request req, Response res) { - TripPattern tripPattern; - FeedTx tx = null; - try { - tripPattern = Base.mapper.readValue(req.body(), TripPattern.class); - - if (req.session().attribute("feedId") != null && !req.session().attribute("feedId").equals(tripPattern.feedId)) - halt(400); - - if (!VersionedDataStore.feedExists(tripPattern.feedId)) { - halt(400); - } - - if (tripPattern.id == null) { - halt(400); - } - - tx = VersionedDataStore.getFeedTx(tripPattern.feedId); - - TripPattern originalTripPattern = tx.tripPatterns.get(tripPattern.id); - - if(originalTripPattern == null) { - tx.rollback(); - halt(400); - } - - // check if frequency value has changed for pattern and nuke trips created for old value - // double check that we're working with the same trip pattern here - if (originalTripPattern.useFrequency != tripPattern.useFrequency) { - for (Trip trip : tx.getTripsByPattern(originalTripPattern.id)) { - if (originalTripPattern.useFrequency == trip.useFrequency) { - LOG.info("Removing frequency={} trip {}", trip.useFrequency, trip.id); - tx.trips.remove(trip.id); - } - } - } - - // update stop times - try { - TripPattern.reconcilePatternStops(originalTripPattern, tripPattern, tx); - } catch (IllegalStateException e) { - tx.rollback(); - LOG.info("Could not save trip pattern", e); - halt(400); - } - - tripPattern.calcShapeDistTraveled(); - - tx.tripPatterns.put(tripPattern.id, tripPattern); - tx.commit(); - - return Base.toJson(tripPattern, false); - } catch (HaltException e) { - LOG.error("Halt encountered", e); - throw e; - } catch (Exception e) { - if (tx != null) tx.rollback(); - e.printStackTrace(); - halt(400); - } - return null; - } - - public static Object deleteTripPattern(Request req, Response res) { - String id = req.params("id"); - String feedId = req.queryParams("feedId"); - - if (feedId == null) - feedId = req.session().attribute("feedId"); - - if(id == null || feedId == null) { - halt(400); - } - - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - - try { - // first zap all trips on this trip pattern - for (Trip trip : tx.getTripsByPattern(id)) { - tx.trips.remove(trip.id); - } - - tx.tripPatterns.remove(id); - tx.commit(); - } finally { - tx.rollbackIfOpen(); - } - return null; - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/trippattern/:id", TripPatternController::getTripPattern, json::write); - options(apiPrefix + "secure/trippattern", (q, s) -> ""); - get(apiPrefix + "secure/trippattern", TripPatternController::getTripPattern, json::write); - post(apiPrefix + "secure/trippattern", TripPatternController::createTripPattern, json::write); - put(apiPrefix + "secure/trippattern/:id", TripPatternController::updateTripPattern, json::write); - delete(apiPrefix + "secure/trippattern/:id", TripPatternController::deleteTripPattern, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/editor/datastore/DatabaseTx.java b/src/main/java/com/conveyal/datatools/editor/datastore/DatabaseTx.java index 2bd77826b..1f8723f9a 100644 --- a/src/main/java/com/conveyal/datatools/editor/datastore/DatabaseTx.java +++ b/src/main/java/com/conveyal/datatools/editor/datastore/DatabaseTx.java @@ -17,7 +17,7 @@ /** A wrapped transaction, so the database just looks like a POJO */ public class DatabaseTx { - public static final Logger LOG = LoggerFactory.getLogger(DatabaseTx.class); + private static final Logger LOG = LoggerFactory.getLogger(DatabaseTx.class); /** the database (transaction). subclasses must initialize. */ protected final DB tx; @@ -28,7 +28,7 @@ public class DatabaseTx { /** is this transaction read-only? */ protected boolean readOnly; - /** Convenience function to get a map */ + /** Convenience function to retrieve a map */ protected final BTreeMap getMap (String name) { try { return getMapMaker(tx, name) @@ -39,7 +39,7 @@ protected final BTreeMap getMap (String name) { } } - /** get a map maker, that can then be further modified */ + /** retrieve a map maker, that can then be further modified */ private static final BTreeMapMaker getMapMaker (DB tx, String name) { return tx.createTreeMap(name) // use java serialization to allow for schema upgrades @@ -47,7 +47,7 @@ private static final BTreeMapMaker getMapMaker (DB tx, String name) { } /** - * Convenience function to get a set. These are used as indices so they use the default serialization; + * Convenience function to retrieve a set. These are used as indices so they use the default serialization; * if we make a schema change we drop and recreate them. */ protected final NavigableSet getSet (String name) { @@ -113,7 +113,7 @@ protected static int pump (DB tx, String mapName, BTreeMap source) return pump(tx, mapName, pumpSourceForMap(source)); } - /** get a pump source from a map */ + /** retrieve a pump source from a map */ protected static Iterator> pumpSourceForMap(BTreeMap source) { Iterator> values = source.descendingMap().entrySet().iterator(); Iterator> valueTuples = Iterators.transform(values, new Function, Tuple2>() { diff --git a/src/main/java/com/conveyal/datatools/editor/datastore/FeedTx.java b/src/main/java/com/conveyal/datatools/editor/datastore/FeedTx.java index 2611c22b0..04760ee2f 100644 --- a/src/main/java/com/conveyal/datatools/editor/datastore/FeedTx.java +++ b/src/main/java/com/conveyal/datatools/editor/datastore/FeedTx.java @@ -1,32 +1,42 @@ package com.conveyal.datatools.editor.datastore; import com.conveyal.datatools.editor.models.transit.*; -import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.editor.utils.GeoUtils; +import com.conveyal.gtfs.GTFSFeed; +import com.conveyal.gtfs.model.CalendarDate; +import com.conveyal.gtfs.model.Entity; +import com.conveyal.gtfs.model.Frequency; +import com.conveyal.gtfs.model.ShapePoint; import com.google.common.base.Function; -import com.google.common.collect.Collections2; import com.google.common.collect.Iterators; import java.time.LocalDate; + +import com.google.common.collect.Maps; +import com.vividsolutions.jts.geom.Coordinate; import org.mapdb.Atomic; import org.mapdb.BTreeMap; import org.mapdb.Bind; import org.mapdb.DB; import org.mapdb.Fun; -import org.mapdb.Fun.Function2; import org.mapdb.Fun.Tuple2; -//import play.i18n.Messages; import com.conveyal.datatools.editor.utils.BindUtils; -import org.mapdb.TxRollbackException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.Collection; import java.util.Iterator; +import java.util.Map; import java.util.NavigableSet; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotExport.toGtfsDate; /** a transaction in an agency database */ public class FeedTx extends DatabaseTx { + private static final Logger LOG = LoggerFactory.getLogger(FeedTx.class); // primary com.conveyal.datatools.editor.datastores // if you add another, you MUST update SnapshotTx.java // if you don't, not only will your new data not be backed up, IT WILL BE THROWN AWAY WHEN YOU RESTORE! @@ -88,13 +98,13 @@ public class FeedTx extends DatabaseTx { // public Atomic.Boolean editedSinceSnapshot; /** - * Create an agency tx. + * Create a feed tx. */ public FeedTx(DB tx) { this(tx, true); } - /** Create an agency tx, optionally without secondary indices */ + /** Create a feed tx, optionally without secondary indices */ public FeedTx(DB tx, boolean buildSecondaryIndices) { super(tx); @@ -111,8 +121,9 @@ public FeedTx(DB tx, boolean buildSecondaryIndices) { if (buildSecondaryIndices) buildSecondaryIndices(); -// editedSinceSnapshot = tx.getAtomicBoolean("editedSinceSnapshot") == null ? tx.createAtomicBoolean("editedSinceSnapshot", false) : tx.; +// editedSinceSnapshot = tx.getAtomicBoolean("editedSinceSnapshot") == null ? tx.createAtomicBoolean("editedSinceSnapshot", false) : editedSinceSnapshot; } + public void commit () { try { // editedSinceSnapshot.set(true); @@ -123,6 +134,7 @@ public void commit () { } closed = true; } + public void buildSecondaryIndices () { // build secondary indices // we store indices in the mapdb not because we care about persistence, but because then they @@ -168,7 +180,29 @@ public void buildSecondaryIndices () { tripCountByPatternAndCalendar = getMap("tripCountByPatternAndCalendar"); Bind.histogram(trips, tripCountByPatternAndCalendar, (tripId, trip) -> new Tuple2(trip.patternId, trip.calendarId)); - scheduleExceptionCountByDate = getMap("scheduleExceptionCountByDate"); + // getting schedule exception map appears to be causing issues for some feeds + // The names of the code writers have been changed to protect the innocent. + try { + scheduleExceptionCountByDate = getMap("scheduleExceptionCountByDate"); + } catch (RuntimeException e1) { + LOG.error("Error getting scheduleExceptionCountByDate map. Getting a new one."); + int count = 0; + final int NEW_MAP_LIMIT = 100; + while (true) { + try { + scheduleExceptionCountByDate = getMap("scheduleExceptionCountByDateMapDBIsTheWORST" + count); + } catch (RuntimeException e2) { + LOG.error("Error getting {} scheduleExceptionCountByDateMapDBIsTheWORST map. Getting a new one.", count); + count++; + if (count > NEW_MAP_LIMIT) { + LOG.error("Cannot create new map. Reached limit of {}", NEW_MAP_LIMIT); + throw e2; + } + continue; + } + break; + } + } BindUtils.multiHistogram(exceptions, scheduleExceptionCountByDate, (id, ex) -> ex.dates.toArray(new LocalDate[ex.dates.size()])); tripCountByCalendar = getMap("tripCountByCalendar"); @@ -187,37 +221,34 @@ public void buildSecondaryIndices () { public Collection getTripsByPattern(String patternId) { Set> matchedKeys = tripsByTripPattern.subSet(new Tuple2(patternId, null), new Tuple2(patternId, Fun.HI)); - return Collections2.transform(matchedKeys, input -> trips.get(input.b)); + return matchedKeys.stream() + .map(input -> trips.get(input.b)) + .collect(Collectors.toList()); } public Collection getTripsByRoute(String routeId) { Set> matchedKeys = tripsByRoute.subSet(new Tuple2(routeId, null), new Tuple2(routeId, Fun.HI)); - return Collections2.transform(matchedKeys, input -> trips.get(input.b)); + return matchedKeys.stream().map(input -> trips.get(input.b)).collect(Collectors.toList()); } public Collection getTripsByCalendar(String calendarId) { Set> matchedKeys = tripsByCalendar.subSet(new Tuple2(calendarId, null), new Tuple2(calendarId, Fun.HI)); - return Collections2.transform(matchedKeys, input -> trips.get(input.b)); + return matchedKeys.stream().map(input -> trips.get(input.b)).collect(Collectors.toList()); } public Collection getExceptionsByCalendar(String calendarId) { Set> matchedKeys = exceptionsByCalendar.subSet(new Tuple2(calendarId, null), new Tuple2(calendarId, Fun.HI)); - return Collections2.transform(matchedKeys, input -> exceptions.get(input.b)); + return matchedKeys.stream().map(input -> exceptions.get(input.b)).collect(Collectors.toList()); } public Collection getTripsByPatternAndCalendar(String patternId, String calendarId) { Set, String>> matchedKeys = tripsByPatternAndCalendar.subSet(new Tuple2(new Tuple2(patternId, calendarId), null), new Tuple2(new Tuple2(patternId, calendarId), Fun.HI)); -// return Collections2.transform(matchedKeys, input -> trips.get(input.b)); - return Collections2.transform(matchedKeys, new Function, String>, Trip>() { - public Trip apply(Tuple2, String> input) { - return trips.get(input.b); - } - }); + return matchedKeys.stream().map(input -> trips.get(input.b)).collect(Collectors.toList()); } public Collection getStopsWithinBoundingBox (double north, double east, double south, double west) { @@ -229,15 +260,16 @@ public Collection getStopsWithinBoundingBox (double north, double east, do Set, String>> matchedKeys = stopsGix.subSet(new Tuple2(min, null), new Tuple2(max, Fun.HI)); - Collection matchedStops = - Collections2.transform(matchedKeys, input -> stops.get(input.b)); + Collection matchedStops = matchedKeys.stream().map(input -> stops.get(input.b)).collect(Collectors.toList()); return matchedStops; } public Collection getTripPatternsByStop (String id) { Collection> matchedPatterns = tripPatternsByStop.subSet(new Tuple2(id, null), new Tuple2(id, Fun.HI)); - return Collections2.transform(matchedPatterns, input -> tripPatterns.get(input.b)); + return matchedPatterns.stream() + .map(input -> tripPatterns.get(input.b)) + .collect(Collectors.toList()); } /** return the version number of the next snapshot */ @@ -274,7 +306,7 @@ public static String duplicate (String feedId) { } feedCopy.id = newId; -// a2.name = Messages.get("agency.copy-of", a2.name); +// a2.name = Messages.retrieveById("agency.copy-of", a2.name); gtx.feeds.put(feedCopy.id, feedCopy); @@ -425,4 +457,237 @@ public Tuple2 apply(Tuple2 input) { throw new RuntimeException(e); } } + + /** + * Convert Editor MapDB database (snapshot or active buffer) into a {@link com.conveyal.gtfs.GTFSFeed} object. This + * should be run in an asynchronously executed {@link com.conveyal.datatools.common.status.MonitorableJob} + * (see {@link com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotExport} to avoid consuming resources. + * @return + */ + public GTFSFeed toGTFSFeed(boolean ignoreRouteStatus) { + GTFSFeed feed = new GTFSFeed(); + if (agencies != null) { + LOG.info("Exporting {} agencies", agencies.size()); + for (Agency agency : agencies.values()) { + // if agencyId is null (allowed if there is only a single agency), set to empty string + if (agency.agencyId == null) { + if (feed.agency.containsKey("")) { + LOG.error("Agency with empty id field already exists. Skipping agency {}", agency); + continue; + } else { + agency.agencyId = ""; + } + } + // write the agency.txt entry + feed.agency.put(agency.agencyId, agency.toGtfs()); + } + } else { + LOG.error("Agency table should not be empty!"); + } + + if (fares != null) { + LOG.info("Exporting {} fares", fares.values().size()); + for (Fare fare : fares.values()) { + com.conveyal.gtfs.model.Fare gtfsFare = fare.toGtfs(); + + // write the fares.txt entry + feed.fares.put(fare.gtfsFareId, gtfsFare); + } + } + + // write all of the calendars and calendar dates + if (calendars != null) { + for (ServiceCalendar cal : calendars.values()) { + + int start = toGtfsDate(cal.startDate); + int end = toGtfsDate(cal.endDate); + com.conveyal.gtfs.model.Service gtfsService = cal.toGtfs(start, end); + // note: not using user-specified IDs + + // add calendar dates + if (exceptions != null) { + for (ScheduleException ex : exceptions.values()) { + if (ex.equals(ScheduleException.ExemplarServiceDescriptor.SWAP) && !ex.addedService.contains(cal.id) && !ex.removedService.contains(cal.id)) + // skip swap exception if cal is not referenced by added or removed service + // this is not technically necessary, but the output is cleaner/more intelligible + continue; + + for (LocalDate date : ex.dates) { + if (date.isBefore(cal.startDate) || date.isAfter(cal.endDate)) + // no need to write dates that do not apply + continue; + + CalendarDate cd = new CalendarDate(); + cd.date = date; + cd.service_id = gtfsService.service_id; + cd.exception_type = ex.serviceRunsOn(cal) ? 1 : 2; + + if (gtfsService.calendar_dates.containsKey(date)) + throw new IllegalArgumentException("Duplicate schedule exceptions on " + date.toString()); + + gtfsService.calendar_dates.put(date, cd); + } + } + } + feed.services.put(gtfsService.service_id, gtfsService); + } + } + + Map gtfsRoutes = Maps.newHashMap(); + + // write the routes + if(routes != null) { + LOG.info("Exporting {} routes", routes.size()); + for (Route route : routes.values()) { + // only export approved routes + // TODO: restore route approval check? + if (ignoreRouteStatus || route.status == StatusType.APPROVED) { + com.conveyal.gtfs.model.Agency agency = route.agencyId != null ? agencies.get(route.agencyId).toGtfs() : null; + com.conveyal.gtfs.model.Route gtfsRoute = route.toGtfs(agency); + feed.routes.put(route.getGtfsId(), gtfsRoute); + gtfsRoutes.put(route.id, gtfsRoute); + } else { + LOG.warn("Route {} not approved", route.gtfsRouteId); + } + } + } + + // write the trips on those routes + // we can't use the trips-by-route index because we may be exporting a snapshot database without indices + if(trips != null) { + LOG.info("Exporting {} trips", trips.size()); + for (Trip trip : trips.values()) { + if (!gtfsRoutes.containsKey(trip.routeId)) { + LOG.warn("Trip {} has no matching route. This may be because route {} was not approved", trip, trip.routeId); + continue; + } + + com.conveyal.gtfs.model.Route gtfsRoute = gtfsRoutes.get(trip.routeId); + Route route = routes.get(trip.routeId); + + com.conveyal.gtfs.model.Trip gtfsTrip = new com.conveyal.gtfs.model.Trip(); + + gtfsTrip.block_id = trip.blockId; + gtfsTrip.route_id = gtfsRoute.route_id; + gtfsTrip.trip_id = trip.getGtfsId(); + // TODO: figure out where a "" trip_id might have come from + if (gtfsTrip.trip_id == null || gtfsTrip.trip_id.equals("")) { + LOG.warn("Trip {} has no id for some reason (trip_id = {}). Skipping.", trip, gtfsTrip.trip_id); + continue; + } + // not using custom ids for calendars + gtfsTrip.service_id = feed.services.get(trip.calendarId).service_id; + gtfsTrip.trip_headsign = trip.tripHeadsign; + gtfsTrip.trip_short_name = trip.tripShortName; + + TripPattern pattern = tripPatterns.get(trip.patternId); + + // assign pattern direction if not null + if (pattern.patternDirection != null) { + gtfsTrip.direction_id = pattern.patternDirection.toGtfs(); + } + else if (trip.tripDirection != null) { + gtfsTrip.direction_id = trip.tripDirection.toGtfs(); + } + Tuple2 nextKey = feed.shape_points.ceilingKey(new Tuple2(pattern.id, null)); + if ((nextKey == null || !pattern.id.equals(nextKey.a)) && pattern.shape != null && !pattern.useStraightLineDistances) { + // this shape has not yet been saved + double[] coordDistances = GeoUtils.getCoordDistances(pattern.shape); + + for (int i = 0; i < coordDistances.length; i++) { + Coordinate coord = pattern.shape.getCoordinateN(i); + ShapePoint shape = new ShapePoint(pattern.id, coord.y, coord.x, i + 1, coordDistances[i]); + feed.shape_points.put(new Tuple2(pattern.id, shape.shape_pt_sequence), shape); + } + } + + if (pattern.shape != null && !pattern.useStraightLineDistances) + gtfsTrip.shape_id = pattern.id; + + // prefer trip wheelchair boarding value if available and not UNKNOWN + if (trip.wheelchairBoarding != null && !trip.wheelchairBoarding.equals(AttributeAvailabilityType.UNKNOWN)) { + gtfsTrip.wheelchair_accessible = trip.wheelchairBoarding.toGtfs(); + } else if (route.wheelchairBoarding != null) { + gtfsTrip.wheelchair_accessible = route.wheelchairBoarding.toGtfs(); + } + + feed.trips.put(gtfsTrip.trip_id, gtfsTrip); + + TripPattern patt = tripPatterns.get(trip.patternId); + + Iterator psi = patt.patternStops.iterator(); + + int stopSequence = 1; + + // write the stop times + int cumulativeTravelTime = 0; + for (StopTime st : trip.stopTimes) { + // FIXME: set ID field + TripPatternStop ps = psi.hasNext() ? psi.next() : null; + if (st == null) + continue; + + Stop stop = stops.get(st.stopId); + + if (!st.stopId.equals(ps.stopId)) { + throw new IllegalStateException("Trip " + trip.id + " does not match its pattern!"); + } + + com.conveyal.gtfs.model.StopTime gst = new com.conveyal.gtfs.model.StopTime(); + if (pattern.useFrequency) { + // If parent pattern uses frequencies, use absolute travel/dwell times from pattern + // stops for arrival/departure times. + gst.arrival_time = cumulativeTravelTime = cumulativeTravelTime + ps.defaultTravelTime; + gst.departure_time = cumulativeTravelTime = cumulativeTravelTime + ps.defaultDwellTime; + } else { + // Otherwise, apply trip's stop time arrival/departure times. + gst.arrival_time = st.arrivalTime != null ? st.arrivalTime : Entity.INT_MISSING; + gst.departure_time = st.departureTime != null ? st.departureTime : Entity.INT_MISSING; + } + + if (st.dropOffType != null) + gst.drop_off_type = st.dropOffType.toGtfsValue(); + else if (stop.dropOffType != null) + gst.drop_off_type = stop.dropOffType.toGtfsValue(); + + if (st.pickupType != null) + gst.pickup_type = st.pickupType.toGtfsValue(); + else if (stop.dropOffType != null) + gst.drop_off_type = stop.dropOffType.toGtfsValue(); + + gst.shape_dist_traveled = ps.shapeDistTraveled; + gst.stop_headsign = st.stopHeadsign; + gst.stop_id = stop.getGtfsId(); + + // write the stop as needed + if (!feed.stops.containsKey(gst.stop_id)) { + feed.stops.put(gst.stop_id, stop.toGtfs()); + } + + gst.stop_sequence = stopSequence++; + + if (ps.timepoint != null) + gst.timepoint = ps.timepoint ? 1 : 0; + else + gst.timepoint = Entity.INT_MISSING; + + gst.trip_id = gtfsTrip.trip_id; + + feed.stop_times.put(new Tuple2(gtfsTrip.trip_id, gst.stop_sequence), gst); + } + + // create frequencies as needed + if (trip.useFrequency != null && trip.useFrequency) { + Frequency f = new Frequency(); + f.trip_id = gtfsTrip.trip_id; + f.start_time = trip.startTime; + f.end_time = trip.endTime; + f.exact_times = 0; + f.headway_secs = trip.headway; + feed.frequencies.add(Fun.t2(gtfsTrip.trip_id, f)); + } + } + } + return feed; + } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/editor/datastore/MigrateToMapDB.java b/src/main/java/com/conveyal/datatools/editor/datastore/MigrateToMapDB.java index ab07aeb16..d5d27401a 100644 --- a/src/main/java/com/conveyal/datatools/editor/datastore/MigrateToMapDB.java +++ b/src/main/java/com/conveyal/datatools/editor/datastore/MigrateToMapDB.java @@ -56,7 +56,7 @@ public class MigrateToMapDB { // /** cache custom calendars, exception ID -> calendar ID*/ // private Multimap exceptionCalendars = HashMultimap.create(); // -// /** route ID -> agency ID, needed because we need the agency ID to get a reference to the route . . . */ +// /** route ID -> agency ID, needed because we need the agency ID to retrieve a reference to the route . . . */ // TLongLongMap routeAgencyMap = new TLongLongHashMap(); // // /** pattern ID -> agency ID */ @@ -114,18 +114,18 @@ public class MigrateToMapDB { // int count = 0; // while (reader.readRecord()) { // Agency a = new Agency(); -// a.id = reader.get("id"); -// a.color = reader.get("color"); +// a.id = reader.retrieve("id"); +// a.color = reader.retrieve("color"); // a.defaultLon = reader.getDouble("defaultlon"); // a.defaultLat = reader.getDouble("defaultlat"); -// a.gtfsAgencyId = reader.get("gtfsagencyid"); -// a.lang = reader.get("lang"); -// a.name = reader.get("name"); -// a.phone = reader.get("phone"); -// a.timezone = reader.get("timezone"); -// a.url = reader.get("url"); +// a.gtfsAgencyId = reader.retrieve("gtfsagencyid"); +// a.lang = reader.retrieve("lang"); +// a.name = reader.retrieve("name"); +// a.phone = reader.retrieve("phone"); +// a.timezone = reader.retrieve("timezone"); +// a.url = reader.retrieve("url"); // // easy to maintain referential integrity; we're retaining DB IDs. -// a.routeTypeId = reader.get("defaultroutetype_id"); +// a.routeTypeId = reader.retrieve("defaultroutetype_id"); // // gtx.feeds.put(a.id, a); // count++; @@ -142,12 +142,12 @@ public class MigrateToMapDB { // int count = 0; // // while (reader.readRecord()) { -// String username = reader.get("username"); +// String username = reader.retrieve("username"); // Boolean admin = reader.getBoolean("admin"); -// String email = reader.get("email"); -// String agencyId = reader.get("agency_id"); +// String email = reader.retrieve("email"); +// String agencyId = reader.retrieve("agency_id"); // Account a = new Account(username, "password", email, admin, agencyId); -// a.password = reader.get("password"); +// a.password = reader.retrieve("password"); // a.active = reader.getBoolean("active"); // a.id = a.username; // @@ -170,23 +170,23 @@ public class MigrateToMapDB { // while (reader.readRecord()) { // Stop s = new Stop(); // s.location = gf.createPoint(new Coordinate(reader.getDouble("lon"), reader.getDouble("lat"))); -// s.agencyId = reader.get("agency_id"); +// s.agencyId = reader.retrieve("agency_id"); // s.bikeParking = reader.getAvail("bikeparking"); // s.carParking = reader.getAvail("carparking"); // s.dropOffType = reader.getPdType("dropofftype"); // s.pickupType = reader.getPdType("pickuptype"); -// s.gtfsStopId = reader.get("gtfsstopid"); +// s.gtfsStopId = reader.retrieve("gtfsstopid"); // s.locationType = reader.getLocationType("locationtype"); // s.majorStop = reader.getBoolean("majorstop"); -// s.parentStation = reader.get("parentstation"); -// s.stopCode = reader.get("stopcode"); -// s.stopIconUrl = reader.get("stopiconurl"); -// s.stopDesc = reader.get("stopdesc"); -// s.stopName = reader.get("stopname"); -// s.stopUrl = reader.get("stopurl"); +// s.parentStation = reader.retrieve("parentstation"); +// s.stopCode = reader.retrieve("stopcode"); +// s.stopIconUrl = reader.retrieve("stopiconurl"); +// s.stopDesc = reader.retrieve("stopdesc"); +// s.stopName = reader.retrieve("stopname"); +// s.stopUrl = reader.retrieve("stopurl"); // s.wheelchairBoarding = reader.getAvail("wheelchairboarding"); -// s.zoneId = reader.get("zoneid"); -// s.id = reader.get("id"); +// s.zoneId = reader.retrieve("zoneid"); +// s.id = reader.retrieve("id"); // // getFeedTx(s.agencyId).stops.put(s.id, s); // count ++; @@ -206,20 +206,20 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // Route r = new Route(); -// r.id = reader.get("id"); -// r.comments = reader.get("comments"); -// r.gtfsRouteId = reader.get("gtfsrouteid"); -// r.routeColor = reader.get("routecolor"); -// r.routeDesc = reader.get("routedesc"); -// r.routeLongName = reader.get("routelongname"); -// r.routeShortName = reader.get("routeshortname"); -// r.routeTextColor = reader.get("routetextcolor"); -// r.routeUrl = reader.get("routeurl"); -// String status = reader.get("status"); +// r.id = reader.retrieve("id"); +// r.comments = reader.retrieve("comments"); +// r.gtfsRouteId = reader.retrieve("gtfsrouteid"); +// r.routeColor = reader.retrieve("routecolor"); +// r.routeDesc = reader.retrieve("routedesc"); +// r.routeLongName = reader.retrieve("routelongname"); +// r.routeShortName = reader.retrieve("routeshortname"); +// r.routeTextColor = reader.retrieve("routetextcolor"); +// r.routeUrl = reader.retrieve("routeurl"); +// String status = reader.retrieve("status"); // r.status = status != null ? StatusType.valueOf(status) : null; // r.wheelchairBoarding = reader.getAvail("wheelchairboarding"); -// r.agencyId = reader.get("agency_id"); -// r.routeTypeId = reader.get("routetype_id"); +// r.agencyId = reader.retrieve("agency_id"); +// r.routeTypeId = reader.retrieve("routetype_id"); // // // cache the agency ID // routeAgencyMap.put(Long.parseLong(r.id), Long.parseLong(r.agencyId)); @@ -243,7 +243,7 @@ public class MigrateToMapDB { // int count = 0; // // while (reader.readRecord()) { -// shapeCache.put(reader.get("id"), reader.getLineString("shape")); +// shapeCache.put(reader.retrieve("id"), reader.getLineString("shape")); // count++; // } // @@ -265,10 +265,10 @@ public class MigrateToMapDB { // Integer ddt = reader.getInteger("defaultdwelltime"); // tps.defaultDwellTime = ddt != null ? ddt : 0; // tps.timepoint = reader.getBoolean("timepoint"); -// tps.stopId = reader.get("stop_id"); +// tps.stopId = reader.retrieve("stop_id"); // // note: not reading shape_dist_traveled as it was incorrectly computed. We'll recompute at the end. // -// Fun.Tuple2 key = new Fun.Tuple2(reader.get("pattern_id"), reader.getInteger("stopsequence")); +// Fun.Tuple2 key = new Fun.Tuple2(reader.retrieve("pattern_id"), reader.getInteger("stopsequence")); // // // make sure that we don't have a mess on our hands due to data import issues far in the past. // if (patternStopCache.containsKey(key)) { @@ -292,18 +292,18 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // TripPattern p = new TripPattern(); -// p.id = reader.get("id"); -// p.headsign = reader.get("headsign"); -// p.name = reader.get("name"); -// p.routeId = reader.get("route_id"); -// String shapeId = reader.get("shape_id"); -// p.shape = shapeId != null ? shapeCache.get(shapeId) : null; -// -// // get the pattern stops +// p.id = reader.retrieve("id"); +// p.headsign = reader.retrieve("headsign"); +// p.name = reader.retrieve("name"); +// p.routeId = reader.retrieve("route_id"); +// String shapeId = reader.retrieve("shape_id"); +// p.shape = shapeId != null ? shapeCache.retrieve(shapeId) : null; +// +// // retrieve the pattern stops // p.patternStops = new ArrayList(); // p.patternStops.addAll(patternStopCache.subMap(new Fun.Tuple2(p.id, null), new Fun.Tuple2(p.id, Fun.HI)).values()); // -// p.agencyId = routeAgencyMap.get(Long.parseLong(p.routeId)) + ""; +// p.agencyId = routeAgencyMap.retrieve(Long.parseLong(p.routeId)) + ""; // patternAgencyMap.put(Long.parseLong(p.id), Long.parseLong(p.agencyId)); // // p.calcShapeDistTraveled(getFeedTx(p.agencyId)); @@ -333,12 +333,12 @@ public class MigrateToMapDB { // st.departureTime = reader.getInteger("departuretime"); // // note: not reading shape_dist_traveled as it was incorrectly computed. We'll recompute at the end. // -// st.stopHeadsign = reader.get("stopheadsign"); +// st.stopHeadsign = reader.retrieve("stopheadsign"); // st.dropOffType = reader.getPdType("dropofftype"); // st.pickupType = reader.getPdType("pickuptype"); -// st.stopId = reader.get("stop_id"); +// st.stopId = reader.retrieve("stop_id"); // -// Fun.Tuple2 key = new Fun.Tuple2(reader.get("trip_id"), reader.getInteger("stopsequence")); +// Fun.Tuple2 key = new Fun.Tuple2(reader.retrieve("trip_id"), reader.getInteger("stopsequence")); // // if (stopTimeCache.containsKey(key)) { // throw new IllegalStateException("Duplicate stop times!"); @@ -358,34 +358,34 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // Trip t = new Trip(); -// t.id = reader.get("id"); -// t.blockId = reader.get("blockid"); +// t.id = reader.retrieve("id"); +// t.blockId = reader.retrieve("blockid"); // t.endTime = reader.getInteger("endtime"); -// t.gtfsTripId = reader.get("gtfstripid"); +// t.gtfsTripId = reader.retrieve("gtfstripid"); // t.headway = reader.getInteger("headway"); // t.invalid = reader.getBoolean("invalid"); // t.startTime = reader.getInteger("starttime"); -// t.tripDescription = reader.get("tripdescription"); -// String dir = reader.get("tripdirection"); +// t.tripDescription = reader.retrieve("tripdescription"); +// String dir = reader.retrieve("tripdirection"); // t.tripDirection = dir != null ? TripDirection.valueOf(dir) : null; -// t.tripHeadsign = reader.get("tripheadsign"); -// t.tripShortName = reader.get("tripshortname"); +// t.tripHeadsign = reader.retrieve("tripheadsign"); +// t.tripShortName = reader.retrieve("tripshortname"); // t.useFrequency = reader.getBoolean("usefrequency"); // t.wheelchairBoarding = reader.getAvail("wheelchairboarding"); -// t.patternId = reader.get("pattern_id"); -// t.routeId = reader.get("route_id"); -// t.calendarId = reader.get("servicecalendar_id"); -// t.agencyId = routeAgencyMap.get(Long.parseLong(t.routeId)) + ""; +// t.patternId = reader.retrieve("pattern_id"); +// t.routeId = reader.retrieve("route_id"); +// t.calendarId = reader.retrieve("servicecalendar_id"); +// t.agencyId = routeAgencyMap.retrieve(Long.parseLong(t.routeId)) + ""; // -// // get stop times +// // retrieve stop times // // make sure we put nulls in as needed for skipped stops // t.stopTimes = new ArrayList(); // // // loop over the pattern stops and find the stop times that match // for (Map.Entry, TripPatternStop> entry : // patternStopCache.subMap(new Fun.Tuple2(t.patternId, null), new Fun.Tuple2(t.patternId, Fun.HI)).entrySet()) { -// // get the appropriate stop time, or null if the stop is skipped -// StopTime st = stopTimeCache.get(new Fun.Tuple2(t.id, entry.getKey().b)); +// // retrieve the appropriate stop time, or null if the stop is skipped +// StopTime st = stopTimeCache.retrieve(new Fun.Tuple2(t.id, entry.getKey().b)); // t.stopTimes.add(st); // // if (st != null) @@ -411,13 +411,13 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // RouteType rt = new RouteType(); -// rt.id = reader.get("id"); -// rt.description = reader.get("description"); -// String grt = reader.get("gtfsroutetype"); +// rt.id = reader.retrieve("id"); +// rt.description = reader.retrieve("description"); +// String grt = reader.retrieve("gtfsroutetype"); // rt.gtfsRouteType = grt != null ? GtfsRouteType.valueOf(grt) : null; -// String hvt = reader.get("hvtroutetype"); +// String hvt = reader.retrieve("hvtroutetype"); // rt.hvtRouteType = hvt != null ? HvtRouteType.valueOf(hvt) : null; -// rt.localizedVehicleType = reader.get("localizedvehicletype"); +// rt.localizedVehicleType = reader.retrieve("localizedvehicletype"); // gtx.routeTypes.put(rt.id, rt); // count++; // } @@ -433,11 +433,11 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // ServiceCalendar c = new ServiceCalendar(); -// c.id = reader.get("id"); -// c.description = reader.get("description"); +// c.id = reader.retrieve("id"); +// c.description = reader.retrieve("description"); // c.endDate = reader.getLocalDate("enddate"); // c.startDate = reader.getLocalDate("startdate"); -// c.gtfsServiceId = reader.get("gtfsserviceid"); +// c.gtfsServiceId = reader.retrieve("gtfsserviceid"); // c.monday = reader.getBoolean("monday"); // c.tuesday = reader.getBoolean("tuesday"); // c.wednesday = reader.getBoolean("wednesday"); @@ -445,7 +445,7 @@ public class MigrateToMapDB { // c.friday = reader.getBoolean("friday"); // c.saturday = reader.getBoolean("saturday"); // c.sunday = reader.getBoolean("sunday"); -// c.agencyId = reader.get("agency_id"); +// c.agencyId = reader.retrieve("agency_id"); // // getFeedTx(c.agencyId).calendars.put(c.id, c); // count++; @@ -462,7 +462,7 @@ public class MigrateToMapDB { // int count = 0; // // while (reader.readRecord()) { -// exceptionDates.put(reader.get("scheduleexception_id"), reader.getLocalDate("dates")); +// exceptionDates.put(reader.retrieve("scheduleexception_id"), reader.getLocalDate("dates")); // count++; // } // @@ -477,7 +477,7 @@ public class MigrateToMapDB { // int count = 0; // // while (reader.readRecord()) { -// exceptionCalendars.put(reader.get("scheduleexception_id"), reader.get("customschedule_id")); +// exceptionCalendars.put(reader.retrieveById("scheduleexception_id"), reader.retrieveById("customschedule_id")); // count++; // } // @@ -493,13 +493,13 @@ public class MigrateToMapDB { // // while (reader.readRecord()) { // ScheduleException e = new ScheduleException(); -// e.id = reader.get("id"); -// e.exemplar = ScheduleException.ExemplarServiceDescriptor.valueOf(reader.get("exemplar")); -// e.name = reader.get("name"); -// e.agencyId = reader.get("agency_id"); +// e.id = reader.retrieve("id"); +// e.exemplar = ScheduleException.ExemplarServiceDescriptor.valueOf(reader.retrieve("exemplar")); +// e.name = reader.retrieve("name"); +// e.agencyId = reader.retrieve("agency_id"); // -// e.dates = new ArrayList(exceptionDates.get(e.id)); -// e.customSchedule = new ArrayList(exceptionCalendars.get(e.id)); +// e.dates = new ArrayList(exceptionDates.retrieve(e.id)); +// e.customSchedule = new ArrayList(exceptionCalendars.retrieve(e.id)); // // getFeedTx(e.agencyId).exceptions.put(e.id, e); // count++; @@ -522,7 +522,7 @@ public class MigrateToMapDB { // if (!atxes.containsKey(agencyId)) // atxes.put(agencyId, VersionedDataStore.getFeedTx(agencyId)); // -// return atxes.get(agencyId); +// return atxes.retrieve(agencyId); // } // // private static class DatabaseCsv { @@ -542,8 +542,8 @@ public class MigrateToMapDB { // return reader.readRecord(); // } // -// public String get (String column) throws IOException { -// String ret = reader.get(column); +// public String retrieve (String column) throws IOException { +// String ret = reader.retrieve(column); // if (ret.isEmpty()) // return null; // @@ -552,7 +552,7 @@ public class MigrateToMapDB { // // public Double getDouble(String column) { // try { -// String dbl = reader.get(column); +// String dbl = reader.retrieve(column); // return Double.parseDouble(dbl); // } catch (Exception e) { // return null; @@ -560,7 +560,7 @@ public class MigrateToMapDB { // } // // public StopTimePickupDropOffType getPdType (String column) throws Exception { -// String val = reader.get(column); +// String val = reader.retrieve(column); // // try { // return StopTimePickupDropOffType.valueOf(val); @@ -570,7 +570,7 @@ public class MigrateToMapDB { // } // // public Boolean getBoolean (String column) throws Exception { -// String val = get(column); +// String val = retrieve(column); // // if (val == null) // return null; @@ -587,7 +587,7 @@ public class MigrateToMapDB { // } // // public LineString getLineString (String column) throws Exception { -// String val = reader.get(column); +// String val = reader.retrieve(column); // // try { // return (LineString) new WKTReader().read(val); @@ -597,7 +597,7 @@ public class MigrateToMapDB { // } // // public AttributeAvailabilityType getAvail (String column) throws Exception { -// String val = reader.get(column); +// String val = reader.retrieve(column); // // try { // return AttributeAvailabilityType.valueOf(val); @@ -607,7 +607,7 @@ public class MigrateToMapDB { // } // // public Integer getInteger (String column) throws Exception { -// String val = reader.get(column); +// String val = reader.retrieve(column); // // try { // return Integer.parseInt(val); @@ -617,7 +617,7 @@ public class MigrateToMapDB { // } // // public LocationType getLocationType (String column) throws Exception { -// String val = reader.get(column); +// String val = reader.retrieve(column); // // try { // return LocationType.valueOf(val); @@ -627,7 +627,7 @@ public class MigrateToMapDB { // } // // public LocalDate getLocalDate (String column) throws Exception { -// String val = get(column); +// String val = retrieve(column); // // try { // Matcher m = datePattern.matcher(val); diff --git a/src/main/java/com/conveyal/datatools/editor/datastore/SnapshotTx.java b/src/main/java/com/conveyal/datatools/editor/datastore/SnapshotTx.java index 52893e97d..5ebbb55d0 100644 --- a/src/main/java/com/conveyal/datatools/editor/datastore/SnapshotTx.java +++ b/src/main/java/com/conveyal/datatools/editor/datastore/SnapshotTx.java @@ -63,17 +63,24 @@ public void make (FeedTx master) { */ public List restore (String agencyId) { DB targetTx = VersionedDataStore.getRawFeedTx(agencyId); - + try { + targetTx.getAll(); + } catch (RuntimeException e) { + LOG.error("Target FeedTX for feed restore may be corrupted. Consider wiping feed database editor/$FEED_ID/master.db*", e); + } for (String obj : targetTx.getAll().keySet()) { - if (obj.equals("snapshotVersion") || obj.equals("stops")) + if (obj.equals("snapshotVersion") +// || obj.equals("stops") + ) // except don't overwrite the counter that keeps track of snapshot versions // we also don't overwrite the stops completely, as we need to merge them + // NOTE: we are now overwriting the stops completely... continue; else targetTx.delete(obj); } - int acount, rcount, ccount, ecount, pcount, tcount, fcount; + int acount, rcount, ccount, ecount, pcount, tcount, fcount, scount; if (tx.exists("agencies")) acount = pump(targetTx, "agencies", (BTreeMap) this.getMap("agencies")); @@ -87,6 +94,12 @@ public List restore (String agencyId) { rcount = 0; LOG.info("Restored {} routes", rcount); + if (tx.exists("stops")) + scount = pump(targetTx, "stops", (BTreeMap) this.getMap("stops")); + else + scount = 0; + LOG.info("Restored {} stops", scount); + if (tx.exists("calendars")) ccount = pump(targetTx, "calendars", (BTreeMap) this.getMap("calendars")); else @@ -140,7 +153,7 @@ public List restore (String agencyId) { // for (TripPattern tp : atx.tripPatterns.values()) { // for (TripPatternStop ps : tp.patternStops) { // if (!atx.stops.containsKey(ps.stopId)) { -// Stop stop = oldStops.get(ps.stopId); +// Stop stop = oldStops.retrieve(ps.stopId); // atx.stops.put(ps.stopId, stop); // restoredStops.add(stop); // } @@ -149,7 +162,7 @@ public List restore (String agencyId) { // } // LOG.info("Restored {} deleted stops", restoredStops.size()); // -// atx.commit(); + atx.commit(); // // return restoredStops; return new ArrayList<>(); diff --git a/src/main/java/com/conveyal/datatools/editor/datastore/VersionedDataStore.java b/src/main/java/com/conveyal/datatools/editor/datastore/VersionedDataStore.java index 7b4eec216..91853e5f0 100644 --- a/src/main/java/com/conveyal/datatools/editor/datastore/VersionedDataStore.java +++ b/src/main/java/com/conveyal/datatools/editor/datastore/VersionedDataStore.java @@ -1,9 +1,9 @@ package com.conveyal.datatools.editor.datastore; import com.conveyal.datatools.manager.DataManager; -import com.google.common.collect.Maps; import com.conveyal.datatools.editor.models.Snapshot; import com.conveyal.datatools.editor.models.transit.Stop; +import com.google.common.collect.Maps; import org.mapdb.BTreeMap; import org.mapdb.DB; import org.mapdb.DBMaker; @@ -13,9 +13,11 @@ import com.conveyal.datatools.editor.utils.ClassLoaderSerializer; import java.io.File; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.concurrent.ConcurrentHashMap; /** * Create a new versioned com.conveyal.datatools.editor.datastore. A versioned data store handles multiple databases, @@ -29,7 +31,8 @@ public class VersionedDataStore { private static File dataDirectory = new File(DataManager.getConfigPropertyAsText("application.data.editor_mapdb")); private static TxMaker globalTxMaker; - private static Map feedTxMakers = Maps.newConcurrentMap(); + // FIXME: is changing from Maps.newConcurrentMap() suitable here? Check with mattwigway. + private static ConcurrentHashMap feedTxMakers = new ConcurrentHashMap<>(); static { File globalDataDirectory = new File(dataDirectory, "global"); @@ -51,7 +54,7 @@ public static GlobalTx getGlobalTx () { /** * Start a transaction in an agency database. No checking is done to ensure the agency exists; - * if it does not you will get a (hopefully) empty DB, unless you've done the same thing previously. + * if it does not you will retrieveById a (hopefully) empty DB, unless you've done the same thing previously. */ public static FeedTx getFeedTx(String feedId) { return new FeedTx(getRawFeedTx(feedId)); @@ -71,6 +74,9 @@ static DB getRawFeedTx(String feedId) { TxMaker agencyTxm = DBMaker.newFileDB(new File(path, "master.db")) .mmapFileEnable() .compressionEnable() + .asyncWriteEnable() + .closeOnJvmShutdown() + .asyncWriteFlushDelay(5) .makeTxMaker(); feedTxMakers.put(feedId, agencyTxm); @@ -81,31 +87,76 @@ static DB getRawFeedTx(String feedId) { return feedTxMakers.get(feedId).makeTx(); } - /** Take a snapshot of an agency database. The snapshot will be saved in the global database. */ + /** + * WARNING: do not use unless you absolutely intend to delete active editor data for a given feedId. + * This function will delete the mapdb files for the specified feedId, but leave the snapshots for + * this feed intact. So this should only really be used for if/when an editor feed becomes corrupted. + * In that case, the steps to follow are: + * 1. Create snapshot of latest changes for feed. + * 2. Call this function. + * 3. Restore latest snapshot (new feed DB will be created where the deleted one once lived). + */ + public static void wipeFeedDB(String feedId) { + File path = new File(dataDirectory, feedId); + String[] extensions = {".db", ".db.p", ".db.t"}; + LOG.warn("Permanently deleting Feed DB for {}", feedId); + + // remove entry for feedId in feedTxMaker + feedTxMakers.remove(feedId); + // delete local cache files (including zip) when feed removed from cache + for (String type : extensions) { + File file = new File(path, "master" + type); + file.delete(); + } + } + public static Snapshot takeSnapshot (String feedId, String name, String comment) { - FeedTx tx = getFeedTx(feedId); - GlobalTx gtx = getGlobalTx(); + return takeSnapshot(feedId, null, name, comment); + } + + /** Take a snapshot of an agency database. The snapshot will be saved in the global database. */ + public static Snapshot takeSnapshot (String feedId, String feedVersionId, String name, String comment) { + FeedTx tx = null; + GlobalTx gtx = null; + boolean transactionCommitError = false; int version = -1; DB snapshot = null; - Snapshot ret = null; + Snapshot ret; try { + tx = getFeedTx(feedId); + gtx = getGlobalTx(); version = tx.getNextSnapshotId(); - - LOG.info("Creating snapshot {} for feed {}", feedId, version); + LOG.info("Creating snapshot {} for feed {}", version, feedId); long startTime = System.currentTimeMillis(); ret = new Snapshot(feedId, version); - if (gtx.snapshots.containsKey(ret.id)) - throw new IllegalStateException("Duplicate snapshot IDs"); + // if we encounter a duplicate snapshot ID, increment until there is a safe one + if (gtx.snapshots.containsKey(ret.id)) { + LOG.error("Duplicate snapshot IDs, incrementing until we have a fresh one."); + while(gtx.snapshots.containsKey(ret.id)) { + version = tx.getNextSnapshotId(); + LOG.info("Attempting to create snapshot {} for feed {}", version, feedId); + ret = new Snapshot(feedId, version); + } + } ret.snapshotTime = System.currentTimeMillis(); + ret.feedVersionId = feedVersionId; ret.name = name; ret.comment = comment; ret.current = true; snapshot = getSnapshotDb(feedId, version, false); + // if snapshot contains maps, increment the version ID until we find a snapshot that is empty + while (snapshot.getAll().size() != 0) { + version = tx.getNextSnapshotId(); + LOG.info("Attempting to create snapshot {} for feed {}", version, feedId); + ret = new Snapshot(feedId, version); + snapshot = getSnapshotDb(feedId, version, false); + } + new SnapshotTx(snapshot).make(tx); // for good measure snapshot.commit(); @@ -113,10 +164,21 @@ public static Snapshot takeSnapshot (String feedId, String name, String comment) gtx.snapshots.put(ret.id, ret); gtx.commit(); - tx.commit(); + + // unfortunately if a mapdb gets corrupted, trying to commit this transaction will cause things + // to go all haywired. Further, if we try to rollback after this commit, the snapshot will fail. + // So we keep track of transactionCommitError here and avoid rollback if an error is encountered. + // This will throw an unclosed transaction error, but since the + try { + tx.commit(); + } catch (Exception e) { + transactionCommitError = true; + LOG.error("Error committing feed transaction", e); + } String snapshotMessage = String.format("Saving snapshot took %.2f seconds", (System.currentTimeMillis() - startTime) / 1000D); LOG.info(snapshotMessage); + return ret; } catch (Exception e) { // clean up @@ -132,12 +194,13 @@ public static Snapshot takeSnapshot (String feedId, String name, String comment) } } } - +// if (tx != null) tx.rollbackIfOpen(); +// gtx.rollbackIfOpen(); // re-throw throw new RuntimeException(e); } finally { - tx.rollbackIfOpen(); - gtx.rollbackIfOpen(); + if (tx != null && !transactionCommitError) tx.rollbackIfOpen(); + if (gtx != null) gtx.rollbackIfOpen(); } } @@ -158,7 +221,7 @@ public static List restore (Snapshot s) { } } - /** get the directory in which to store a snapshot */ + /** retrieveById the directory in which to store a snapshot */ public static DB getSnapshotDb (String feedId, int version, boolean readOnly) { File thisSnapshotDir = getSnapshotDir(feedId, version); thisSnapshotDir.mkdirs(); @@ -178,7 +241,7 @@ public static DB getSnapshotDb (String feedId, int version, boolean readOnly) { return maker.make(); } - /** get the directory in which a snapshot is stored */ + /** retrieveById the directory in which a snapshot is stored */ public static File getSnapshotDir (String feedId, int version) { File agencyDir = new File(dataDirectory, feedId); File snapshotsDir = new File(agencyDir, "snapshots"); @@ -207,7 +270,7 @@ public static class DatabaseTx { /** has this transaction been closed? */ boolean closed = false; - /** Convenience function to get a map */ + /** Convenience function to retrieveById a map */ protected final BTreeMap getMap (String name) { return tx.createTreeMap(name) // use java serialization to allow for schema upgrades @@ -216,7 +279,7 @@ protected final BTreeMap getMap (String name) { } /** - * Convenience function to get a set. These are used as indices so they use the default serialization; + * Convenience function to retrieveById a set. These are used as indices so they use the default serialization; * if we make a schema change we drop and recreate them. */ protected final NavigableSet getSet (String name) { diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java b/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java new file mode 100644 index 000000000..68476bd04 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java @@ -0,0 +1,339 @@ +package com.conveyal.datatools.editor.jobs; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.editor.datastore.FeedTx; +import com.conveyal.datatools.editor.datastore.VersionedDataStore; +import com.conveyal.datatools.editor.models.transit.Route; +import com.conveyal.datatools.editor.models.transit.ScheduleException; +import com.conveyal.datatools.editor.models.transit.ServiceCalendar; +import com.conveyal.datatools.editor.models.transit.Trip; +import com.conveyal.datatools.editor.models.transit.TripPattern; +import com.conveyal.datatools.editor.models.transit.TripPatternStop; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.conveyal.gtfs.GTFSFeed; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.loader.JdbcGtfsLoader; +import com.conveyal.gtfs.loader.Table; +import org.apache.commons.dbutils.DbUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Array; +import java.sql.Connection; +import java.sql.JDBCType; +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import static com.conveyal.gtfs.loader.DateField.GTFS_DATE_FORMATTER; + +public class ConvertEditorMapDBToSQL extends MonitorableJob { + private final String feedId; + private final Integer versionNumber; + private static final Logger LOG = LoggerFactory.getLogger(ConvertEditorMapDBToSQL.class); + private Connection connection; + private DataSource dataSource; + + public ConvertEditorMapDBToSQL(String feedId, Integer versionNumber) { + // FIXME owner and job name + super("owner", "Create snapshot from legacy editor", JobType.CONVERT_EDITOR_MAPDB_TO_SQL); + this.feedId = feedId; + this.versionNumber = versionNumber; + } + + @Override + public void jobLogic() { + try { + // Iterate over the provided snapshots and convert each one. Note: this will skip snapshots for feed IDs that + // don't exist as feed sources in MongoDB. + FeedSource feedSource = Persistence.feedSources.getById(feedId); + if (feedSource == null) { + LOG.warn("Not converting snapshot. Feed source Id {} does not exist in application data", feedId); + return; + } + FeedTx feedTx; + // FIXME: This needs to share a connection with the snapshotter. + // Create connection for each snapshot + // FIXME: use GTFS_DATA_SOURCE + dataSource = DataManager.GTFS_DATA_SOURCE; + connection = dataSource.getConnection(); // DataManager.GTFS_DATA_SOURCE.getConnection(); + + // retrieveById present feed database if no snapshot version provided + boolean setEditorBuffer = false; + if (versionNumber == null) { + setEditorBuffer = true; + feedTx = VersionedDataStore.getFeedTx(feedId); + } + // else retrieveById snapshot version data + else { + feedTx = VersionedDataStore.getFeedTx(feedId, versionNumber); + } + + LOG.info("Converting {}.{} to SQL", feedId, versionNumber); + // Convert mapdb to SQL + FeedLoadResult convertFeedResult = convertFeed(feedId, versionNumber, feedTx); + // Create manager snapshot for storing in feed source. + com.conveyal.datatools.manager.models.Snapshot managerSnapshot = + new com.conveyal.datatools.manager.models.Snapshot( + feedId, versionNumber != null ? versionNumber : 0, "mapdb_editor", convertFeedResult); +// managerSnapshot.dateCreated = + LOG.info("Storing snapshot {}", managerSnapshot.id); + Persistence.snapshots.create(managerSnapshot); + if (setEditorBuffer) { + // If there is no version, that indicates that this was from the editor buffer for that feedId. + // Make this snapshot the editor namespace buffer. + LOG.info("Updating active snapshot to {}", managerSnapshot.id); + FeedSource updatedFeedSource = Persistence.feedSources.updateField( + feedSource.id, "editorNamespace", managerSnapshot.namespace); + LOG.info("Editor namespace: {}", updatedFeedSource.editorNamespace); + } + connection.commit(); + } catch (SQLException e) { + e.printStackTrace(); + try { + connection.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + DbUtils.closeQuietly(connection); + } + } + + /** + * Convert a single MapDB Editor feed (snapshot or no) to a SQL-backed snapshot. + */ + private FeedLoadResult convertFeed(String feedId, Integer version, FeedTx feedTx) throws SQLException { + GTFSFeed feed; + + feed = feedTx.toGTFSFeed(true); + + // STEP 1: Write GTFSFeed into SQL database. There are some gaps remaining after this process wraps up: + // - Routes doesn't have publicly_visible and status fields + // - Patterns do not exist + // - Pattern stops table does not exist, so it needs to be created and populated. + // - FIXME No schedule exceptions.... ugh... + // - Trips need pattern ID + + // FIXME Does FeedLoadResult need to be populated with more info about the load? (Currently it's just + // namespace and load time. + FeedLoadResult feedLoadResult = feed.toSQL(dataSource); + if (feedLoadResult.fatalException != null) { + throw new SQLException(String.format("Fatal exception converting %s.%d to SQL", feedId, version)); + } + String namespace = feedLoadResult.uniqueIdentifier; + + // FIXME: This needs to be done in the same transaction as the above operation. + // Iterate over routes and update + int batchSize = 0; + String tableName = String.join(".", namespace, Table.ROUTES.name); + String updateSql = String.format("update %s set status=?, publicly_visible=? where route_id = ?", tableName); + PreparedStatement updateRouteStatement = connection.prepareStatement(updateSql); + if (feedTx.routes != null) { + LOG.info("Updating status, publicly_visible for {} routes", feedTx.routes.size()); // FIXME NPE if (feedTx.routes != null) + for (com.conveyal.datatools.editor.models.transit.Route route : feedTx.routes.values()) { + // FIXME: Maybe it's risky to update on gtfs route ID (which may not be unique for some feeds). + // Could we alternatively update on ID field (not sure what the value for each route will be after + // insertion)? + updateRouteStatement.setInt(1, route.status == null ? 0 : route.status.toInt()); + int publiclyVisible = route.publiclyVisible == null ? 0 : route.publiclyVisible ? 1 : 0; + updateRouteStatement.setInt(2, publiclyVisible); + updateRouteStatement.setString(3, route.gtfsRouteId); + // FIXME: Do something with the return value? E.g., rollback if it hits more than one route. + // FIXME: Do this in batches? + updateRouteStatement.addBatch(); + batchSize += 1; + batchSize = handleBatchExecution(batchSize, updateRouteStatement); + } + // Handle any remaining updates. + updateRouteStatement.executeBatch(); + } else { + LOG.warn("Skipping routes conversion (feedTx.routes is null)"); + } + + // Annoyingly, a number of fields on the Editor Trip class differ from the gtfs-lib Trip class (e.g., + // patternId and calendarId refer to the editor Model#ID field not the GTFS key field). So we first + // convert the trips to gtfs trips and then insert them into the database. And while we're at it, we do + // this with stop times, too. + // OLD COMMENT: we can't use the trips-by-route index because we may be exporting a snapshot database without indices + if (feedTx.trips != null) { + batchSize = 0; + // Update pattern_id for trips. + String tripsTableName = String.join(".", namespace, Table.TRIPS.name); + LOG.info("Updating pattern_id for {} trips", feedTx.trips.size()); + String updateTripsSql = String.format("update %s set pattern_id=? where trip_id=?", tripsTableName); + PreparedStatement updateTripsStatement = connection.prepareStatement(updateTripsSql); + for (Trip trip : feedTx.trips.values()) { + TripPattern pattern = feedTx.tripPatterns.get(trip.patternId); + // FIXME: Should we exclude patterns from the original insert (GTFSFeed.toSQL)? These pattern IDs + // will not match those found in the GTFSFeed patterns. However, FeedTx.toGTFSFeed doesn't + // actually create patterns, so there are no patterns loaded to begin with. + updateTripsStatement.setString(1, pattern.id); + updateTripsStatement.setString(2, trip.gtfsTripId); + // FIXME: Do something with the return value? E.g., rollback if it hits more than one trip. + // FIXME: Do this in batches? + updateTripsStatement.addBatch(); + batchSize += 1; + // If we've accumulated a lot of prepared statement calls, pass them on to the database backend. + batchSize = handleBatchExecution(batchSize, updateTripsStatement); + // FIXME Need to cherry-pick frequency fixes made for Izmir/WRI + } + // Handle remaining updates. + updateTripsStatement.executeBatch(); + } + + // Pattern stops table has not yet been created because pattern stops do not exist in + // GTFSFeed. NOte, we want this table to be created regardless of whether patterns exist or not. + Table.PATTERN_STOP.createSqlTable(connection, namespace, true); + + // Insert all trip patterns and pattern stops into database (tables have already been created FIXME pattern_stops has not yet been created). + if (feedTx.tripPatterns != null) { + batchSize = 0; + // Handle inserting patterns + PreparedStatement insertPatternStatement = connection.prepareStatement( + Table.PATTERNS.generateInsertSql(namespace, true)); + // Handle inserting pattern stops + PreparedStatement insertPatternStopStatement = connection.prepareStatement( + Table.PATTERN_STOP.generateInsertSql(namespace, true)); + LOG.info("Inserting {} patterns", feedTx.tripPatterns.size()); + for (TripPattern pattern : feedTx.tripPatterns.values()) { + Route route = feedTx.routes.get(pattern.routeId); + insertPatternStatement.setString(1, pattern.id); + insertPatternStatement.setString(2, route.gtfsRouteId); + insertPatternStatement.setString(3, pattern.name); + if (pattern.patternDirection != null) { + insertPatternStatement.setInt(4, pattern.patternDirection.toGtfs()); + } else { + insertPatternStatement.setNull(4, JDBCType.INTEGER.getVendorTypeNumber()); + } + insertPatternStatement.setInt(5, pattern.useFrequency ? 1 : 0); + // Shape ID will match the pattern id for pattern geometries that have been converted to shapes. + // This process happens in FeedTx.toGTFSFeed. + insertPatternStatement.setString(6, pattern.id); + insertPatternStatement.addBatch(); + batchSize += 1; + + int stopSequence = 1; + // LOG.info("Inserting {} pattern stops for pattern {}", pattern.patternStops.size(), pattern.id); + for (TripPatternStop tripPatternStop : pattern.patternStops) { + // TripPatternStop's stop ID needs to be mapped to GTFS stop ID. + // FIXME Possible NPE? + String stopId = feedTx.stops.get(tripPatternStop.stopId).gtfsStopId; + insertPatternStopStatement.setString(1, pattern.id); + insertPatternStopStatement.setInt(2, stopSequence); + insertPatternStopStatement.setString(3, stopId); + insertPatternStopStatement.setInt(4, tripPatternStop.defaultTravelTime); + insertPatternStopStatement.setInt(5, tripPatternStop.defaultDwellTime); + insertPatternStopStatement.setInt(6, 0); + insertPatternStopStatement.setInt(7, 0); + // FIXME: shapeDistTraveled could be null + if (tripPatternStop.shapeDistTraveled == null) { + insertPatternStopStatement.setNull(8, JDBCType.DOUBLE.getVendorTypeNumber()); + } else { + insertPatternStopStatement.setDouble(8, tripPatternStop.shapeDistTraveled); + } + if (tripPatternStop.timepoint == null) { + insertPatternStopStatement.setNull(9, JDBCType.INTEGER.getVendorTypeNumber()); + } else { + insertPatternStopStatement.setInt(9, tripPatternStop.timepoint ? 1 : 0); + } + insertPatternStopStatement.addBatch(); + batchSize += 1; + stopSequence += 1; + // If we've accumulated a lot of prepared statement calls, pass them on to the database backend. + batchSize = handleBatchExecution(batchSize, insertPatternStatement, insertPatternStopStatement); + } + // Handle remaining updates. + insertPatternStatement.executeBatch(); + insertPatternStopStatement.executeBatch(); + } + } + + + // FIXME: Handle calendars/service exceptions.... + // Add service calendars FIXME: delete calendars already in the table? + if (feedTx.calendars != null) { + // Handle inserting pattern stops + PreparedStatement insertCalendar = connection.prepareStatement( + Table.CALENDAR.generateInsertSql(namespace, true)); + batchSize = 0; + LOG.info("Inserting {} calendars", feedTx.calendars.size()); + for (ServiceCalendar cal : feedTx.calendars.values()) { + insertCalendar.setString(1, cal.gtfsServiceId); + insertCalendar.setInt(2, cal.monday ? 1 : 0); + insertCalendar.setInt(3, cal.tuesday ? 1 : 0); + insertCalendar.setInt(4, cal.wednesday ? 1 : 0); + insertCalendar.setInt(5, cal.thursday ? 1 : 0); + insertCalendar.setInt(6, cal.friday ? 1 : 0); + insertCalendar.setInt(7, cal.saturday ? 1 : 0); + insertCalendar.setInt(8, cal.sunday ? 1 : 0); + insertCalendar.setString(9, cal.startDate != null ? cal.startDate.format(GTFS_DATE_FORMATTER) : null); + insertCalendar.setString(10, cal.endDate != null ? cal.endDate.format(GTFS_DATE_FORMATTER) : null); + insertCalendar.setString(11, cal.description); + + insertCalendar.addBatch(); + batchSize += 1; + // If we've accumulated a lot of prepared statement calls, pass them on to the database backend. + batchSize = handleBatchExecution(batchSize, insertCalendar); + } + // Handle remaining updates. + insertCalendar.executeBatch(); + } + + // Create schedule exceptions table. + Table.SCHEDULE_EXCEPTIONS.createSqlTable(connection, namespace, true); + + // Add schedule exceptions (Note: calendar dates may be carried over from GTFSFeed.toSql, but these will + // ultimately be overwritten by schedule exceptions during Editor feed export. + if (feedTx.exceptions != null) { + batchSize = 0; + PreparedStatement insertException = connection.prepareStatement(Table.SCHEDULE_EXCEPTIONS.generateInsertSql(namespace, true)); + LOG.info("Inserting {} schedule exceptions", feedTx.exceptions.size()); + for (ScheduleException ex : feedTx.exceptions.values()) { + String[] dates = ex.dates != null + ? ex.dates.stream() + .map(localDate -> localDate.format(GTFS_DATE_FORMATTER)) + .toArray(String[]::new) + : new String[]{}; + Array datesArray = connection.createArrayOf("text", dates); + Array customArray = connection.createArrayOf("text", ex.customSchedule != null + ? ex.customSchedule.toArray(new String[0]) + : new String[]{}); + Array addedArray = connection.createArrayOf("text", ex.addedService != null + ? ex.addedService.toArray(new String[0]) + : new String[]{}); + Array removedArray = connection.createArrayOf("text", ex.removedService != null + ? ex.removedService.toArray(new String[0]) + : new String[]{}); + insertException.setString(1, ex.name); + insertException.setArray(2, datesArray); + insertException.setInt(3, ex.exemplar.toInt()); + insertException.setArray(4, customArray); + insertException.setArray(5, addedArray); + insertException.setArray(6, removedArray); + + insertException.addBatch(); + batchSize += 1; + // If we've accumulated a lot of prepared statement calls, pass them on to the database backend. + batchSize = handleBatchExecution(batchSize, insertException); + } + + // Handle remaining updates. + insertException.executeBatch(); + } + return feedLoadResult; + } + + private int handleBatchExecution(int batchSize, PreparedStatement ... preparedStatements) throws SQLException { + if (batchSize > JdbcGtfsLoader.INSERT_BATCH_SIZE) { + for (PreparedStatement statement : preparedStatements) { + statement.executeBatch(); + } + return 0; + } else { + return batchSize; + } + } +} \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/CreateSnapshotJob.java b/src/main/java/com/conveyal/datatools/editor/jobs/CreateSnapshotJob.java new file mode 100644 index 000000000..db3238911 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/jobs/CreateSnapshotJob.java @@ -0,0 +1,136 @@ +package com.conveyal.datatools.editor.jobs; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.Date; + +import static com.conveyal.gtfs.GTFS.makeSnapshot; + +/** + * Makes a snapshot of a feed in a SQL database. Effectively this is copying all of the tables for a given feed (which + * all exist in a single schema, e.g., abcd_efghijklmnopqurstu.stops) into a new feed/schema namespace. + * + * As far as the logic for managing snapshots in the GTFS editor, here is the basic model we follow, along with some + * ideas for future optimizations to make things quicker for large feeds (which can take a minute or two to copy all of + * the tables -- unfortunately there is nothing in the world of RDBMS that allows for copying tables with records and + * indexes on those records fully intact): + * + * 1. User uploads a GTFS zip file as a feed version. + * 2. User loads that feed version into editor. This initializes a CreateSnapshotJob to create the first working buffer. + * Also, the editorNamespace field is updated to this first buffer namespace (abcd_). + * 3. User makes edits, deletes a route, adds stops etc. + * + * TODO: In the future, we could copy on write to add individual tables as they are edited and piece together a feed. + * + * 4. User makes a new snapshot to save her work. This initializes another CreateSnapshotJob to snapshot the buffer + * (abcd_). Operationally what happens is that the tables are copied to a new namespace (lmno_) and the new namespace + * becomes the working buffer. + * 5. More edits... and, oops, we just deleted everything. + * 6. User needs to restore abcd_, so we make a snapshot of abcd_ (as naamespace wxyz_), which becomes the new working + * buffer. If the user chooses, she can delete lmno_. Otherwise it just stays as a snapshot that the pointer has + * moved away from. + * + * I think how I had been thinking about this at one point was that the newly created tables were the "snapshot," but + * actually in this model the namespace being copied becomes the immutable "snapshot" and the buffer is the new + * namespace. + * + * 2. 4. + * ____________ ____________ ____________ + * | | | | | | + * | | | | | | + * | | 3. | | 5. | | + * | abcd_ | --> edits --> | lmno_ | --> more edits on lmno_ --> restore abcd_ --> | wxyz_ | + * | | | | | | + * | | | | | | + * |____________| |____________| |____________| + */ +public class CreateSnapshotJob extends MonitorableJob { + private static final Logger LOG = LoggerFactory.getLogger(CreateSnapshotJob.class); + private final String namespace; + private final boolean updateBuffer; + private final boolean storeSnapshot; + private final boolean preserveBuffer; + private Snapshot snapshot; + private FeedSource feedSource; + + public CreateSnapshotJob(Snapshot snapshot, boolean updateBufferNamespace, boolean storeSnapshot, boolean preserveBufferAsSnapshot) { + super(snapshot.userId, "Creating snapshot for " + snapshot.feedSourceId, JobType.CREATE_SNAPSHOT); + this.namespace = snapshot.snapshotOf; + this.snapshot = snapshot; + this.updateBuffer = updateBufferNamespace; + this.storeSnapshot = storeSnapshot; + this.preserveBuffer = preserveBufferAsSnapshot; + status.update(false, "Initializing...", 0); + } + + @JsonProperty + public String getFeedSourceId () { + return snapshot.feedSourceId; + } + + @Override + public void jobLogic() { + // Get count of snapshots to set new version number. + feedSource = Persistence.feedSources.getById(snapshot.feedSourceId); + // Update job name to use feed source name (rather than ID). + this.name = String.format("Creating snapshot for %s", feedSource.name); + Collection existingSnapshots = feedSource.retrieveSnapshots(); + int version = existingSnapshots.size(); + status.update(false, "Creating snapshot...", 20); + FeedLoadResult loadResult = makeSnapshot(namespace, DataManager.GTFS_DATA_SOURCE); + snapshot.version = version; + snapshot.namespace = loadResult.uniqueIdentifier; + snapshot.feedLoadResult = loadResult; + if (snapshot.name == null) { + snapshot.generateName(); + } + snapshot.snapshotTime = loadResult.completionTime; + } + + @Override + public void jobFinished () { + if (!status.error) { + if (storeSnapshot) { + LOG.info("Storing snapshot {} for feed source {}", snapshot.id, feedSource.id); + Persistence.snapshots.create(snapshot); + } + if (preserveBuffer) { + // Preserve the existing buffer as a snapshot if requested. This is essentially a shorthand for creating + // a snapshot and then separately loading something new into the buffer. It can be thought of as an + // autosave. FIXME: the buffer would still exist even if not "preserved" here. Should it be deleted if + // requester opts to not preserve it? + if (feedSource.editorNamespace == null) { + LOG.error("Cannot preserve snapshot with null namespace for feed source {}", feedSource.id); + } else { + LOG.info("Autosaving feed source {} editor state {} as snapshot", feedSource.id, feedSource.editorNamespace); + Snapshot preservedBuffer = new Snapshot("Autosave " + new Date().toString(), feedSource.id, null); + preservedBuffer.namespace = feedSource.editorNamespace; + Persistence.snapshots.create(preservedBuffer); + } + } + if (updateBuffer) { + // Update working buffer to the newly created namespace. The "old" namespace will become the immutable + // snapshot. Restoring the snapshot is a matter of making a new snapshot and updating the buffer pointer + // to the new namespace, but we also will preserve the snapshot. + LOG.info("Updating feed source {} active snapshot to {}", feedSource.id, snapshot.namespace); + // If creating the initial snapshot for the editor buffer, set the editor namespace field to + // the new feed's namespace. + Persistence.feedSources.updateField( + feedSource.id, + "editorNamespace", + snapshot.namespace + ); + } + status.update(false, "Created snapshot!", 100, true); + } + } +} diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java b/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java new file mode 100644 index 000000000..1d09113b5 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java @@ -0,0 +1,79 @@ +package com.conveyal.datatools.editor.jobs; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.loader.JdbcGtfsExporter; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + +public class ExportSnapshotToGTFSJob extends MonitorableJob { + + private static final Logger LOG = LoggerFactory.getLogger(ExportSnapshotToGTFSJob.class); + private final Snapshot snapshot; + private final String feedVersionId; + + public ExportSnapshotToGTFSJob(String owner, Snapshot snapshot, String feedVersionId) { + super(owner, "Exporting snapshot " + snapshot.name, JobType.EXPORT_SNAPSHOT_TO_GTFS); + this.snapshot = snapshot; + this.feedVersionId = feedVersionId; + } + + public ExportSnapshotToGTFSJob(String owner, Snapshot snapshot) { + this(owner, snapshot, null); + } + + @JsonProperty + public Snapshot getSnapshot () { + return snapshot; + } + + @Override + public void jobLogic() { + File tempFile; + try { + tempFile = File.createTempFile("snapshot", "zip"); + } catch (IOException e) { + e.printStackTrace(); + status.fail("Error creating local file for snapshot.", e); + return; + } + JdbcGtfsExporter exporter = new JdbcGtfsExporter(snapshot.namespace, tempFile.getAbsolutePath(), DataManager.GTFS_DATA_SOURCE, true); + FeedLoadResult result = exporter.exportTables(); + if (result.fatalException != null) { + String message = String.format("Error (%s) encountered while exporting database tables.", result.fatalException); + LOG.error(message); + status.fail(message); + } + + // Override snapshot ID if exporting feed for use as new feed version. + String filename = feedVersionId != null ? feedVersionId : snapshot.id + ".zip"; + String bucketPrefix = feedVersionId != null ? "gtfs" : "snapshots"; + // FIXME: replace with use of refactored FeedStore. + // Store the project merged zip locally or on s3 + if (DataManager.useS3) { + String s3Key = String.format("%s/%s", bucketPrefix, filename); + FeedStore.s3Client.putObject(DataManager.feedBucket, s3Key, tempFile); + LOG.info("Storing snapshot GTFS at s3://{}/{}", DataManager.feedBucket, s3Key); + } else { + try { + FeedVersion.feedStore.newFeed(filename, new FileInputStream(tempFile), null); + } catch (FileNotFoundException e) { + e.printStackTrace(); + status.fail("Could not export snapshot to GTFS."); + LOG.error("Could not store feed for snapshot {}", snapshot.id); + } + } + // Delete snapshot temp file. + tempFile.delete(); + } +} diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotExport.java b/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotExport.java index 5da7b5ae9..d4450cc59 100755 --- a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotExport.java +++ b/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotExport.java @@ -1,42 +1,33 @@ package com.conveyal.datatools.editor.jobs; import com.beust.jcommander.internal.Lists; +import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.gtfs.GTFSFeed; -import com.conveyal.gtfs.model.CalendarDate; -import com.conveyal.gtfs.model.Entity; -import com.conveyal.gtfs.model.Frequency; -import com.conveyal.gtfs.model.Shape; -import com.conveyal.gtfs.model.ShapePoint; -import com.google.common.collect.Maps; -import com.vividsolutions.jts.geom.Coordinate; import com.conveyal.datatools.editor.datastore.FeedTx; import com.conveyal.datatools.editor.datastore.GlobalTx; import com.conveyal.datatools.editor.datastore.VersionedDataStore; import com.conveyal.datatools.editor.models.Snapshot; -import com.conveyal.datatools.editor.models.transit.*; + import java.time.LocalDate; -import org.mapdb.Fun; import org.mapdb.Fun.Tuple2; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.conveyal.datatools.editor.utils.GeoUtils; import java.io.File; import java.util.Arrays; import java.util.Collection; -import java.util.Iterator; -import java.util.Map; -public class ProcessGtfsSnapshotExport implements Runnable { +public class ProcessGtfsSnapshotExport extends MonitorableJob { public static final Logger LOG = LoggerFactory.getLogger(ProcessGtfsSnapshotExport.class); private Collection> snapshots; private File output; - private LocalDate startDate; - private LocalDate endDate; +// private LocalDate startDate; +// private LocalDate endDate; /** Export the named snapshots to GTFS */ public ProcessGtfsSnapshotExport(Collection> snapshots, File output, LocalDate startDate, LocalDate endDate) { + super("application", "Exporting snapshots to GTFS", JobType.PROCESS_SNAPSHOT_EXPORT); this.snapshots = snapshots; this.output = output; // this.startDate = startDate; @@ -48,6 +39,7 @@ public ProcessGtfsSnapshotExport(Collection> snapshots, * method have a different erasure from the other */ public ProcessGtfsSnapshotExport(Collection agencies, File output, LocalDate startDate, LocalDate endDate, boolean isagency) { + super("application", "Exporting snapshots to GTFS", JobType.PROCESS_SNAPSHOT_EXPORT); this.snapshots = Lists.newArrayList(agencies.size()); for (String agency : agencies) { @@ -68,8 +60,8 @@ public ProcessGtfsSnapshotExport (Snapshot snapshot, File output) { } @Override - public void run() { - GTFSFeed feed = new GTFSFeed(); + public void jobLogic() { + GTFSFeed feed = null; GlobalTx gtx = VersionedDataStore.getGlobalTx(); FeedTx feedTx = null; @@ -78,220 +70,16 @@ public void run() { for (Tuple2 ssid : snapshots) { String feedId = ssid.a; - feedTx = VersionedDataStore.getFeedTx(feedId); - Collection agencies = feedTx.agencies.values(); - - for (Agency agency : agencies) { - com.conveyal.gtfs.model.Agency gtfsAgency = agency.toGtfs(); - LOG.info("Exporting agency {}", gtfsAgency); - - // write the feeds.txt entry - feed.agency.put(agency.agencyId, agency.toGtfs()); - } - - Collection fares = feedTx.fares.values(); - - for (Fare fare : fares) { - com.conveyal.gtfs.model.Fare gtfsFare = fare.toGtfs(); - LOG.info("Exporting fare {}", gtfsFare); - - // write the feeds.txt entry - feed.fares.put(fare.gtfsFareId, gtfsFare); - } - - // write all of the calendars and calendar dates - if (feedTx.calendars != null) { - for (ServiceCalendar cal : feedTx.calendars.values()) { - - com.conveyal.gtfs.model.Service gtfsService = cal.toGtfs(toGtfsDate(cal.startDate), toGtfsDate(cal.endDate)); - // note: not using user-specified IDs - - // add calendar dates - if (feedTx.exceptions != null) { - for (ScheduleException ex : feedTx.exceptions.values()) { - if (ex.equals(ScheduleException.ExemplarServiceDescriptor.SWAP) && !ex.addedService.contains(cal.id) && !ex.removedService.contains(cal.id)) - // skip swap exception if cal is not referenced by added or removed service - // this is not technically necessary, but the output is cleaner/more intelligible - continue; - - for (LocalDate date : ex.dates) { - if (date.isBefore(cal.startDate) || date.isAfter(cal.endDate)) - // no need to write dates that do not apply - continue; - - CalendarDate cd = new CalendarDate(); - cd.date = date; - cd.service_id = gtfsService.service_id; - cd.exception_type = ex.serviceRunsOn(cal) ? 1 : 2; - - if (gtfsService.calendar_dates.containsKey(date)) - throw new IllegalArgumentException("Duplicate schedule exceptions on " + date.toString()); - - gtfsService.calendar_dates.put(date, cd); - } - } - } - - feed.services.put(gtfsService.service_id, gtfsService); - } - } - - Map gtfsRoutes = Maps.newHashMap(); - - // write the routes - if(feedTx.routes != null) { - for (Route route : feedTx.routes.values()) { - // only export approved routes - // TODO: restore route approval check? - //if (route.status == StatusType.APPROVED) { - - com.conveyal.gtfs.model.Route gtfsRoute = route.toGtfs(feedTx.agencies.get(route.agencyId).toGtfs(), gtx); - - feed.routes.put(route.getGtfsId(), gtfsRoute); - - gtfsRoutes.put(route.id, gtfsRoute); - //} - } + // retrieveById present feed database if no snapshot version provided + if (ssid.b == null) { + feedTx = VersionedDataStore.getFeedTx(feedId); } - - // write the trips on those routes - // we can't use the trips-by-route index because we may be exporting a snapshot database without indices - if(feedTx.trips != null) { - for (Trip trip : feedTx.trips.values()) { - if (!gtfsRoutes.containsKey(trip.routeId)) { - LOG.warn("Trip {} has not matching route", trip); - continue; - } - - com.conveyal.gtfs.model.Route gtfsRoute = gtfsRoutes.get(trip.routeId); - Route route = feedTx.routes.get(trip.routeId); - - com.conveyal.gtfs.model.Trip gtfsTrip = new com.conveyal.gtfs.model.Trip(); - - gtfsTrip.block_id = trip.blockId; - gtfsTrip.route_id = gtfsRoute.route_id; - gtfsTrip.trip_id = trip.getGtfsId(); - // not using custom ids for calendars - gtfsTrip.service_id = feed.services.get(trip.calendarId).service_id; - gtfsTrip.trip_headsign = trip.tripHeadsign; - gtfsTrip.trip_short_name = trip.tripShortName; - - TripPattern pattern = feedTx.tripPatterns.get(trip.patternId); - - // assign pattern direction if not null - if (pattern.patternDirection != null) { - gtfsTrip.direction_id = pattern.patternDirection.toGtfs(); - } - else { - gtfsTrip.direction_id = trip.tripDirection.toGtfs(); - } - Tuple2 nextKey = feed.shape_points.ceilingKey(new Tuple2(pattern.id, null)); - if ((nextKey == null || !pattern.id.equals(nextKey.a)) && pattern.shape != null && !pattern.useStraightLineDistances) { - // this shape has not yet been saved - double[] coordDistances = GeoUtils.getCoordDistances(pattern.shape); - - for (int i = 0; i < coordDistances.length; i++) { - Coordinate coord = pattern.shape.getCoordinateN(i); - ShapePoint shape = new ShapePoint(pattern.id, coord.y, coord.x, i + 1, coordDistances[i]); - feed.shape_points.put(new Tuple2(pattern.id, shape.shape_pt_sequence), shape); - } - } - - if (pattern.shape != null && !pattern.useStraightLineDistances) - gtfsTrip.shape_id = pattern.id; - - if (trip.wheelchairBoarding != null) { - if (trip.wheelchairBoarding.equals(AttributeAvailabilityType.AVAILABLE)) - gtfsTrip.wheelchair_accessible = 1; - - else if (trip.wheelchairBoarding.equals(AttributeAvailabilityType.UNAVAILABLE)) - gtfsTrip.wheelchair_accessible = 2; - - else - gtfsTrip.wheelchair_accessible = 0; - - } else if (route.wheelchairBoarding != null) { - if (route.wheelchairBoarding.equals(AttributeAvailabilityType.AVAILABLE)) - gtfsTrip.wheelchair_accessible = 1; - - else if (route.wheelchairBoarding.equals(AttributeAvailabilityType.UNAVAILABLE)) - gtfsTrip.wheelchair_accessible = 2; - - else - gtfsTrip.wheelchair_accessible = 0; - - } - - feed.trips.put(gtfsTrip.trip_id, gtfsTrip); - - TripPattern patt = feedTx.tripPatterns.get(trip.patternId); - - Iterator psi = patt.patternStops.iterator(); - - int stopSequence = 1; - - // write the stop times - for (StopTime st : trip.stopTimes) { - TripPatternStop ps = psi.next(); - if (st == null) - continue; - - Stop stop = feedTx.stops.get(st.stopId); - - if (!st.stopId.equals(ps.stopId)) { - throw new IllegalStateException("Trip " + trip.id + " does not match its pattern!"); - } - - com.conveyal.gtfs.model.StopTime gst = new com.conveyal.gtfs.model.StopTime(); - gst.arrival_time = st.arrivalTime != null ? st.arrivalTime : Entity.INT_MISSING; - gst.departure_time = st.departureTime != null ? st.departureTime : Entity.INT_MISSING; - - if (st.dropOffType != null) - gst.drop_off_type = st.dropOffType.toGtfsValue(); - else if (stop.dropOffType != null) - gst.drop_off_type = stop.dropOffType.toGtfsValue(); - - if (st.pickupType != null) - gst.pickup_type = st.pickupType.toGtfsValue(); - else if (stop.dropOffType != null) - gst.drop_off_type = stop.dropOffType.toGtfsValue(); - - gst.shape_dist_traveled = ps.shapeDistTraveled; - gst.stop_headsign = st.stopHeadsign; - gst.stop_id = stop.getGtfsId(); - - // write the stop as needed - if (!feed.stops.containsKey(gst.stop_id)) { - feed.stops.put(gst.stop_id, stop.toGtfs()); - } - - gst.stop_sequence = stopSequence++; - - if (ps.timepoint != null) - gst.timepoint = ps.timepoint ? 1 : 0; - else - gst.timepoint = Entity.INT_MISSING; - - gst.trip_id = gtfsTrip.trip_id; - - feed.stop_times.put(new Tuple2(gtfsTrip.trip_id, gst.stop_sequence), gst); - } - - // create frequencies as needed - if (trip.useFrequency != null && trip.useFrequency) { - Frequency f = new Frequency(); - f.trip_id = gtfsTrip.trip_id; - f.start_time = trip.startTime; - f.end_time = trip.endTime; - f.exact_times = 0; - f.headway_secs = trip.headway; - feed.frequencies.add(Fun.t2(gtfsTrip.trip_id, f)); - } - } + // else retrieveById snapshot version data + else { + feedTx = VersionedDataStore.getFeedTx(feedId, ssid.b); } + feed = feedTx.toGTFSFeed(false); } - - feed.toFile(output.getAbsolutePath()); } finally { gtx.rollbackIfOpen(); diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotMerge.java b/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotMerge.java index a93bce047..23816dc5f 100755 --- a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotMerge.java +++ b/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGtfsSnapshotMerge.java @@ -4,57 +4,42 @@ import com.conveyal.datatools.editor.datastore.FeedTx; import com.conveyal.datatools.editor.models.Snapshot; import com.conveyal.datatools.editor.models.transit.Agency; -import com.conveyal.datatools.editor.models.transit.Fare; +import com.conveyal.datatools.editor.models.transit.EditorFeed; +import com.conveyal.datatools.editor.models.transit.GtfsRouteType; import com.conveyal.datatools.editor.models.transit.Route; +import com.conveyal.datatools.editor.models.transit.RouteType; +import com.conveyal.datatools.editor.models.transit.ServiceCalendar; import com.conveyal.datatools.editor.models.transit.Stop; -import com.conveyal.datatools.editor.models.transit.StopTime; -import com.conveyal.datatools.editor.models.transit.Trip; import com.conveyal.datatools.manager.models.FeedVersion; -import com.conveyal.gtfs.GTFSFeed; -import com.conveyal.gtfs.model.*; +import com.conveyal.gtfs.loader.Feed; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.google.common.primitives.Ints; import com.vividsolutions.jts.geom.Envelope; import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.PrecisionModel; import com.conveyal.datatools.editor.datastore.GlobalTx; import com.conveyal.datatools.editor.datastore.VersionedDataStore; import gnu.trove.map.TIntObjectMap; import gnu.trove.map.hash.TIntObjectHashMap; -import com.conveyal.datatools.editor.models.transit.*; -import org.joda.time.DateTimeConstants; import java.awt.geom.Rectangle2D; -import java.time.LocalDate; -import org.mapdb.DBMaker; -import org.mapdb.Fun; + import org.mapdb.Fun.Tuple2; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -//import play.i18n.Messages; -import java.io.File; import java.util.*; -import java.util.Map.Entry; - -import static spark.Spark.halt; - public class ProcessGtfsSnapshotMerge extends MonitorableJob { public static final Logger LOG = LoggerFactory.getLogger(ProcessGtfsSnapshotMerge.class); /** map from GTFS agency IDs to Agencies */ - private Map agencyIdMap = new HashMap(); - private Map routeIdMap = new HashMap(); + private Map agencyIdMap = new HashMap<>(); + private Map routeIdMap = new HashMap<>(); /** map from (gtfs stop ID, database agency ID) -> stop */ private Map, Stop> stopIdMap = Maps.newHashMap(); - private TIntObjectMap routeTypeIdMap = new TIntObjectHashMap(); - private Map shapes = DBMaker.newTempHashMap(); + private TIntObjectMap routeTypeIdMap = new TIntObjectHashMap<>(); - private GTFSFeed input; - private Status status; - private EditorFeed feed; + private Feed inputFeedTables; + private EditorFeed editorFeed; public FeedVersion feedVersion; @@ -63,14 +48,13 @@ public class ProcessGtfsSnapshotMerge extends MonitorableJob { }*/ public ProcessGtfsSnapshotMerge (FeedVersion feedVersion, String owner) { - super(owner, "Creating snapshot for " + feedVersion.getFeedSource().name, JobType.PROCESS_SNAPSHOT); + super(owner, "Creating snapshot for " + feedVersion.parentFeedSource().name, JobType.PROCESS_SNAPSHOT_MERGE); this.feedVersion = feedVersion; - status = new Status(); - status.message = "Waiting to begin job..."; - status.percentComplete = 0; + status.update(false, "Waiting to begin job...", 0); LOG.info("GTFS Snapshot Merge for feedVersion {}", feedVersion.id); } - public void run () { + + public void jobLogic () { long agencyCount = 0; long routeCount = 0; long stopCount = 0; @@ -78,7 +62,6 @@ public void run () { long tripCount = 0; long shapePointCount = 0; long serviceCalendarCount = 0; - long shapeCount = 0; long fareCount = 0; GlobalTx gtx = VersionedDataStore.getGlobalTx(); @@ -86,12 +69,12 @@ public void run () { // create a new feed based on this version FeedTx feedTx = VersionedDataStore.getFeedTx(feedVersion.feedSourceId); - feed = new EditorFeed(); - feed.setId(feedVersion.feedSourceId); - Rectangle2D bounds = feedVersion.getValidationSummary().bounds; + editorFeed = new EditorFeed(); + editorFeed.setId(feedVersion.feedSourceId); + Rectangle2D bounds = feedVersion.validationResult.fullBounds.toRectangle2D(); if (bounds != null) { - feed.defaultLat = bounds.getCenterY(); - feed.defaultLon = bounds.getCenterX(); + editorFeed.defaultLat = bounds.getCenterY(); + editorFeed.defaultLon = bounds.getCenterX(); } @@ -111,14 +94,14 @@ public void run () { for(String key : feedTx.trips.keySet()) feedTx.trips.remove(key); LOG.info("Cleared old data"); - // input = feedVersion.getGtfsFeed(); - // TODO: use GtfsCache? synchronized (status) { status.message = "Loading GTFS file..."; status.percentComplete = 5; } - input = feedVersion.getGtfsFeed(); - if(input == null) return; + + // retrieveById Feed connection to SQL tables for the feed version + inputFeedTables = feedVersion.retrieveFeed(); + if(inputFeedTables == null) return; LOG.info("GtfsImporter: importing feed..."); synchronized (status) { @@ -126,20 +109,23 @@ public void run () { status.percentComplete = 8; } // load feed_info.txt - if(input.feedInfo.size() > 0) { - FeedInfo feedInfo = input.feedInfo.values().iterator().next(); - feed.feedPublisherName = feedInfo.feed_publisher_name; - feed.feedPublisherUrl = feedInfo.feed_publisher_url; - feed.feedLang = feedInfo.feed_lang; - feed.feedEndDate = feedInfo.feed_end_date; - feed.feedStartDate = feedInfo.feed_start_date; - feed.feedVersion = feedInfo.feed_version; - } - gtx.feeds.put(feedVersion.feedSourceId, feed); + // FIXME add back in feed info!! +// if(inputFeedTables.feedInfo.size() > 0) { +// FeedInfo feedInfo = input.feedInfo.values().iterator().next(); +// editorFeed.feedPublisherName = feedInfo.feed_publisher_name; +// editorFeed.feedPublisherUrl = feedInfo.feed_publisher_url; +// editorFeed.feedLang = feedInfo.feed_lang; +// editorFeed.feedEndDate = feedInfo.feed_end_date; +// editorFeed.feedStartDate = feedInfo.feed_start_date; +// editorFeed.feedVersion = feedInfo.feed_version; +// } + gtx.feeds.put(feedVersion.feedSourceId, editorFeed); // load the GTFS agencies - for (com.conveyal.gtfs.model.Agency gtfsAgency : input.agency.values()) { - Agency agency = new Agency(gtfsAgency, feed); + Iterator agencyIterator = inputFeedTables.agencies.iterator(); + while (agencyIterator.hasNext()) { + com.conveyal.gtfs.model.Agency gtfsAgency = agencyIterator.next(); + Agency agency = new Agency(gtfsAgency, editorFeed); // don't save the agency until we've come up with the stop centroid, below. agencyCount++; @@ -162,7 +148,7 @@ public void run () { } // TODO: remove stop ownership inference entirely? // infer agency ownership of stops, if there are multiple feeds - SortedSet> stopsByAgency = inferAgencyStopOwnership(); +// SortedSet> stopsByAgency = inferAgencyStopOwnership(); // build agency centroids as we go // note that these are not actually centroids, but the center of the extent of the stops . . . @@ -173,35 +159,11 @@ public void run () { } GeometryFactory geometryFactory = new GeometryFactory(new PrecisionModel(), 4326); - for (com.conveyal.gtfs.model.Stop gtfsStop : input.stops.values()) { - Stop stop = new Stop(gtfsStop, geometryFactory, feed); + for (com.conveyal.gtfs.model.Stop gtfsStop : inputFeedTables.stops) { + Stop stop = new Stop(gtfsStop, geometryFactory, editorFeed); feedTx.stops.put(stop.id, stop); - stopIdMap.put(new Tuple2(gtfsStop.stop_id, feed.id), stop); + stopIdMap.put(new Tuple2(gtfsStop.stop_id, editorFeed.id), stop); stopCount++; - - /* - // duplicate the stop for all of the feeds by which it is used - Collection agencies = Collections2.transform( - stopsByAgency.subSet(new Tuple2(gtfsStop.stop_id, null), new Tuple2(gtfsStop.stop_id, Fun.HI)), - new Function, Agency>() { - - @Override - public Agency apply(Tuple2 input) { - // TODO Auto-generated method stub - return agencyIdMap.get(input.b); - } - }); - - // impossible to tell to whom unused stops belong, so give them to everyone - if (agencies.size() == 0) - agencies = agencyIdMap.values(); - - for (Agency agency : agencies) { - Stop stop = new Stop(gtfsStop, geometryFactory, agency); - agencyTxs.get(agency.id).stops.put(stop.id, stop); - stopIdMap.put(new Tuple2(gtfsStop.stop_id, agency.id), stop); - stopEnvelopes.get(agency.id).expandToInclude(gtfsStop.stop_lon, gtfsStop.stop_lat); - }*/ } LOG.info("Stops loaded: " + stopCount); @@ -215,19 +177,17 @@ public Agency apply(Tuple2 input) { status.percentComplete = 30; } // import routes - for (com.conveyal.gtfs.model.Route gtfsRoute : input.routes.values()) { + for (com.conveyal.gtfs.model.Route gtfsRoute : inputFeedTables.routes) { Agency agency = agencyIdMap.get(gtfsRoute.agency_id); if (!routeTypeIdMap.containsKey(gtfsRoute.route_type)) { RouteType rt = new RouteType(); rt.gtfsRouteType = GtfsRouteType.fromGtfs(gtfsRoute.route_type); -// rt.hvtRouteType = rt.gtfsRouteType.toHvt(); -// rt.description = agencyIdMap.values().iterator().next().name + " " + rt.gtfsRouteType.toString(); gtx.routeTypes.put(rt.id, rt); routeTypeIdMap.put(gtfsRoute.route_type, rt.id); } - Route route = new Route(gtfsRoute, feed, agency); + Route route = new Route(gtfsRoute, editorFeed, agency); feedTx.routes.put(route.id, route); routeIdMap.put(gtfsRoute.route_id, route); @@ -250,99 +210,98 @@ public Agency apply(Tuple2 input) { // GTFS service ID -> ServiceCalendar Map calendars = Maps.newHashMap(); - for (Service svc : input.services.values()) { - - ServiceCalendar cal; - - if (svc.calendar != null) { - // easy case: don't have to infer anything! - cal = new ServiceCalendar(svc.calendar, feed); - } else { - // infer a calendar - // number of mondays, etc. that this calendar is active - int monday, tuesday, wednesday, thursday, friday, saturday, sunday; - monday = tuesday = wednesday = thursday = friday = saturday = sunday = 0; - - LocalDate startDate = null; - LocalDate endDate = null; - - for (CalendarDate cd : svc.calendar_dates.values()) { - if (cd.exception_type == 2) - continue; - - if (startDate == null || cd.date.isBefore(startDate)) - startDate = cd.date; - - if (endDate == null || cd.date.isAfter(endDate)) - endDate = cd.date; - - int dayOfWeek = cd.date.getDayOfWeek().getValue(); - - switch (dayOfWeek) { - case DateTimeConstants.MONDAY: - monday++; - break; - case DateTimeConstants.TUESDAY: - tuesday++; - break; - case DateTimeConstants.WEDNESDAY: - wednesday++; - break; - case DateTimeConstants.THURSDAY: - thursday++; - break; - case DateTimeConstants.FRIDAY: - friday++; - break; - case DateTimeConstants.SATURDAY: - saturday++; - break; - case DateTimeConstants.SUNDAY: - sunday++; - break; - } - } - - // infer the calendar. if there is service on more than half as many as the maximum number of - // a particular day that has service, assume that day has service in general. - int maxService = Ints.max(monday, tuesday, wednesday, thursday, friday, saturday, sunday); - - cal = new ServiceCalendar(); - cal.feedId = feed.id; - - if (startDate == null) { - // no service whatsoever - LOG.warn("Service ID " + svc.service_id + " has no service whatsoever"); - startDate = LocalDate.now().minusMonths(1); - endDate = startDate.plusYears(1); - cal.monday = cal.tuesday = cal.wednesday = cal.thursday = cal.friday = cal.saturday = cal.sunday = false; - } - else { - // infer parameters - - int threshold = (int) Math.round(Math.ceil((double) maxService / 2)); - - cal.monday = monday >= threshold; - cal.tuesday = tuesday >= threshold; - cal.wednesday = wednesday >= threshold; - cal.thursday = thursday >= threshold; - cal.friday = friday >= threshold; - cal.saturday = saturday >= threshold; - cal.sunday = sunday >= threshold; - - cal.startDate = startDate; - cal.endDate = endDate; - } - - cal.inferName(); - cal.gtfsServiceId = svc.service_id; - } - -// feedTx.calendars.put(cal.gtfsServiceId, cal); - calendars.put(svc.service_id, cal); - - serviceCalendarCount++; - } + // FIXME: add back in services! +// for (Service svc : input.services.values()) { +// +// ServiceCalendar cal; +// +// if (svc.calendar != null) { +// // easy case: don't have to infer anything! +// cal = new ServiceCalendar(svc.calendar, feed); +// } else { +// // infer a calendar +// // number of mondays, etc. that this calendar is active +// int monday, tuesday, wednesday, thursday, friday, saturday, sunday; +// monday = tuesday = wednesday = thursday = friday = saturday = sunday = 0; +// LocalDate startDate = null; +// LocalDate endDate = null; +// +// for (CalendarDate cd : svc.calendar_dates.values()) { +// if (cd.exception_type == 2) +// continue; +// +// if (startDate == null || cd.date.isBefore(startDate)) +// startDate = cd.date; +// +// if (endDate == null || cd.date.isAfter(endDate)) +// endDate = cd.date; +// +// int dayOfWeek = cd.date.getDayOfWeek().getValue(); +// +// switch (dayOfWeek) { +// case DateTimeConstants.MONDAY: +// monday++; +// break; +// case DateTimeConstants.TUESDAY: +// tuesday++; +// break; +// case DateTimeConstants.WEDNESDAY: +// wednesday++; +// break; +// case DateTimeConstants.THURSDAY: +// thursday++; +// break; +// case DateTimeConstants.FRIDAY: +// friday++; +// break; +// case DateTimeConstants.SATURDAY: +// saturday++; +// break; +// case DateTimeConstants.SUNDAY: +// sunday++; +// break; +// } +// } +// +// // infer the calendar. if there is service on more than half as many as the maximum number of +// // a particular day that has service, assume that day has service in general. +// int maxService = Ints.max(monday, tuesday, wednesday, thursday, friday, saturday, sunday); +// +// cal = new ServiceCalendar(); +// cal.feedId = feed.id; +// +// if (startDate == null) { +// // no service whatsoever +// LOG.warn("Service ID " + svc.service_id + " has no service whatsoever"); +// startDate = LocalDate.now().minusMonths(1); +// endDate = startDate.plusYears(1); +// cal.monday = cal.tuesday = cal.wednesday = cal.thursday = cal.friday = cal.saturday = cal.sunday = false; +// } +// else { +// // infer parameters +// +// int threshold = (int) Math.round(Math.ceil((double) maxService / 2)); +// +// cal.monday = monday >= threshold; +// cal.tuesday = tuesday >= threshold; +// cal.wednesday = wednesday >= threshold; +// cal.thursday = thursday >= threshold; +// cal.friday = friday >= threshold; +// cal.saturday = saturday >= threshold; +// cal.sunday = sunday >= threshold; +// +// cal.startDate = startDate; +// cal.endDate = endDate; +// } +// +// cal.inferName(); +// cal.gtfsServiceId = svc.service_id; +// } +// +// calendars.put(svc.service_id, cal); +// +// serviceCalendarCount++; +// } LOG.info("Service calendars loaded: " + serviceCalendarCount); synchronized (status) { @@ -354,64 +313,69 @@ public Agency apply(Tuple2 input) { status.message = "Importing trips..."; status.percentComplete = 50; } + // FIXME need to load patterns and trips // import trips, stop times and patterns all at once - Map patterns = input.patterns; - Set processedTrips = new HashSet<>(); - for (Entry pattern : patterns.entrySet()) { - // it is possible, though unlikely, for two routes to have the same stopping pattern - // we want to ensure they get different trip patterns - Map tripPatternsByRoute = Maps.newHashMap(); - for (String tripId : pattern.getValue().associatedTrips) { - - // TODO: figure out why trips are being added twice. This check prevents that. - if (processedTrips.contains(tripId)) { - continue; - } - synchronized (status) { - status.message = "Importing trips... (id: " + tripId + ") " + tripCount + "/" + input.trips.size(); - status.percentComplete = 50 + 45 * tripCount / input.trips.size(); - } - com.conveyal.gtfs.model.Trip gtfsTrip = input.trips.get(tripId); - - if (!tripPatternsByRoute.containsKey(gtfsTrip.route_id)) { - TripPattern pat = createTripPatternFromTrip(gtfsTrip, feedTx); - feedTx.tripPatterns.put(pat.id, pat); - tripPatternsByRoute.put(gtfsTrip.route_id, pat); - } - - // there is more than one pattern per route, but this map is specific to only this pattern - // generally it will contain exactly one entry, unless there are two routes with identical - // stopping patterns. - // (in DC, suppose there were trips on both the E2/weekday and E3/weekend from Friendship Heights - // that short-turned at Missouri and 3rd). - TripPattern pat = tripPatternsByRoute.get(gtfsTrip.route_id); - - ServiceCalendar cal = calendars.get(gtfsTrip.service_id); - - // if the service calendar has not yet been imported, import it - if (feedTx.calendars != null && !feedTx.calendars.containsKey(cal.id)) { - // no need to clone as they are going into completely separate mapdbs - feedTx.calendars.put(cal.id, cal); - } - - Trip trip = new Trip(gtfsTrip, routeIdMap.get(gtfsTrip.route_id), pat, cal); - - Collection stopTimes = - input.stop_times.subMap(new Tuple2(gtfsTrip.trip_id, null), new Tuple2(gtfsTrip.trip_id, Fun.HI)).values(); - - for (com.conveyal.gtfs.model.StopTime st : stopTimes) { - trip.stopTimes.add(new StopTime(st, stopIdMap.get(new Tuple2<>(st.stop_id, feed.id)).id)); - } - - feedTx.trips.put(trip.id, trip); - processedTrips.add(tripId); - tripCount++; - - if (tripCount % 1000 == 0) { - LOG.info("Loaded {} / {} trips", tripCount, input.trips.size()); - } - } - } +// Map patterns = input.patterns; +// Set processedTrips = new HashSet<>(); +// for (Entry pattern : patterns.entrySet()) { +// // it is possible, though unlikely, for two routes to have the same stopping pattern +// // we want to ensure they retrieveById different trip patterns +// Map tripPatternsByRoute = Maps.newHashMap(); +// for (String tripId : pattern.getValue().associatedTrips) { +// +// // TODO: figure out why trips are being added twice. This check prevents that. +// if (processedTrips.contains(tripId)) { +// continue; +// } +// synchronized (status) { +// status.message = "Importing trips... (id: " + tripId + ") " + tripCount + "/" + input.trips.size(); +// status.percentComplete = 50 + 45 * tripCount / input.trips.size(); +// } +// com.conveyal.gtfs.model.Trip gtfsTrip = input.trips.retrieveById(tripId); +// +// if (!tripPatternsByRoute.containsKey(gtfsTrip.route_id)) { +// TripPattern pat = createTripPatternFromTrip(gtfsTrip, feedTx); +// feedTx.tripPatterns.put(pat.id, pat); +// tripPatternsByRoute.put(gtfsTrip.route_id, pat); +// } +// +// // there is more than one pattern per route, but this map is specific to only this pattern +// // generally it will contain exactly one entry, unless there are two routes with identical +// // stopping patterns. +// // (in DC, suppose there were trips on both the E2/weekday and E3/weekend from Friendship Heights +// // that short-turned at Missouri and 3rd). +// TripPattern pat = tripPatternsByRoute.retrieveById(gtfsTrip.route_id); +// +// ServiceCalendar cal = calendars.retrieveById(gtfsTrip.service_id); +// +// // if the service calendar has not yet been imported, import it +// if (feedTx.calendars != null && !feedTx.calendars.containsKey(cal.id)) { +// // no need to clone as they are going into completely separate mapdbs +// feedTx.calendars.put(cal.id, cal); +// } +// +// Trip trip = new Trip(gtfsTrip, routeIdMap.retrieveById(gtfsTrip.route_id), pat, cal); +// +// // TODO: query ordered stopTimes for a given trip id +// // FIXME: add back in stopTimes +// Collection stopTimes = new ArrayList<>(); +// input.stopTimes.subMap(new Tuple2(gtfsTrip.trip_id, null), new Tuple2(gtfsTrip.trip_id, Fun.HI)).values(); +// +// for (com.conveyal.gtfs.model.StopTime st : stopTimes) { +// trip.stopTimes.add(new StopTime(st, stopIdMap.retrieveById(new Tuple2<>(st.stop_id, feed.id)).id)); +// stopTimeCount++; +// } +// +// feedTx.trips.put(trip.id, trip); +// processedTrips.add(tripId); +// tripCount++; +// +// // FIXME add back in total number of trips for QC +// if (tripCount % 1000 == 0) { +// LOG.info("Loaded {} / {} trips", tripCount); // input.trips.size() +// } +// } +// } LOG.info("Trips loaded: " + tripCount); synchronized (status) { @@ -420,12 +384,13 @@ public Agency apply(Tuple2 input) { } LOG.info("GtfsImporter: importing fares..."); - Map fares = input.fares; - for (com.conveyal.gtfs.model.Fare f : fares.values()) { - Fare fare = new Fare(f.fare_attribute, f.fare_rules, feed); - feedTx.fares.put(fare.id, fare); - fareCount++; - } + // FIXME add in fares +// Map fares = input.fares; +// for (com.conveyal.gtfs.model.Fare f : fares.values()) { +// Fare fare = new Fare(f.fare_attribute, f.fare_rules, feed); +// feedTx.fares.put(fare.id, fare); +// fareCount++; +// } LOG.info("Fares loaded: " + fareCount); synchronized (status) { status.message = "Fares loaded: " + fareCount; @@ -441,7 +406,7 @@ public Agency apply(Tuple2 input) { gtx.commit(); Snapshot.deactivateSnapshots(feedVersion.feedSourceId, null); // create an initial snapshot for this FeedVersion - Snapshot snapshot = VersionedDataStore.takeSnapshot(feed.id, "Snapshot of " + feedVersion.getName(), "none"); + Snapshot snapshot = VersionedDataStore.takeSnapshot(editorFeed.id, feedVersion.id, "Snapshot of " + feedVersion.name, "none"); LOG.info("Imported GTFS file: " + agencyCount + " agencies; " + routeCount + " routes;" + stopCount + " stops; " + stopTimeCount + " stopTimes; " + tripCount + " trips;" + shapePointCount + " shapePoints"); @@ -456,137 +421,117 @@ public Agency apply(Tuple2 input) { status.message = "Failed to process GTFS snapshot."; status.error = true; } - halt(404, "Failed to process GTFS snapshot."); } finally { feedTx.rollbackIfOpen(); gtx.rollbackIfOpen(); - // set job as complete - jobFinished(); + // FIXME: anything we need to do at the end of using Feed? +// inputFeedTables.close(); + } } /** infer the ownership of stops based on what stops there * Returns a set of tuples stop ID, agency ID with GTFS IDs */ - private SortedSet> inferAgencyStopOwnership() { - // agency - SortedSet> ret = Sets.newTreeSet(); - - for (com.conveyal.gtfs.model.StopTime st : input.stop_times.values()) { - String stopId = st.stop_id; - com.conveyal.gtfs.model.Trip trip = input.trips.get(st.trip_id); - if (trip != null) { - String routeId = trip.route_id; - String agencyId = input.routes.get(routeId).agency_id; - Tuple2 key = new Tuple2(stopId, agencyId); - ret.add(key); - } - } - - return ret; - } +// private SortedSet> inferAgencyStopOwnership() { +// SortedSet> ret = Sets.newTreeSet(); +// +// for (com.conveyal.gtfs.model.StopTime st : input.stop_times.values()) { +// String stopId = st.stop_id; +// com.conveyal.gtfs.model.Trip trip = input.trips.retrieveById(st.trip_id); +// if (trip != null) { +// String routeId = trip.route_id; +// String agencyId = input.routes.retrieveById(routeId).agency_id; +// Tuple2 key = new Tuple2(stopId, agencyId); +// ret.add(key); +// } +// } +// +// return ret; +// } /** * Create a trip pattern from the given trip. - * Neither the trippattern nor the trippatternstops are saved. + * Neither the TripPattern nor the TripPatternStops are saved. */ - public TripPattern createTripPatternFromTrip (com.conveyal.gtfs.model.Trip gtfsTrip, FeedTx tx) { - TripPattern patt = new TripPattern(); - com.conveyal.gtfs.model.Route gtfsRoute = input.routes.get(gtfsTrip.route_id); - patt.routeId = routeIdMap.get(gtfsTrip.route_id).id; - patt.feedId = feed.id; - - String patternId = input.tripPatternMap.get(gtfsTrip.trip_id); - Pattern gtfsPattern = input.patterns.get(patternId); - patt.shape = gtfsPattern.geometry; - patt.id = gtfsPattern.pattern_id; - - patt.patternStops = new ArrayList(); - patt.patternDirection = TripDirection.fromGtfs(gtfsTrip.direction_id); - - com.conveyal.gtfs.model.StopTime[] stopTimes = - input.stop_times.subMap(new Tuple2(gtfsTrip.trip_id, 0), new Tuple2(gtfsTrip.trip_id, Fun.HI)).values().toArray(new com.conveyal.gtfs.model.StopTime[0]); - - if (gtfsTrip.trip_headsign != null && !gtfsTrip.trip_headsign.isEmpty()) - patt.name = gtfsTrip.trip_headsign; - else - patt.name = gtfsPattern.name; -// else if (gtfsRoute.route_long_name != null) -// patt.name = String.format("{} to {} ({} stops)", gtfsRoute.route_long_name, input.stops.get(stopTimes[stopTimes.length - 1].stop_id).stop_name, stopTimes.length); // Messages.get("gtfs.named-route-pattern", gtfsTrip.route.route_long_name, input.stops.get(stopTimes[stopTimes.length - 1].stop_id).stop_name, stopTimes.length); +// public TripPattern createTripPatternFromTrip (com.conveyal.gtfs.model.Trip gtfsTrip, FeedTx tx) { +// TripPattern patt = new TripPattern(); +// com.conveyal.gtfs.model.Route gtfsRoute = input.routes.retrieveById(gtfsTrip.route_id); +// patt.routeId = routeIdMap.retrieveById(gtfsTrip.route_id).id; +// patt.feedId = feed.id; +// +// String patternId = input.tripPatternMap.retrieveById(gtfsTrip.trip_id); +// Pattern gtfsPattern = input.patterns.retrieveById(patternId); +// patt.shape = gtfsPattern.geometry; +// patt.id = gtfsPattern.pattern_id; +// +// patt.patternStops = new ArrayList<>(); +// patt.patternDirection = TripDirection.fromGtfs(gtfsTrip.direction_id); +// +// com.conveyal.gtfs.model.StopTime[] stopTimes = +// input.stop_times.subMap(new Tuple2(gtfsTrip.trip_id, 0), new Tuple2(gtfsTrip.trip_id, Fun.HI)).values().toArray(new com.conveyal.gtfs.model.StopTime[0]); +// +// if (gtfsTrip.trip_headsign != null && !gtfsTrip.trip_headsign.isEmpty()) +// patt.name = gtfsTrip.trip_headsign; // else -// patt.name = String.format("to {} ({{} stops)", input.stops.get(stopTimes[stopTimes.length - 1].stop_id).stop_name, stopTimes.length); // Messages.get("gtfs.unnamed-route-pattern", input.stops.get(stopTimes[stopTimes.length - 1].stop_id).stop_name, stopTimes.length); - - for (com.conveyal.gtfs.model.StopTime st : stopTimes) { - TripPatternStop tps = new TripPatternStop(); +// patt.name = gtfsPattern.name; +// +// for (com.conveyal.gtfs.model.StopTime st : stopTimes) { +// TripPatternStop tps = new TripPatternStop(); +// +// Stop stop = stopIdMap.retrieveById(new Tuple2(st.stop_id, patt.feedId)); +// tps.stopId = stop.id; +// +// // set timepoint according to first gtfs value and then whether arrival and departure times are present +// if (st.timepoint != Entity.INT_MISSING) +// tps.timepoint = st.timepoint == 1; +// else if (st.arrival_time != Entity.INT_MISSING && st.departure_time != Entity.INT_MISSING) { +// tps.timepoint = true; +// } +// else +// tps.timepoint = false; +// +// if (st.departure_time != Entity.INT_MISSING && st.arrival_time != Entity.INT_MISSING) +// tps.defaultDwellTime = st.departure_time - st.arrival_time; +// else +// tps.defaultDwellTime = 0; +// +// patt.patternStops.add(tps); +// } +// +// patt.calcShapeDistTraveled(tx); +// +// // infer travel times +// if (stopTimes.length >= 2) { +// int startOfBlock = 0; +// // start at one because the first stop has no travel time +// // but don't put nulls in the data +// patt.patternStops.retrieveById(0).defaultTravelTime = 0; +// for (int i = 1; i < stopTimes.length; i++) { +// com.conveyal.gtfs.model.StopTime current = stopTimes[i]; +// +// if (current.arrival_time != Entity.INT_MISSING) { +// // interpolate times +// +// int timeSinceLastSpecifiedTime = current.arrival_time - stopTimes[startOfBlock].departure_time; +// +// double blockLength = patt.patternStops.retrieveById(i).shapeDistTraveled - patt.patternStops.retrieveById(startOfBlock).shapeDistTraveled; +// +// // go back over all of the interpolated stop times and interpolate them +// for (int j = startOfBlock + 1; j <= i; j++) { +// TripPatternStop tps = patt.patternStops.retrieveById(j); +// double distFromLastStop = patt.patternStops.retrieveById(j).shapeDistTraveled - patt.patternStops.retrieveById(j - 1).shapeDistTraveled; +// tps.defaultTravelTime = (int) Math.round(timeSinceLastSpecifiedTime * distFromLastStop / blockLength); +// } +// +// startOfBlock = i; +// } +// } +// } +// +// return patt; +// } - Stop stop = stopIdMap.get(new Tuple2(st.stop_id, patt.feedId)); - tps.stopId = stop.id; - - // set timepoint according to first gtfs value and then whether arrival and departure times are present - if (st.timepoint != Entity.INT_MISSING) - tps.timepoint = st.timepoint == 1; - else if (st.arrival_time != Entity.INT_MISSING && st.departure_time != Entity.INT_MISSING) { - tps.timepoint = true; - } - else - tps.timepoint = false; - - if (st.departure_time != Entity.INT_MISSING && st.arrival_time != Entity.INT_MISSING) - tps.defaultDwellTime = st.departure_time - st.arrival_time; - else - tps.defaultDwellTime = 0; - - patt.patternStops.add(tps); - } - - patt.calcShapeDistTraveled(tx); - - // infer travel times - if (stopTimes.length >= 2) { - int startOfBlock = 0; - // start at one because the first stop has no travel time - // but don't put nulls in the data - patt.patternStops.get(0).defaultTravelTime = 0; - for (int i = 1; i < stopTimes.length; i++) { - com.conveyal.gtfs.model.StopTime current = stopTimes[i]; - - if (current.arrival_time != Entity.INT_MISSING) { - // interpolate times - - int timeSinceLastSpecifiedTime = current.arrival_time - stopTimes[startOfBlock].departure_time; - - double blockLength = patt.patternStops.get(i).shapeDistTraveled - patt.patternStops.get(startOfBlock).shapeDistTraveled; - - // go back over all of the interpolated stop times and interpolate them - for (int j = startOfBlock + 1; j <= i; j++) { - TripPatternStop tps = patt.patternStops.get(j); - double distFromLastStop = patt.patternStops.get(j).shapeDistTraveled - patt.patternStops.get(j - 1).shapeDistTraveled; - tps.defaultTravelTime = (int) Math.round(timeSinceLastSpecifiedTime * distFromLastStop / blockLength); - } - - startOfBlock = i; - } - } - } - - return patt; - } - - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } - } - - @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } - } } diff --git a/src/main/java/com/conveyal/datatools/editor/models/Snapshot.java b/src/main/java/com/conveyal/datatools/editor/models/Snapshot.java index 2a54a06b8..ada896941 100644 --- a/src/main/java/com/conveyal/datatools/editor/models/Snapshot.java +++ b/src/main/java/com/conveyal/datatools/editor/models/Snapshot.java @@ -2,10 +2,12 @@ import com.conveyal.datatools.editor.datastore.GlobalTx; import com.conveyal.datatools.editor.datastore.VersionedDataStore; +import com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotExport; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import java.io.File; import java.io.IOException; import java.time.LocalDate; @@ -83,6 +85,36 @@ public Snapshot clone () { } } + public String generateFileName () { + return this.feedId + "_" + this.snapshotTime + ".zip"; + } + + /** Write snapshot to disk as GTFS */ + public static boolean writeSnapshotAsGtfs (Tuple2 decodedId, File outFile) { + GlobalTx gtx = VersionedDataStore.getGlobalTx(); + Snapshot local; + try { + if (!gtx.snapshots.containsKey(decodedId)) { + return false; + } + local = gtx.snapshots.get(decodedId); + new ProcessGtfsSnapshotExport(local, outFile).run(); + } finally { + gtx.rollbackIfOpen(); + } + return true; + } + + public static boolean writeSnapshotAsGtfs (String id, File outFile) { + Tuple2 decodedId; + try { + decodedId = JacksonSerializers.Tuple2IntDeserializer.deserialize(id); + } catch (IOException e1) { + return false; + } + return writeSnapshotAsGtfs(decodedId, outFile); + } + @JsonIgnore public static Collection getSnapshots (String feedId) { GlobalTx gtx = VersionedDataStore.getGlobalTx(); diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/Agency.java b/src/main/java/com/conveyal/datatools/editor/models/transit/Agency.java index 9bb49c456..3fc117c08 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/Agency.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/Agency.java @@ -33,6 +33,7 @@ public Agency(com.conveyal.gtfs.model.Agency agency, EditorFeed feed) { this.lang = agency.agency_lang; this.phone = agency.agency_phone; this.feedId = feed.id; + this.email = agency.agency_email; } public Agency () {} @@ -63,7 +64,7 @@ public com.conveyal.gtfs.model.Agency toGtfs() { ret.agency_timezone = timezone; ret.agency_lang = lang; ret.agency_phone = phone; -// ret.agency_email = email; + ret.agency_email = email; return ret; } diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/AttributeAvailabilityType.java b/src/main/java/com/conveyal/datatools/editor/models/transit/AttributeAvailabilityType.java index 792a6228d..7a4ed3298 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/AttributeAvailabilityType.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/AttributeAvailabilityType.java @@ -3,5 +3,27 @@ public enum AttributeAvailabilityType { UNKNOWN, AVAILABLE, - UNAVAILABLE + UNAVAILABLE; + + public int toGtfs () { + switch (this) { + case AVAILABLE: + return 1; + case UNAVAILABLE: + return 2; + default: // if value is UNKNOWN or missing + return 0; + } + } + + public static AttributeAvailabilityType fromGtfs (int availabilityType) { + switch (availabilityType) { + case 1: + return AVAILABLE; + case 2: + return UNAVAILABLE; + default: // if value is UNKNOWN or missing + return UNKNOWN; + } + } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/EditorFeed.java b/src/main/java/com/conveyal/datatools/editor/models/transit/EditorFeed.java index a70f6350a..a9e39be2b 100644 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/EditorFeed.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/EditorFeed.java @@ -1,13 +1,6 @@ package com.conveyal.datatools.editor.models.transit; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.GlobalTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; import com.conveyal.datatools.editor.models.Model; -import com.conveyal.datatools.manager.models.JsonViews; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonView; import java.io.Serializable; import java.net.URL; @@ -33,27 +26,18 @@ public class EditorFeed extends Model implements Cloneable, Serializable { public LocalDate feedStartDate; public LocalDate feedEndDate; -// @JsonProperty -// public Integer getRouteCount() { -// FeedTx tx = VersionedDataStore.getFeedTx(id); -// return tx.routes.size(); -// } +// public transient int numberOfRoutes, numberOfStops; +// @JsonProperty("numberOfRoutes") +// public int jsonGetNumberOfRoutes() { return numberOfRoutes; } // -// @JsonProperty -// public Integer getStopCount() { -// FeedTx tx = VersionedDataStore.getFeedTx(id); -// return tx.stops.size(); -// } -// @JsonInclude(JsonInclude.Include.NON_NULL) -// @JsonView(JsonViews.UserInterface.class) -// public boolean getEditedSinceSnapshot() { -// FeedTx tx = VersionedDataStore.getFeedTx(id); -//// return tx.editedSinceSnapshot.get(); -// return false; +// @JsonProperty("numberOfStops") +// public int jsonGetNumberOfStops() { return numberOfStops; } +// +// // Add information about the days of week this route is active +// public void addDerivedInfo(final FeedTx tx) { +// numberOfRoutes = tx.routes.size(); +// numberOfStops = tx.stops.size(); // } - // the associated FeedSource in the data manager DB - //public String feedSourceId; - public EditorFeed() {} diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/Fare.java b/src/main/java/com/conveyal/datatools/editor/models/transit/Fare.java index 2356dfae9..1a7dcc933 100644 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/Fare.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/Fare.java @@ -1,7 +1,6 @@ package com.conveyal.datatools.editor.models.transit; import com.conveyal.datatools.editor.models.Model; -import com.conveyal.gtfs.model.FareAttribute; import com.conveyal.gtfs.model.FareRule; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.google.common.collect.Lists; @@ -27,7 +26,7 @@ public class Fare extends Model implements Cloneable, Serializable { public Integer transferDuration; public List fareRules = Lists.newArrayList(); - public Fare() {}; + public Fare() {} public Fare(com.conveyal.gtfs.model.FareAttribute fare, List rules, EditorFeed feed) { this.gtfsFareId = fare.fare_id; diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/Route.java b/src/main/java/com/conveyal/datatools/editor/models/transit/Route.java index c96a033e5..228808413 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/Route.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/Route.java @@ -1,6 +1,6 @@ package com.conveyal.datatools.editor.models.transit; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; +import com.conveyal.gtfs.model.Entity; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.conveyal.datatools.editor.datastore.FeedTx; @@ -13,6 +13,8 @@ import java.net.MalformedURLException; import java.net.URL; import java.util.Collection; +import java.util.HashSet; +import java.util.Set; public class Route extends Model implements Cloneable, Serializable { public static final long serialVersionUID = 1; @@ -46,14 +48,9 @@ public class Route extends Model implements Cloneable, Serializable { public AttributeAvailabilityType wheelchairBoarding; - public int getNumberOfTrips () { - FeedTx tx = VersionedDataStore.getFeedTx(this.feedId); - Collection trips = tx.getTripsByRoute(this.id); - return trips == null ? 0 : trips.size(); - } - /** on which days does this route have service? Derived from calendars on render */ public transient Boolean monday, tuesday, wednesday, thursday, friday, saturday, sunday; + public transient int numberOfTrips = 0; // add getters so Jackson will serialize @@ -92,8 +89,19 @@ public Boolean jsonGetSunday() { return sunday; } + @JsonProperty("numberOfTrips") + public int jsonGetNumberOfTrips() { + return numberOfTrips; + } + public Route () {} + /** + * Construct editor route from gtfs-lib representation. + * @param route + * @param feed + * @param agency + */ public Route(com.conveyal.gtfs.model.Route route, EditorFeed feed, Agency agency) { this.gtfsRouteId = route.route_id; this.routeShortName = route.route_short_name; @@ -107,7 +115,7 @@ public Route(com.conveyal.gtfs.model.Route route, EditorFeed feed, Agency agency this.routeTextColor = route.route_text_color; this.feedId = feed.id; - this.agencyId = agency.id; + this.agencyId = agency != null ? agency.id : null; } @@ -118,20 +126,19 @@ public Route(String routeShortName, String routeLongName, int routeType, String this.routeDesc = routeDescription; this.feedId = feed.id; - this.agencyId = agency.id; + this.agencyId = agency != null ? agency.id : null; } - public com.conveyal.gtfs.model.Route toGtfs(com.conveyal.gtfs.model.Agency a, GlobalTx tx) { + public com.conveyal.gtfs.model.Route toGtfs(com.conveyal.gtfs.model.Agency a) { com.conveyal.gtfs.model.Route ret = new com.conveyal.gtfs.model.Route(); - ret.agency_id = a.agency_id; + ret.agency_id = a != null ? a.agency_id : ""; ret.route_color = routeColor; ret.route_desc = routeDesc; ret.route_id = getGtfsId(); ret.route_long_name = routeLongName; ret.route_short_name = routeShortName; ret.route_text_color = routeTextColor; - // TODO also handle HVT types here - //ret.route_type = mapGtfsRouteType(routeTypeId); + ret.route_type = gtfsRouteType != null ? gtfsRouteType.toGtfs() : Entity.INT_MISSING; try { ret.route_url = routeUrl == null ? null : new URL(routeUrl); } catch (MalformedURLException e) { @@ -158,7 +165,7 @@ public String getGtfsId() { /** * Get a name for this combining the short name and long name as available. - * @return + * @return combined route short and long names */ @JsonIgnore public String getName() { @@ -174,36 +181,56 @@ else if (routeLongName == null) } // Add information about the days of week this route is active - public void addDerivedInfo(FeedTx tx) { - monday = tuesday = wednesday = thursday = friday = saturday = sunday = false; - - for (Trip trip : tx.getTripsByRoute(this.id)) { - ServiceCalendar cal = tx.calendars.get(trip.calendarId); - - if (cal.monday) - monday = true; - - if (cal.tuesday) - tuesday = true; - - if (cal.wednesday) - wednesday = true; - - if (cal.thursday) - thursday = true; - - if (cal.friday) - friday = true; - - if (cal.saturday) - saturday = true; - - if (cal.sunday) - sunday = true; - - if (monday && tuesday && wednesday && thursday && friday && saturday && sunday) - // optimization: no point in continuing - break; + public void addDerivedInfo(final FeedTx tx) { + + monday = false; + tuesday = false; + wednesday = false; + thursday = false; + friday = false; + saturday = false; + sunday = false; + Set calendars = new HashSet<>(); + + Collection tripsForRoute = tx.getTripsByRoute(this.id); + numberOfTrips = tripsForRoute == null ? 0 : tripsForRoute.size(); + + for (Trip trip : tripsForRoute) { + ServiceCalendar cal = null; + try { + if (calendars.contains(trip.calendarId)) continue; + cal = tx.calendars.get(trip.calendarId); + if (cal.monday) + monday = true; + + if (cal.tuesday) + tuesday = true; + + if (cal.wednesday) + wednesday = true; + + if (cal.thursday) + thursday = true; + + if (cal.friday) + friday = true; + + if (cal.saturday) + saturday = true; + + if (cal.sunday) + sunday = true; + + if (monday && tuesday && wednesday && thursday && friday && saturday && sunday) { + // optimization: no point in continuing + break; + } + } catch (Exception e) { + LOG.error("Could not process trip {} or cal {} for route {}", trip, cal, this); + } + + // track which calendars we've processed to avoid redundancy + calendars.add(trip.calendarId); } } diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/ScheduleException.java b/src/main/java/com/conveyal/datatools/editor/models/transit/ScheduleException.java index 4101a78d1..e01a9a881 100644 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/ScheduleException.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/ScheduleException.java @@ -83,14 +83,41 @@ else if (removedService != null && removedService.contains(service.id)) { * Represents a desire about what service should be like on a particular day. * For example, run Sunday service on Presidents' Day, or no service on New Year's Day. */ - public static enum ExemplarServiceDescriptor { + public enum ExemplarServiceDescriptor { MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY, NO_SERVICE, CUSTOM, SWAP; + + public int toInt () { + switch (this) { + case MONDAY: + return 0; + case TUESDAY: + return 1; + case WEDNESDAY: + return 2; + case THURSDAY: + return 3; + case FRIDAY: + return 4; + case SATURDAY: + return 5; + case SUNDAY: + return 6; + case NO_SERVICE: + return 7; + case CUSTOM: + return 8; + case SWAP: + return 9; + default: + return 0; + } + } } public ScheduleException clone () throws CloneNotSupportedException { ScheduleException c = (ScheduleException) super.clone(); - c.dates = new ArrayList(this.dates); - c.customSchedule = new ArrayList(this.customSchedule); + c.dates = new ArrayList<>(this.dates); + c.customSchedule = new ArrayList<>(this.customSchedule); return c; } } diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/ServiceCalendar.java b/src/main/java/com/conveyal/datatools/editor/models/transit/ServiceCalendar.java index 509ea0b1d..05b6ea55e 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/ServiceCalendar.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/ServiceCalendar.java @@ -5,8 +5,6 @@ import com.conveyal.gtfs.model.Calendar; import com.conveyal.gtfs.model.Service; import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Function; -import com.google.common.collect.Collections2; import com.conveyal.datatools.editor.datastore.FeedTx; import com.conveyal.datatools.editor.models.Model; import java.time.LocalDate; @@ -33,7 +31,7 @@ public class ServiceCalendar extends Model implements Cloneable, Serializable { public LocalDate startDate; public LocalDate endDate; - public ServiceCalendar() {}; + public ServiceCalendar() {} public ServiceCalendar(Calendar calendar, EditorFeed feed) { this.gtfsServiceId = calendar.service_id; @@ -44,8 +42,8 @@ public ServiceCalendar(Calendar calendar, EditorFeed feed) { this.friday = calendar.friday == 1; this.saturday = calendar.saturday == 1; this.sunday = calendar.sunday == 1; - this.startDate = fromGtfs(calendar.start_date); - this.endDate = fromGtfs(calendar.end_date); + this.startDate = calendar.start_date; + this.endDate = calendar.end_date; inferName(); this.feedId = feed.id; } @@ -157,8 +155,8 @@ public Service toGtfs(int startDate, int endDate) { Service ret = new Service(id); ret.calendar = new Calendar(); ret.calendar.service_id = ret.service_id; - ret.calendar.start_date = startDate; - ret.calendar.end_date = endDate; + ret.calendar.start_date = fromGtfs(startDate); + ret.calendar.end_date = fromGtfs(endDate); ret.calendar.sunday = sunday ? 1 : 0; ret.calendar.monday = monday ? 1 : 0; ret.calendar.tuesday = tuesday ? 1 : 0; @@ -210,9 +208,9 @@ public void addDerivedInfo(final FeedTx tx) { // note that this is not ideal as we are fetching all of the trips. however, it's not really very possible // with MapDB to have an index involving three tables. - Set routeIds = Sets.newHashSet(); Map tripsForRoutes = new HashMap<>(); for (Trip trip : tx.getTripsByCalendar(this.id)) { + if (trip == null) continue; Long count = 0L; /** @@ -228,16 +226,7 @@ public void addDerivedInfo(final FeedTx tx) { if (trip.routeId != null) { tripsForRoutes.put(trip.routeId, count + 1); } -// routeIds.add(trip.routeId); } this.routes = tripsForRoutes; -// this.routes = Collections2.transform(routeIds, new Function() { -// -// @Override -// public String apply(String routeId) { -// return tx.routes.get(routeId).getName(); -// } -// -// }); } } diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/StatusType.java b/src/main/java/com/conveyal/datatools/editor/models/transit/StatusType.java index 013365910..92a7b5745 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/StatusType.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/StatusType.java @@ -4,5 +4,18 @@ public enum StatusType { IN_PROGRESS, PENDING_APPROVAL, APPROVED, - DISABLED + DISABLED; + + public int toInt () { + switch (this) { + case APPROVED: + return 2; + case IN_PROGRESS: + return 1; + case PENDING_APPROVAL: + return 0; + default: + return 0; + } + } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/Stop.java b/src/main/java/com/conveyal/datatools/editor/models/transit/Stop.java index 6da4339e9..ebc7ece23 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/Stop.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/Stop.java @@ -70,6 +70,7 @@ public Stop(com.conveyal.gtfs.model.Stop stop, GeometryFactory geometryFactory, this.parentStation = stop.parent_station; this.pickupType = StopTimePickupDropOffType.SCHEDULED; this.dropOffType = StopTimePickupDropOffType.SCHEDULED; + this.wheelchairBoarding = stop.wheelchair_boarding != null ? AttributeAvailabilityType.fromGtfs(Integer.valueOf(stop.wheelchair_boarding)) : null; this.location = geometryFactory.createPoint(new Coordinate(stop.stop_lon,stop.stop_lat)); @@ -116,11 +117,15 @@ public com.conveyal.gtfs.model.Stop toGtfs() { ret.stop_desc = stopDesc; ret.stop_lat = location.getY(); ret.stop_lon = location.getX(); + // TODO: gtfs-lib value needs to be int + if (wheelchairBoarding != null) { + ret.wheelchair_boarding = String.valueOf(wheelchairBoarding.toGtfs()); + } if (stopName != null && !stopName.isEmpty()) ret.stop_name = stopName; else - ret.stop_name = id.toString(); + ret.stop_name = id; try { ret.stop_url = stopUrl == null ? null : new URL(stopUrl); @@ -141,46 +146,43 @@ public static void merge (List stopIds, FeedTx tx) { // find all the patterns that stop at this stop Collection tps = tx.getTripPatternsByStop(source.id); - List tpToSave = new ArrayList(); + List tpToSave = new ArrayList<>(); // update them for (TripPattern tp : tps) { try { tp = tp.clone(); } catch (CloneNotSupportedException e) { - // TODO Auto-generated catch block e.printStackTrace(); tx.rollback(); throw new RuntimeException(e); } - for (TripPatternStop ps : tp.patternStops) { - if (source.id.equals(ps.stopId)) { - ps.stopId = target.id; - } - } + tp.patternStops.stream() + .filter(ps -> source.id.equals(ps.stopId)) + .forEach(ps -> ps.stopId = target.id); // batch them for save at the end, as all of the sets we are working with still refer to the db, // so changing it midstream is a bad idea tpToSave.add(tp); // update the trips - List tripsToSave = new ArrayList(); + List tripsToSave = new ArrayList<>(); for (Trip trip : tx.getTripsByPattern(tp.id)) { try { trip = trip.clone(); } catch (CloneNotSupportedException e) { - // TODO Auto-generated catch block e.printStackTrace(); tx.rollback(); throw new RuntimeException(e); } - for (StopTime st : trip.stopTimes) { - if (source.id.equals(st.stopId)) { - // stop times have been cloned, so this is safe - st.stopId = target.id; - } - } + // stop times have been cloned, so this is safe + trip.stopTimes.stream() + .filter(st -> source.id.equals(st.stopId)) + .forEach(st -> { + // stop times have been cloned, so this is safe + st.stopId = target.id; + }); tripsToSave.add(trip); } diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/Trip.java b/src/main/java/com/conveyal/datatools/editor/models/transit/Trip.java index 27fa630e6..ae77e612c 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/Trip.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/Trip.java @@ -115,7 +115,7 @@ else if (pattern.route.wheelchairBoarding.equals(AttributeAvailabilityType.UNAVA return ret; }*/ - /** get the frequencies.txt entry for this trip, or null if this trip should not be in frequencies.txt */ + /** retrieveById the frequencies.txt entry for this trip, or null if this trip should not be in frequencies.txt */ public Frequency getFrequency(com.conveyal.gtfs.model.Trip trip) { if (useFrequency == null || !useFrequency || headway <= 0 || trip.trip_id != getGtfsId()) return null; diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/TripPattern.java b/src/main/java/com/conveyal/datatools/editor/models/transit/TripPattern.java index caa086c28..c0c8a6b01 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/TripPattern.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/TripPattern.java @@ -4,18 +4,12 @@ import com.conveyal.datatools.editor.datastore.FeedTx; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; -import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.Polygon; import com.vividsolutions.jts.linearref.LinearLocation; -import com.vividsolutions.jts.linearref.LocationIndexedLine; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; import com.conveyal.datatools.editor.models.Model; -import org.geotools.referencing.GeodeticCalculator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.conveyal.datatools.editor.utils.GeoUtils; import java.io.Serializable; import java.util.ArrayList; @@ -24,8 +18,6 @@ import java.util.Map; import java.util.stream.Collectors; -import static com.conveyal.datatools.editor.utils.GeoUtils.getCoordDistances; - @JsonIgnoreProperties(ignoreUnknown = true) public class TripPattern extends Model implements Cloneable, Serializable { public static final long serialVersionUID = 1; @@ -46,67 +38,79 @@ public class TripPattern extends Model implements Cloneable, Serializable { public TripDirection patternDirection; - public List patternStops = new ArrayList(); - - public int getNumberOfTrips () { - FeedTx tx = VersionedDataStore.getFeedTx(this.feedId); - Collection trips = tx.getTripsByPattern(this.id); - return trips == null ? 0 : trips.size(); - } + public List patternStops = new ArrayList<>(); - public Map getTripCountByCalendar () { - FeedTx tx = VersionedDataStore.getFeedTx(this.feedId); - Collection trips = tx.getTripsByPattern(this.id); - return trips.stream() - .filter(t -> t.calendarId != null) - .collect(Collectors.groupingBy(t -> t.calendarId, Collectors.counting())); - } - - /** - * Lines showing how stops are being snapped to the shape. - * @return - */ - @JsonProperty("stopConnections") - public LineString[] jsonGetStopConnections () { - if (useStraightLineDistances || shape == null) - return null; + // give the UI a little information about the content of this trip pattern + public transient int numberOfTrips; + public transient Map tripCountByCalendar; - FeedTx tx = VersionedDataStore.getFeedTx(this.feedId); - try { - LineString[] ret = new LineString[patternStops.size()]; - - double[] coordDistances = getCoordDistances(shape); - LocationIndexedLine shapeIdx = new LocationIndexedLine(shape); - - for (int i = 0; i < ret.length; i++) { - TripPatternStop ps = patternStops.get(i); - - if (ps.shapeDistTraveled == null) { - return null; - } - - Coordinate snapped = shapeIdx.extractPoint(getLoc(coordDistances, ps.shapeDistTraveled)); - // offset it slightly so that line creation does not fail if the stop is coincident - snapped.x = snapped.x - 0.00000001; - Coordinate stop = tx.stops.get(patternStops.get(i).stopId).location.getCoordinate(); - ret[i] = GeoUtils.geometyFactory.createLineString(new Coordinate[] {stop, snapped}); - } + @JsonProperty("numberOfTrips") + public int jsonGetNumberOfTrips () { + return numberOfTrips; + } - return ret; - } finally { - tx.rollback(); - } + @JsonProperty("tripCountByCalendar") + Map jsonGetTripCountByCalendar () { return tripCountByCalendar; } - } + // do-nothing setters + @JsonProperty("numberOfTrips") + public void jsonSetNumberOfTrips(int numberOfTrips) { } - public TripPattern() - { + @JsonProperty("tripCountByCalendar") + public void jsonSetTripCountByCalendar(Map tripCountByCalendar) { } + /** add transient info for UI with number of routes, number of trips */ + public void addDerivedInfo(final FeedTx tx) { + Collection trips = tx.getTripsByPattern(this.id); + numberOfTrips = trips.size(); + tripCountByCalendar = trips.stream() + .filter(t -> t != null && t.calendarId != null) + .collect(Collectors.groupingBy(t -> t.calendarId, Collectors.counting())); } - public TripPattern(String name, String headsign, LineString shape, Route route) - { +// /** +// * Lines showing how stops are being snapped to the shape. +// * @return array of LineStrings showing how stops connect to shape +// */ +// @JsonProperty("stopConnections") +// public LineString[] jsonGetStopConnections () { +// if (useStraightLineDistances || shape == null) +// return null; +// +// final FeedTx tx = VersionedDataStore.getFeedTx(this.feedId); +// +// try { +// LineString[] ret = new LineString[patternStops.size()]; +// +// double[] coordDistances = getCoordDistances(shape); +// LocationIndexedLine shapeIdx = new LocationIndexedLine(shape); +// +// for (int i = 0; i < ret.length; i++) { +// TripPatternStop ps = patternStops.retrieveById(i); +// +// if (ps.shapeDistTraveled == null) { +// return null; +// } +// +// Coordinate snapped = shapeIdx.extractPoint(getLoc(coordDistances, ps.shapeDistTraveled)); +// // offset it slightly so that line creation does not fail if the stop is coincident +// snapped.x = snapped.x - 0.00000001; +// Stop st = tx.stops.retrieveById(ps.stopId); +// Coordinate stop = st.location.getCoordinate(); +// ret[i] = GeoUtils.geometyFactory.createLineString(new Coordinate[] {stop, snapped}); +// } +// +// return ret; +// } finally { +// tx.rollback(); +// } +// +// } + + public TripPattern() {} + + public TripPattern(String name, String headsign, LineString shape, Route route) { this.name = name; this.headsign = headsign; this.shape = shape; @@ -121,7 +125,7 @@ public TripPattern clone() throws CloneNotSupportedException { else ret.shape = null; - ret.patternStops = new ArrayList(); + ret.patternStops = new ArrayList<>(); for (TripPatternStop ps : this.patternStops) { ret.patternStops.add(ps.clone()); @@ -307,199 +311,8 @@ else if (originalStops.size() < newStops.size()) { } } - // cast generic Geometry object to LineString because jackson2-geojson library only returns generic Geometry objects - public void setShape (Geometry g) { - this.shape = (LineString) g; - } - - public void calcShapeDistTraveled () { - FeedTx tx = VersionedDataStore.getFeedTx(feedId); - calcShapeDistTraveled(tx); - tx.rollback(); - } - - /** - * Calculate the shape dist traveled along the current shape. Do this by snapping points but constraining order. - * - * To make this a bit more formal, here is the algorithm: - * - * 1. We snap each stop to the nearest point on the shape, sliced by the shape_dist_traveled of the previous stop to ensure monotonicity. - * 2. then compute the distance from stop to snapped point - * 3. multiply by 2, create a buffer of that radius around the stop, and intersect with the shape. - * 4. if it intersects in 1 or 2 places, assume that you have found the correct location for that stop and - * "fix" it into that position. - * 5. otherwise, mark it to be returned to on the second pass - * 6. on the second pass, just snap to the closest point on the subsection of the shape defined by the previous and next stop positions. - */ - public void calcShapeDistTraveled(FeedTx tx) { - if (patternStops.size() == 0) - return; - - // we don't actually store shape_dist_traveled, but rather the distance from the previous point along the shape - // however, for the algorithm it's more useful to have the cumulative dist traveled - double[] shapeDistTraveled = new double[patternStops.size()]; - - useStraightLineDistances = false; - - if (shape == null) { - calcShapeDistTraveledStraightLine(tx); - return; - } - - // compute the shape dist traveled of each coordinate of the shape - double[] shapeDist = getCoordDistances(shape); - - double[] coordDist = shapeDist; - - for (int i = 0; i < shapeDistTraveled.length; i++) { - shapeDistTraveled[i] = -1; - } - - // location along the entire shape - LocationIndexedLine shapeIdx = new LocationIndexedLine(shape); - // location along the subline currently being considered - LocationIndexedLine subIdx = shapeIdx; - - LineString subShape = shape; - - double lastShapeDistTraveled = 0; - - int fixed = 0; - - GeodeticCalculator gc = new GeodeticCalculator(); - - // detect backwards shapes - int backwards = 0; - - double lastPos = -1; - for (TripPatternStop tps : patternStops) { - Stop stop = tx.stops.get(tps.stopId); - double pos = getDist(shapeDist, shapeIdx.project(stop.location.getCoordinate())); - - if (lastPos > 0) { - if (pos > lastPos) - backwards--; - else if (pos > lastPos) - backwards++; - } - - lastPos = pos; - } - - if (backwards > 0) { - LOG.warn("Detected likely backwards shape for trip pattern {} ({}) on route {}, reversing", id, name, routeId); - this.shape = (LineString) this.shape.reverse(); - calcShapeDistTraveled(tx); - return; - } - else if (backwards == 0) { - LOG.warn("Unable to tell if shape is backwards for trip pattern {} ({}) on route {}, assuming it is correct", id, name, routeId); - } - - // first pass: fix the obvious stops - for (int i = 0; i < shapeDistTraveled.length; i++) { - TripPatternStop tps = patternStops.get(i); - Stop stop = tx.stops.get(tps.stopId); - LinearLocation candidateLoc = subIdx.project(stop.location.getCoordinate()); - Coordinate candidatePt = subIdx.extractPoint(candidateLoc); - - // step 2: compute distance - gc.setStartingGeographicPoint(stop.location.getX(), stop.location.getY()); - gc.setDestinationGeographicPoint(candidatePt.x, candidatePt.y); - double dist = gc.getOrthodromicDistance(); - - // don't snap stops more than 1km - if (dist > 1000) { - LOG.warn("Stop is more than 1km from its shape, using straight-line distances"); - this.calcShapeDistTraveledStraightLine(tx); - return; - } - - // step 3: compute buffer - // add 5m to the buffer so that if the stop sits exactly atop two lines we don't just pick one - Polygon buffer = GeoUtils.bufferGeographicPoint(stop.location.getCoordinate(), dist * 2 + 5, 20); - - Geometry intersection = buffer.intersection(shape); - if (intersection.getNumGeometries() == 1) { - // good, only one intersection - shapeDistTraveled[i] = lastShapeDistTraveled + getDist(coordDist, candidateLoc); - lastShapeDistTraveled = shapeDistTraveled[i]; - - // recalculate shape dist traveled and idx - subShape = (LineString) subIdx.extractLine(candidateLoc, subIdx.getEndIndex()); - subIdx = new LocationIndexedLine(subShape); - - coordDist = getCoordDistances(subShape); - - fixed++; - } - } - - LOG.info("Fixed {} / {} stops after first round for trip pattern {} ({}) on route {}", fixed, shapeDistTraveled.length, id, name, routeId); - - // pass 2: fix the rest of the stops - lastShapeDistTraveled = 0; - for (int i = 0; i < shapeDistTraveled.length; i++) { - TripPatternStop tps = patternStops.get(i); - Stop stop = tx.stops.get(tps.stopId); - - if (shapeDistTraveled[i] >= 0) { - lastShapeDistTraveled = shapeDistTraveled[i]; - continue; - } - - // find the next shape dist traveled - double nextShapeDistTraveled = shapeDist[shapeDist.length - 1]; - for (int j = i; j < shapeDistTraveled.length; j++) { - if (shapeDistTraveled[j] >= 0) { - nextShapeDistTraveled = shapeDistTraveled[j]; - break; - } - } - - // create and index the subshape - // recalculate shape dist traveled and idx - subShape = (LineString) shapeIdx.extractLine(getLoc(shapeDist, lastShapeDistTraveled), getLoc(shapeDist, nextShapeDistTraveled)); - - if (subShape.getLength() < 0.00000001) { - LOG.warn("Two stops on trip pattern {} map to same point on shape", id); - shapeDistTraveled[i] = lastShapeDistTraveled; - continue; - } - - subIdx = new LocationIndexedLine(subShape); - - coordDist = getCoordDistances(subShape); - - LinearLocation loc = subIdx.project(stop.location.getCoordinate()); - shapeDistTraveled[i] = lastShapeDistTraveled + getDist(coordDist, loc); - lastShapeDistTraveled = shapeDistTraveled[i]; - } - - // assign default distances - for (int i = 0; i < shapeDistTraveled.length; i++) { - patternStops.get(i).shapeDistTraveled = shapeDistTraveled[i]; - } - } - - /** Calculate distances using straight line geometries */ - public void calcShapeDistTraveledStraightLine(FeedTx tx) { - useStraightLineDistances = true; - GeodeticCalculator gc = new GeodeticCalculator(); - Stop prev = tx.stops.get(patternStops.get(0).stopId); - patternStops.get(0).shapeDistTraveled = 0D; - double previousDistance = 0; - for (int i = 1; i < patternStops.size(); i++) { - TripPatternStop ps = patternStops.get(i); - Stop stop = tx.stops.get(ps.stopId); - gc.setStartingGeographicPoint(prev.location.getX(), prev.location.getY()); - gc.setDestinationGeographicPoint(stop.location.getX(), stop.location.getY()); - previousDistance = ps.shapeDistTraveled = previousDistance + gc.getOrthodromicDistance(); - } - } - /** - * From an array of distances at coordinates and a distance, get a linear location for that distance. + * From an array of distances at coordinates and a distance, retrieveById a linear location for that distance. */ private static LinearLocation getLoc(double[] distances, double distTraveled) { if (distTraveled < 0) @@ -523,7 +336,7 @@ else if (distTraveled >= distances[distances.length - 1]) { } /** - * From an array of distances at coordinates and linear locs, get a distance for that location. + * From an array of distances at coordinates and linear locs, retrieveById a distance for that location. */ private static double getDist(double[] distances, LinearLocation loc) { if (loc.getSegmentIndex() == distances.length - 1) diff --git a/src/main/java/com/conveyal/datatools/editor/models/transit/TripPatternStop.java b/src/main/java/com/conveyal/datatools/editor/models/transit/TripPatternStop.java index bc42a7413..42d77c84a 100755 --- a/src/main/java/com/conveyal/datatools/editor/models/transit/TripPatternStop.java +++ b/src/main/java/com/conveyal/datatools/editor/models/transit/TripPatternStop.java @@ -24,13 +24,9 @@ public class TripPatternStop implements Cloneable, Serializable { public Double shapeDistTraveled; - public TripPatternStop() - { + public TripPatternStop() {} - } - - public TripPatternStop(Stop stop, Integer defaultTravelTime) - { + public TripPatternStop(Stop stop, Integer defaultTravelTime) { this.stopId = stop.id; this.defaultTravelTime = defaultTravelTime; } diff --git a/src/main/java/com/conveyal/datatools/editor/utils/GeoUtils.java b/src/main/java/com/conveyal/datatools/editor/utils/GeoUtils.java index 90da61672..658564275 100644 --- a/src/main/java/com/conveyal/datatools/editor/utils/GeoUtils.java +++ b/src/main/java/com/conveyal/datatools/editor/utils/GeoUtils.java @@ -39,7 +39,7 @@ public static Polygon bufferGeographicPoint (Coordinate point, double dist, int return geometyFactory.createPolygon(ring, holes); } - /** get the distances from the start of the line string to every coordinate along the line string */ + /** retrieve the distances from the start of the line string to every coordinate along the line string */ public static double[] getCoordDistances(LineString line) { double[] coordDist = new double[line.getNumPoints()]; coordDist[0] = 0; diff --git a/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java b/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java index 7ec16020b..4d4496899 100644 --- a/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java +++ b/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java @@ -2,47 +2,38 @@ import com.conveyal.datatools.editor.models.transit.GtfsRouteType; import com.conveyal.datatools.editor.models.transit.TripDirection; -import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationContext; -import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer; import com.fasterxml.jackson.databind.ser.std.StdScalarSerializer; import com.fasterxml.jackson.databind.ser.std.StdSerializer; import com.google.common.io.BaseEncoding; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LineString; import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; import java.time.LocalDate; -import java.time.LocalTime; -//import java.time.format.D; +import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import org.mapdb.Fun.Tuple2; import java.io.IOException; import java.io.UnsupportedEncodingException; -import java.util.List; public class JacksonSerializers { private static final BaseEncoding encoder = BaseEncoding.base64Url(); + public static class Tuple2Serializer extends StdScalarSerializer> { + private static final long serialVersionUID = 884752482339455539L; + public Tuple2Serializer () { super(Tuple2.class, true); } @Override public void serialize(Tuple2 t2, JsonGenerator jgen, - SerializerProvider arg2) throws IOException, - JsonProcessingException { + SerializerProvider arg2) throws IOException { jgen.writeString(serialize(t2)); } @@ -56,14 +47,15 @@ public static String serialize (Tuple2 t2) { } public static class Tuple2Deserializer extends StdScalarDeserializer> { + private static final long serialVersionUID = -9155687065800376769L; + public Tuple2Deserializer () { super(Tuple2.class); } @Override public Tuple2 deserialize(JsonParser jp, - DeserializationContext arg1) throws IOException, - JsonProcessingException { + DeserializationContext arg1) throws IOException { return deserialize(jp.readValueAs(String.class)); } @@ -73,19 +65,20 @@ public static Tuple2 deserialize (String serialized) throws IOEx throw new IOException("Unable to parse value"); } - return new Tuple2(new String(encoder.decode(val[0]), "UTF-8"), new String(encoder.decode(val[1]), "UTF-8")); + return new Tuple2<>(new String(encoder.decode(val[0]), "UTF-8"), new String(encoder.decode(val[1]), "UTF-8")); } } public static class Tuple2IntSerializer extends StdScalarSerializer> { + private static final long serialVersionUID = 3201085724165980819L; + public Tuple2IntSerializer () { super(Tuple2.class, true); } @Override public void serialize(Tuple2 t2, JsonGenerator jgen, - SerializerProvider arg2) throws IOException, - JsonProcessingException { + SerializerProvider arg2) throws IOException { jgen.writeString(serialize(t2)); } @@ -99,14 +92,14 @@ public static String serialize (Tuple2 t2) { } public static class Tuple2IntDeserializer extends StdScalarDeserializer> { + private static final long serialVersionUID = -6787630225359327452L; + public Tuple2IntDeserializer () { super(Tuple2.class); } @Override - public Tuple2 deserialize(JsonParser jp, - DeserializationContext arg1) throws IOException, - JsonProcessingException { + public Tuple2 deserialize(JsonParser jp, DeserializationContext arg1) throws IOException { return deserialize(jp.readValueAs(String.class)); } @@ -116,56 +109,76 @@ public static Tuple2 deserialize (String serialized) throws IOE throw new IOException("Unable to parse value"); } - return new Tuple2(new String(encoder.decode(val[0]), "UTF-8"), Integer.parseInt(val[1])); + return new Tuple2<>(new String(encoder.decode(val[0]), "UTF-8"), Integer.parseInt(val[1])); } } /** serialize local dates as noon GMT epoch times */ public static class LocalDateSerializer extends StdScalarSerializer { + private static final long serialVersionUID = 3153194744968260324L; + public LocalDateSerializer() { super(LocalDate.class, false); } @Override - public void serialize(LocalDate ld, JsonGenerator jgen, - SerializerProvider arg2) throws IOException, - JsonGenerationException { - long millis = ld.atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); - jgen.writeNumber(millis); + public void serialize(LocalDate ld, JsonGenerator jgen, SerializerProvider arg2) throws IOException { + // YYYYMMDD + jgen.writeString(ld.format(DateTimeFormatter.BASIC_ISO_DATE)); } } /** deserialize local dates from GMT epochs */ public static class LocalDateDeserializer extends StdScalarDeserializer { + private static final long serialVersionUID = -1855560624079270379L; + public LocalDateDeserializer () { super(LocalDate.class); } @Override - public LocalDate deserialize(JsonParser jp, - DeserializationContext arg1) throws IOException, - JsonProcessingException { + public LocalDate deserialize(JsonParser jp, DeserializationContext arg1) throws IOException { + LocalDate date; + try { + date = LocalDate.parse(jp.getText(), DateTimeFormatter.BASIC_ISO_DATE); + return date; + } catch (Exception jsonException) { + // This is here to catch any loads of database dumps that happen to have the old java.util.Date + // field type in validationResult. God help us. + System.out.println("Error parsing date value, trying legacy java.util.Date date format"); + try { + date = Instant.ofEpochMilli(jp.getValueAsLong()).atZone(ZoneOffset.UTC).toLocalDate(); + return date; + } catch (Exception e) { + e.printStackTrace(); + } + } - LocalDate date = Instant.ofEpochMilli(jp.getValueAsLong()).atZone(ZoneOffset.UTC).toLocalDate(); - return date; +// System.out.println(jp.getValueAsLong()); +// System.out.println(date.format(DateTimeFormatter.BASIC_ISO_DATE)); + return null; } } /** serialize GtfsRouteType as GTFS integer value */ public static class GtfsRouteTypeSerializer extends StdScalarSerializer { + private static final long serialVersionUID = -8179814233698591433L; + public GtfsRouteTypeSerializer() { super(GtfsRouteType.class, false); } @Override - public void serialize(GtfsRouteType gtfsRouteType, JsonGenerator jgen, + public void serialize(GtfsRouteType gtfsRouteType, JsonGenerator jsonGenerator, SerializerProvider arg2) throws IOException { - jgen.writeNumber(gtfsRouteType.toGtfs()); + jsonGenerator.writeNumber(gtfsRouteType.toGtfs()); } } /** serialize GTFS integer value to GtfsRouteType */ public static class GtfsRouteTypeDeserializer extends StdScalarDeserializer { + private static final long serialVersionUID = 2771914080477037467L; + public GtfsRouteTypeDeserializer () { super(GtfsRouteType.class); } @@ -178,6 +191,8 @@ public GtfsRouteType deserialize(JsonParser jp, } public static class MyDtoNullKeySerializer extends StdSerializer { + private static final long serialVersionUID = -8104007875350340832L; + public MyDtoNullKeySerializer() { this(null); } @@ -188,57 +203,37 @@ public MyDtoNullKeySerializer(Class t) { @Override public void serialize(Object nullKey, JsonGenerator jsonGenerator, SerializerProvider unused) - throws IOException, JsonProcessingException { + throws IOException { jsonGenerator.writeFieldName(""); } } - /** serialize GtfsRouteType as GTFS integer value */ - public static class TripDirectionSerializer extends StdScalarSerializer { - public TripDirectionSerializer() { - super(TripDirection.class, false); - } - - @Override - public void serialize(TripDirection gtfsRouteType, JsonGenerator jgen, - SerializerProvider arg2) throws IOException { - jgen.writeNumber(gtfsRouteType.toGtfs()); - } - } - - /** serialize GTFS integer value to TripDirection */ - public static class TripDirectionDeserializer extends StdScalarDeserializer { - public TripDirectionDeserializer () { super(TripDirection.class); } - - @Override - public TripDirection deserialize(JsonParser jp, - DeserializationContext arg1) throws IOException { - return TripDirection.fromGtfs(jp.getValueAsInt()); - } - } - public static final DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyy-MM-dd"); /** Serialize a local date to an ISO date (year-month-day) */ public static class LocalDateIsoSerializer extends StdScalarSerializer { + private static final long serialVersionUID = 6365116779135936730L; + public LocalDateIsoSerializer () { super(LocalDate.class, false); } @Override - public void serialize(LocalDate localDate, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException, JsonGenerationException { + public void serialize(LocalDate localDate, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException { jsonGenerator.writeString(localDate.format(format)); } } /** Deserialize an ISO date (year-month-day) */ public static class LocalDateIsoDeserializer extends StdScalarDeserializer { + private static final long serialVersionUID = -1703584495462802108L; + public LocalDateIsoDeserializer () { super(LocalDate.class); } @Override - public LocalDate deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException, JsonProcessingException { + public LocalDate deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { return LocalDate.parse(jsonParser.getValueAsString(), format); } diff --git a/src/main/java/com/conveyal/datatools/editor/utils/S3Utils.java b/src/main/java/com/conveyal/datatools/editor/utils/S3Utils.java deleted file mode 100644 index ab8d3dc65..000000000 --- a/src/main/java/com/conveyal/datatools/editor/utils/S3Utils.java +++ /dev/null @@ -1,82 +0,0 @@ -package com.conveyal.datatools.editor.utils; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.conveyal.datatools.manager.DataManager; -import org.apache.commons.io.IOUtils; -import spark.Request; - -import javax.servlet.MultipartConfigElement; -import javax.servlet.ServletException; -import javax.servlet.http.Part; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; - -import static spark.Spark.halt; - -/** - * Created by landon on 8/2/16. - */ -public class S3Utils { - - public static String uploadBranding(Request req, String id) throws IOException, ServletException { - String url; - - String s3Bucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket"); - if (s3Bucket == null) { - halt(400); - } - - // Get file from request - if (req.raw().getAttribute("org.eclipse.jetty.multipartConfig") == null) { - MultipartConfigElement multipartConfigElement = new MultipartConfigElement(System.getProperty("java.io.tmpdir")); - req.raw().setAttribute("org.eclipse.jetty.multipartConfig", multipartConfigElement); - } - -// req.attribute("org.eclipse.jetty.multipartConfig", new MultipartConfigElement("/temp")); - - Part part = req.raw().getPart("file"); - String extension = "." + part.getContentType().split("/", 0)[1]; - File tempFile = File.createTempFile(id, extension); - tempFile.deleteOnExit(); - - InputStream inputStream; - try { - inputStream = part.getInputStream(); - FileOutputStream out = new FileOutputStream(tempFile); - IOUtils.copy(inputStream, out); - } catch (Exception e) { -// LOG.error("Unable to open input stream from upload"); - halt("Unable to read uploaded file"); - } - - try { -// LOG.info("Uploading route branding to S3"); - // Upload file to s3 - AWSCredentials creds; - - // default credentials providers, e.g. IAM role - creds = new DefaultAWSCredentialsProviderChain().getCredentials(); - - String keyName = "branding/" + id + extension; - url = "https://s3.amazonaws.com/" + s3Bucket + "/" + keyName; - AmazonS3 s3client = new AmazonS3Client(creds); - s3client.putObject(new PutObjectRequest( - s3Bucket, keyName, tempFile) - // grant public read - .withCannedAcl(CannedAccessControlList.PublicRead)); - return url; - } - catch (AmazonServiceException ase) { - halt("Error uploading feed to S3"); - return null; - } - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/ConvertMain.java b/src/main/java/com/conveyal/datatools/manager/ConvertMain.java new file mode 100644 index 000000000..00df34272 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/ConvertMain.java @@ -0,0 +1,173 @@ +package com.conveyal.datatools.manager; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.editor.datastore.GlobalTx; +import com.conveyal.datatools.editor.datastore.VersionedDataStore; +import com.conveyal.datatools.editor.jobs.ConvertEditorMapDBToSQL; +import com.conveyal.datatools.editor.models.Snapshot; +import com.conveyal.datatools.manager.controllers.DumpController; +import com.conveyal.datatools.manager.controllers.api.StatusController; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.persistence.Persistence; +import org.apache.commons.io.FileUtils; +import org.eclipse.jetty.util.ConcurrentHashSet; +import org.mapdb.Fun; + +import java.io.File; +import java.nio.charset.Charset; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import static com.conveyal.datatools.manager.DataManager.initializeApplication; +import static com.conveyal.datatools.manager.DataManager.registerRoutes; + +/** + * Main method to run the data migration process from the v2 MapDB based application to the v3 Mongo and SQL-based + * application. The program first seeds the MongoDB with data from a JSON dump of the manager MapDB database. It then + * loads/validates each feed version into the SQL database, and finally it migrates the Editor MapDB to SQL. The JSON + * dump file is provided as a program argument. The Editor MapDB directory is specified in the server.yml config file at + * "application.data.editor_mapdb". This is all run as MonitorableJobs executed through the application's thread pool + * executor. Once all jobs are queued, The application runs on a loop until there are no more active jobs in the jobs + * list. + * + * Run instructions: + * + * java -Xmx6G -cp datatools.jar com.conveyal.datatools.manager.ConvertMain /path/to/env.yml /path/to/server.yml /path/to/dump.json + * + * An optional fourth argument can be provided to force the application to reprocess (load/validate) feed versions that + * have already been processed. + * + * The primary method to run this migration is: + * 1. First run the above java command to migrate the JSON dump and convert the editor mapdb to new snapshots. + * 2. Next run the following java command to clean up the snapshots (the snapshots imported from the JSON dump are not + * updated during the editor MapDB conversion. Rather, MongoDB records are created separately, so the JSON-sourced + * duplicate records need to be deleted and the newly generate records updated with the JSON data): + * java -Xmx6G -cp datatools.jar com.conveyal.datatools.manager.ConvertMain /path/to/env.yml /path/to/server.yml updateSnapshotMetadata=true /path/to/dump.json + * + */ +public class ConvertMain { + // Feed ID constants for testing. + private static final String CORTLAND_FEED_ID = "c5bdff54-82fa-47ce-ad6e-3c6517563992"; + public static final String AMTRAK_FEED_ID = "be5b775b-6811-4522-bbf6-1a408e7cf3f8"; + public static void main(String[] args) throws Exception { + + // Migration code! + + // First, set up application. + initializeApplication(args); + // Register HTTP endpoints so that the status endpoint is available during migration. + registerRoutes(); + + long startTime = System.currentTimeMillis(); + + boolean snapshotsOnly = args.length > 2 && "snapshotsOnly=true".equals(args[2]); + boolean updateSnapshotMetadata = args.length > 2 && "updateSnapshotMetadata=true".equals(args[2]); + + // FIXME remove migrateSingleSnapshot (just for local testing) +// migrateSingleSnapshot(null); + if (updateSnapshotMetadata) { + String jsonString = FileUtils.readFileToString(new File(args[3]), Charset.defaultCharset()); + boolean result = DumpController.updateSnapshotMetadata(jsonString); + if (result) { + System.out.println("Snapshot metadata update successful!"); + } + // Done. + System.exit(0); + } else if (!snapshotsOnly) { + // STEP 1: Load in JSON dump into MongoDB (args 0 and 1 are the config files) + String jsonString = FileUtils.readFileToString(new File(args[2]), Charset.defaultCharset()); + // FIXME: Do we still need to map certain project fields? + DumpController.load(jsonString); + + // STEP 2: For each feed version, load GTFS in Postgres and validate. + boolean force = args.length > 3 && "true".equals(args[3]); + DumpController.validateAll(true, force, null); + } else { + System.out.println("Skipping JSON load and feed version load/validation due to snapshotsOnly flag"); + } + + // STEP 3: For each feed source in MongoDB, load all snapshots (and current editor buffer) into Postgres DB. + // STEP 3A: For each snapshot/editor DB, create a snapshot Mongo object for the feed source with the FeedLoadResult. + migrateEditorFeeds(); + System.out.println("Done queueing!!!!!!!!"); + int totalJobs = StatusController.getAllJobs().size(); + while (!StatusController.filterActiveJobs(StatusController.getAllJobs()).isEmpty()) { + // While there are still active jobs, continue waiting. + ConcurrentHashSet activeJobs = StatusController.filterActiveJobs(StatusController.getAllJobs()); + System.out.println(String.format("%d/%d jobs still active. Checking for completion again in 5 seconds...", activeJobs.size(), totalJobs)); +// System.out.println(String.join(", ", activeJobs.stream().map(job -> job.name).collect(Collectors.toList()))); + int jobsInExecutor = ((ThreadPoolExecutor) DataManager.heavyExecutor).getActiveCount(); + System.out.println(String.format("Jobs in thread pool executor: %d", jobsInExecutor)); + System.out.println(String.format("Jobs completed by executor: %d", ((ThreadPoolExecutor) DataManager.heavyExecutor).getCompletedTaskCount())); + Thread.sleep(5000); + } + long durationInMillis = System.currentTimeMillis() - startTime; + System.out.println(String.format("MIGRATION COMPLETED IN %d SECONDS.", TimeUnit.MILLISECONDS.toSeconds(durationInMillis))); + System.exit(0); + } + + public static boolean migrateEditorFeeds (String ...feedIdsToSkip) { + // Open the Editor MapDB and write a snapshot to the SQL database. + GlobalTx gtx = VersionedDataStore.getGlobalTx(); + try { + long startTime = System.currentTimeMillis(); + int count = 0; + int snapshotCount = gtx.snapshots.values().size(); + System.out.println(snapshotCount + " snapshots to convert"); + + Set feedSourcesEncountered = new HashSet<>(); + // Iterate over the provided snapshots and convert each one. Note: this will skip snapshots for feed IDs that + // don't exist as feed sources in MongoDB. + for (Map.Entry, Snapshot> entry : gtx.snapshots.entrySet()) { + Snapshot snapshot = entry.getValue(); + Fun.Tuple2 key = entry.getKey(); + String feedSourceId = key.a; + // Get feed source from MongoDB. + FeedSource feedSource = Persistence.feedSources.getById(feedSourceId); + if (feedSource != null) { + // Only migrate the feeds that have a feed source record in the MongoDB. + if (feedIdsToSkip != null && Arrays.asList(feedIdsToSkip).contains(feedSourceId)) { + // If list of feed IDs to skip is provided and the current feed ID matches, skip it. + System.out.println("Skipping feed. ID found in list to skip. id: " + feedSourceId); + continue; + } + if (!feedSourcesEncountered.contains(feedSource.id)) { + // If this is the first feed encountered, load the editor buffer. + ConvertEditorMapDBToSQL convertEditorBufferToSQL = new ConvertEditorMapDBToSQL(snapshot.id.a, null); + DataManager.heavyExecutor.execute(convertEditorBufferToSQL); + count++; + } + ConvertEditorMapDBToSQL convertEditorMapDBToSQL = new ConvertEditorMapDBToSQL(snapshot.id.a, snapshot.id.b); + DataManager.heavyExecutor.execute(convertEditorMapDBToSQL); + System.out.println(count + "/" + snapshotCount + " snapshot conversion queued"); + feedSourcesEncountered.add(feedSource.id); + count++; + } else { + System.out.println("Not converting snapshot. Feed source Id does not exist in application data" + feedSourceId); + } + } +// long duration = System.currentTimeMillis() - startTime; +// System.out.println("Converting " + snapshotCount + " snapshots took " + TimeUnit.MILLISECONDS.toMinutes(duration) + " minutes"); + return true; + } catch (Exception e) { + System.out.println("Migrating editor feeds FAILED"); + e.printStackTrace(); + return false; + } finally { + gtx.rollbackIfOpen(); + } + } + + public static boolean migrateSingleSnapshot (Fun.Tuple2 decodedId) { + if (decodedId == null) { + // Use Cortland if no feed provided + decodedId = new Fun.Tuple2<>(CORTLAND_FEED_ID, 12); + } + new ConvertEditorMapDBToSQL(decodedId.a, decodedId.b).run(); + return true; + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/DataManager.java b/src/main/java/com/conveyal/datatools/manager/DataManager.java index 27032471c..49abc3178 100644 --- a/src/main/java/com/conveyal/datatools/manager/DataManager.java +++ b/src/main/java/com/conveyal/datatools/manager/DataManager.java @@ -1,135 +1,239 @@ package com.conveyal.datatools.manager; +import com.bugsnag.Bugsnag; +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.common.utils.CorsFilter; +import com.conveyal.datatools.editor.controllers.EditorLockController; +import com.conveyal.datatools.editor.controllers.api.EditorControllerImpl; +import com.conveyal.datatools.editor.controllers.api.SnapshotController; import com.conveyal.datatools.manager.auth.Auth0Connection; - import com.conveyal.datatools.manager.controllers.DumpController; -import com.conveyal.datatools.manager.controllers.api.*; -import com.conveyal.datatools.editor.controllers.api.*; - +import com.conveyal.datatools.manager.controllers.api.DeploymentController; +import com.conveyal.datatools.manager.controllers.api.FeedSourceController; +import com.conveyal.datatools.manager.controllers.api.FeedVersionController; +import com.conveyal.datatools.manager.controllers.api.GtfsPlusController; +import com.conveyal.datatools.manager.controllers.api.NoteController; +import com.conveyal.datatools.manager.controllers.api.OrganizationController; +import com.conveyal.datatools.manager.controllers.api.ProjectController; +import com.conveyal.datatools.manager.controllers.api.AppInfoController; +import com.conveyal.datatools.manager.controllers.api.StatusController; +import com.conveyal.datatools.manager.controllers.api.UserController; import com.conveyal.datatools.manager.extensions.ExternalFeedResource; import com.conveyal.datatools.manager.extensions.mtc.MtcFeedResource; import com.conveyal.datatools.manager.extensions.transitfeeds.TransitFeedsFeedResource; import com.conveyal.datatools.manager.extensions.transitland.TransitLandFeedResource; - -import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.jobs.FeedUpdater; import com.conveyal.datatools.manager.models.Project; import com.conveyal.datatools.manager.persistence.FeedStore; -import com.conveyal.datatools.manager.utils.CorsFilter; -import com.conveyal.gtfs.GTFSCache; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.conveyal.gtfs.GTFS; +import com.conveyal.gtfs.GraphQLMain; +import com.conveyal.gtfs.loader.Table; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import com.google.common.io.Resources; import org.apache.commons.io.Charsets; +import org.eclipse.jetty.util.ConcurrentHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.utils.IOUtils; +import javax.sql.DataSource; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URL; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.Properties; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; -import static spark.Spark.*; - +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.conveyal.datatools.common.utils.SparkUtils.logRequest; +import static com.conveyal.datatools.common.utils.SparkUtils.logResponse; +import static spark.Spark.after; +import static spark.Spark.before; +import static spark.Spark.exception; +import static spark.Spark.get; +import static spark.Spark.port; + +/** + * This is the singleton where the application is initialized. It currently stores a number of static fields which are + * referenced throughout the application. + */ public class DataManager { + private static final Logger LOG = LoggerFactory.getLogger(DataManager.class); - public static final Logger LOG = LoggerFactory.getLogger(DataManager.class); - - public static JsonNode config; - public static JsonNode serverConfig; + // These fields hold YAML files that represent the server configuration. + private static JsonNode envConfig; + private static JsonNode serverConfig; + // These fields hold YAML files that represent the GTFS and GTFS+ specifications. public static JsonNode gtfsPlusConfig; public static JsonNode gtfsConfig; + // Contains the config-enabled ExternalFeedResource objects that define connections to third-party feed indexes + // (e.g., transit.land, TransitFeeds.com) + // TODO: define type for ExternalFeedResource Strings public static final Map feedResources = new HashMap<>(); - public static Map> userJobsMap = new HashMap<>(); + // Stores jobs underway by user ID. + public static Map> userJobsMap = new ConcurrentHashMap<>(); + // Stores ScheduledFuture objects that kick off runnable tasks (e.g., fetch project feeds at 2:00 AM). public static Map autoFetchMap = new HashMap<>(); + // Scheduled executor that handles running scheduled jobs. public final static ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1); + // ObjectMapper that loads in YAML config files private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory()); - public static GTFSCache gtfsCache; + + + // Heavy executor should contain long-lived CPU-intensive tasks (e.g., feed loading/validation) + public static Executor heavyExecutor = Executors.newFixedThreadPool(4); + // light executor is for tasks for things that should finish quickly (e.g., email notifications) + public static Executor lightExecutor = Executors.newSingleThreadExecutor(); public static String feedBucket; public static String bucketFolder; -// public final AmazonS3Client s3Client; + public static String repoUrl; + public static String commit = ""; + public static boolean useS3; - public static final String apiPrefix = "/api/manager/"; + private static final String API_PREFIX = "/api/manager/"; + private static final String GTFS_API_PREFIX = API_PREFIX + "secure/gtfs/"; + private static final String EDITOR_API_PREFIX = "/api/editor/"; + public static final String publicPath = "(" + API_PREFIX + "|" + EDITOR_API_PREFIX + ")public/.*"; + private static final String DEFAULT_ENV = "configurations/default/env.yml"; + private static final String DEFAULT_CONFIG = "configurations/default/server.yml"; + public static DataSource GTFS_DATA_SOURCE; - public static final String DEFAULT_ENV = "configurations/default/env.yml"; - public static final String DEFAULT_CONFIG = "configurations/default/server.yml"; + public static void main(String[] args) throws IOException { - private static List apiFeedSources = new ArrayList<>(); + initializeApplication(args); - public static void main(String[] args) throws IOException { + // initialize map of auto fetched projects + for (Project project : Persistence.projects.getAll()) { + if (project.autoFetchFeeds) { + ScheduledFuture scheduledFuture = ProjectController.scheduleAutoFeedFetch(project, 1); + autoFetchMap.put(project.id, scheduledFuture); + } + } + + registerRoutes(); - // load config + registerExternalResources(); + } + + static void initializeApplication(String[] args) throws IOException { + // Load configuration files (env.yml and server.yml). loadConfig(args); + loadProperties(); + + String bugsnagKey = getConfigPropertyAsText("BUGSNAG_KEY"); + if (bugsnagKey != null) { + new Bugsnag(bugsnagKey); + } - // set port + // FIXME: hack to statically load FeedStore + LOG.info(FeedStore.class.getSimpleName()); + + // Optionally set port for server. Otherwise, Spark defaults to 4567. if (getConfigProperty("application.port") != null) { port(Integer.parseInt(getConfigPropertyAsText("application.port"))); } - useS3 = getConfigPropertyAsText("application.data.use_s3_storage").equals("true"); + useS3 = "true".equals(getConfigPropertyAsText("application.data.use_s3_storage")); - // initialize map of auto fetched projects - for (Project p : Project.getAll()) { - if (p.autoFetchFeeds != null && autoFetchMap.get(p.id) == null){ - if (p.autoFetchFeeds) { - ScheduledFuture scheduledFuture = ProjectController.scheduleAutoFeedFetch(p.id, p.autoFetchHour, p.autoFetchMinute, 1, p.defaultTimeZone); - autoFetchMap.put(p.id, scheduledFuture); - } - } - } + GTFS_DATA_SOURCE = GTFS.createDataSource( + getConfigPropertyAsText("GTFS_DATABASE_URL"), + getConfigPropertyAsText("GTFS_DATABASE_USER"), + getConfigPropertyAsText("GTFS_DATABASE_PASSWORD") + ); feedBucket = getConfigPropertyAsText("application.data.gtfs_s3_bucket"); bucketFolder = FeedStore.s3Prefix; - if (useS3) { - LOG.info("Initializing gtfs-api for bucket {}/{} and cache dir {}", feedBucket, bucketFolder, FeedStore.basePath); - gtfsCache = new GTFSCache(feedBucket, bucketFolder, FeedStore.basePath); + // Initialize MongoDB storage + Persistence.initialize(); + } + + /** + * Load some properties files to obtain information about this project. + * This method reads in two files: + * - src/main/resources/.properties + * - src/main/resources/git.properties + * + * The git.properties file is automatically generated by the commit-id-plugin. If working with an existing copy of + * the repo from an older commit, you may need to run `mvn package` to have the file get generated. + */ + private static void loadProperties() { + final Properties projectProperties = new Properties(); + InputStream projectPropertiesInputStream = + DataManager.class.getClassLoader().getResourceAsStream(".properties"); + try { + projectProperties.load(projectPropertiesInputStream); + repoUrl = projectProperties.getProperty("repo_url"); + } catch (IOException e) { + LOG.warn("could not read .properties file"); + e.printStackTrace(); } - else { - LOG.info("Initializing gtfs cache locally (no s3 bucket) {}", FeedStore.basePath); - gtfsCache = new GTFSCache(null, FeedStore.basePath); + + final Properties gitProperties = new Properties(); + try { + InputStream gitPropertiesInputStream = + DataManager.class.getClassLoader().getResourceAsStream("git.properties"); + gitProperties.load(gitPropertiesInputStream); + commit = gitProperties.getProperty("git.commit.id"); + } catch (Exception e) { + LOG.warn("could not read git.properties file"); + e.printStackTrace(); } + } + + /** + * Register API routes with Spark. This register core application routes, any routes associated with optional + * modules and sets other core routes (e.g., 404 response) and response headers (e.g., API content type is JSON). + */ + static void registerRoutes() throws IOException { CorsFilter.apply(); + // Initialize GTFS GraphQL API service + // FIXME: Add user permissions check to ensure user has access to feeds. + GraphQLMain.initialize(GTFS_DATA_SOURCE, GTFS_API_PREFIX); + // Register core API routes + AppInfoController.register(API_PREFIX); + ProjectController.register(API_PREFIX); + FeedSourceController.register(API_PREFIX); + FeedVersionController.register(API_PREFIX); + NoteController.register(API_PREFIX); + StatusController.register(API_PREFIX); + OrganizationController.register(API_PREFIX); + + // Register editor API routes + if (isModuleEnabled("editor")) { + + SnapshotController.register(EDITOR_API_PREFIX); + EditorLockController.register(EDITOR_API_PREFIX); - // core controllers - ProjectController.register(apiPrefix); - FeedSourceController.register(apiPrefix); - FeedVersionController.register(apiPrefix); - RegionController.register(apiPrefix); - NoteController.register(apiPrefix); - StatusController.register(apiPrefix); - OrganizationController.register(apiPrefix); - - // Editor routes - if ("true".equals(getConfigPropertyAsText("modules.editor.enabled"))) { String gtfs = IOUtils.toString(DataManager.class.getResourceAsStream("/gtfs/gtfs.yml")); gtfsConfig = yamlMapper.readTree(gtfs); - AgencyController.register(apiPrefix); - CalendarController.register(apiPrefix); - RouteController.register(apiPrefix); - RouteTypeController.register(apiPrefix); - ScheduleExceptionController.register(apiPrefix); - StopController.register(apiPrefix); - TripController.register(apiPrefix); - TripPatternController.register(apiPrefix); - SnapshotController.register(apiPrefix); - FeedInfoController.register(apiPrefix); - FareController.register(apiPrefix); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.AGENCY, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.CALENDAR, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.FARE_ATTRIBUTES, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.FEED_INFO, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.ROUTES, DataManager.GTFS_DATA_SOURCE); + // NOTE: Patterns controller handles updates to nested tables shapes, pattern stops, and frequencies. + new EditorControllerImpl(EDITOR_API_PREFIX, Table.PATTERNS, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.SCHEDULE_EXCEPTIONS, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.STOPS, DataManager.GTFS_DATA_SOURCE); + new EditorControllerImpl(EDITOR_API_PREFIX, Table.TRIPS, DataManager.GTFS_DATA_SOURCE); + // TODO: Add transfers.txt controller? +// GisController.register(EDITOR_API_PREFIX); } // log all exceptions to system.out @@ -137,113 +241,136 @@ public static void main(String[] args) throws IOException { // module-specific controllers if (isModuleEnabled("deployment")) { - DeploymentController.register(apiPrefix); + DeploymentController.register(API_PREFIX); } if (isModuleEnabled("gtfsapi")) { - GtfsApiController.register(apiPrefix); + // Check that update interval (in seconds) and use_extension are specified and initialize feedUpdater. + if (hasConfigProperty("modules.gtfsapi.update_frequency") && hasConfigProperty("modules.gtfsapi.use_extension")) { + String extensionType = getConfigPropertyAsText("modules.gtfsapi.use_extension"); + String extensionFeedBucket = getExtensionPropertyAsText(extensionType, "s3_bucket"); + String extensionBucketFolder = getExtensionPropertyAsText(extensionType, "s3_download_prefix"); + int updateFrequency = getConfigProperty("modules.gtfsapi.update_frequency").asInt(); + if (feedBucket != null && extensionBucketFolder != null) FeedUpdater.schedule(updateFrequency, extensionFeedBucket, extensionBucketFolder); + else LOG.warn("FeedUpdater not initialized. S3 bucket and folder not provided."); + } } if (isModuleEnabled("gtfsplus")) { - GtfsPlusController.register(apiPrefix); + GtfsPlusController.register(API_PREFIX); URL gtfsplus = DataManager.class.getResource("/gtfs/gtfsplus.yml"); gtfsPlusConfig = yamlMapper.readTree(Resources.toString(gtfsplus, Charsets.UTF_8)); } if (isModuleEnabled("user_admin")) { - UserController.register(apiPrefix); + UserController.register(API_PREFIX); } if (isModuleEnabled("dump")) { DumpController.register("/"); } + before(EDITOR_API_PREFIX + "secure/*", ((request, response) -> { + Auth0Connection.checkUser(request); + Auth0Connection.checkEditPrivileges(request); + })); + - before(apiPrefix + "secure/*", (request, response) -> { + before(API_PREFIX + "secure/*", (request, response) -> { if(request.requestMethod().equals("OPTIONS")) return; Auth0Connection.checkUser(request); }); - // lazy load by feed source id if new one is requested -// if ("true".equals(getConfigPropertyAsText("modules.gtfsapi.load_on_fetch"))) { -// before(apiPrefix + "*", (request, response) -> { -// String feeds = request.queryParams("feed"); -// if (feeds != null) { -// String[] feedIds = feeds.split(","); -// for (String feedId : feedIds) { -// FeedSource fs = FeedSource.get(feedId); -// if (fs == null) { -// continue; -// } -// else if (!GtfsApiController.gtfsApi.registeredFeedSources.contains(fs.id) && !apiFeedSources.contains(fs.id)) { -// apiFeedSources.add(fs.id); -// -// LoadGtfsApiFeedJob loadJob = new LoadGtfsApiFeedJob(fs); -// new Thread(loadJob).start(); -// halt(202, "Initializing feed load..."); -// } -// else if (apiFeedSources.contains(fs.id) && !GtfsApiController.gtfsApi.registeredFeedSources.contains(fs.id)) { -// halt(202, "Loading feed, please try again later"); -// } -// } -// -// } -// }); -// } + // FIXME: Add auth check for gtfs-api. This is tricky because it requires extracting the namespace argument from + // the GraphQL query, which could be embedded in the query itself or in the variables JSON. We would then need + // to check against both the snapshots and feed versions collections for the feed source ID to use in the + // permissions check. +// before(GTFS_API_PREFIX + "*", (request, response) -> { +// Auth0Connection.checkGTFSPrivileges(request); +// }); + // return "application/json" for all API routes - after(apiPrefix + "*", (request, response) -> { + before(API_PREFIX + "*", (request, response) -> { + response.type("application/json"); + response.header("Content-Encoding", "gzip"); + }); + before(EDITOR_API_PREFIX + "*", (request, response) -> { response.type("application/json"); response.header("Content-Encoding", "gzip"); }); // load index.html - InputStream stream = DataManager.class.getResourceAsStream("/public/index.html"); - String index = IOUtils.toString(stream).replace("${S3BUCKET}", getConfigPropertyAsText("application.assets_bucket")); - stream.close(); + final String index = resourceToString("/public/index.html") + .replace("${S3BUCKET}", getConfigPropertyAsText("application.assets_bucket")); + final String auth0html = resourceToString("/public/auth0-silent-callback.html"); + + // auth0 silent callback + get("/api/auth0-silent-callback", (request, response) -> { + response.type("text/html"); + return auth0html; + }); - // return 404 for any api response that's not found - get(apiPrefix + "*", (request, response) -> { - halt(404); + ///////////////// Final API routes ///////////////////// + + // Return 404 for any API path that is not configured. + // IMPORTANT: Any API paths must be registered before this halt. + get("/api/" + "*", (request, response) -> { + haltWithMessage(request, 404, "No API route configured for this path."); return null; }); - -// // return assets as byte array -// get("/assets/*", (request, response) -> { -// try (InputStream stream = DataManager.class.getResourceAsStream("/public" + request.pathInfo())) { -// return IOUtils.toByteArray(stream); -// } catch (IOException e) { -// return null; -// } -// }); + // return index.html for any sub-directory get("/*", (request, response) -> { response.type("text/html"); return index; }); - registerExternalResources(); + + // add logger + before((request, response) -> { + logRequest(request, response); + }); + + // add logger + after((request, response) -> { + logResponse(request, response); + }); } + /** + * Convenience function to check existence of a config property (nested fields defined by dot notation + * "data.use_s3_storage") in either server.yml or env.yml. + */ public static boolean hasConfigProperty(String name) { // try the server config first, then the main config - boolean fromServerConfig = hasConfigProperty(serverConfig, name); - if(fromServerConfig) return fromServerConfig; + return hasConfigProperty(serverConfig, name) || hasConfigProperty(envConfig, name); + } - return hasConfigProperty(config, name); + private static String resourceToString (String resourceName) { + try (InputStream stream = DataManager.class.getResourceAsStream(resourceName)) { + return IOUtils.toString(stream); + } catch (IOException e) { + e.printStackTrace(); + throw new RuntimeException("Could not find resource."); + } } - public static boolean hasConfigProperty(JsonNode config, String name) { + private static boolean hasConfigProperty(JsonNode config, String name) { String parts[] = name.split("\\."); JsonNode node = config; - for(int i = 0; i < parts.length; i++) { + for (int i = 0; i < parts.length; i++) { if(node == null) return false; node = node.get(parts[i]); } return node != null; } + /** + * Convenience function to get a config property (nested fields defined by dot notation "data.use_s3_storage") as + * JsonNode. Checks server.yml, then env.yml, and finally returns null if property is not found. + */ public static JsonNode getConfigProperty(String name) { // try the server config first, then the main config JsonNode fromServerConfig = getConfigProperty(serverConfig, name); if(fromServerConfig != null) return fromServerConfig; - return getConfigProperty(config, name); + return getConfigProperty(envConfig, name); } - public static JsonNode getConfigProperty(JsonNode config, String name) { + private static JsonNode getConfigProperty(JsonNode config, String name) { String parts[] = name.split("\\."); JsonNode node = config; for(int i = 0; i < parts.length; i++) { @@ -256,6 +383,9 @@ public static JsonNode getConfigProperty(JsonNode config, String name) { return node; } + /** + * Get a config property (nested fields defined by dot notation "data.use_s3_storage") as text. + */ public static String getConfigPropertyAsText(String name) { JsonNode node = getConfigProperty(name); if (node != null) { @@ -265,15 +395,29 @@ public static String getConfigPropertyAsText(String name) { return null; } } + public static String getExtensionPropertyAsText (String extensionType, String name) { + return getConfigPropertyAsText(String.join(".", "extensions", extensionType.toLowerCase(), name)); + } + /** + * Checks if an application module (e.g., editor, GTFS+) has been enabled. The UI must also have the module + * enabled in order to use. + */ public static boolean isModuleEnabled(String moduleName) { - return "true".equals(getConfigPropertyAsText("modules." + moduleName + ".enabled")); + return hasConfigProperty("modules." + moduleName) && "true".equals(getConfigPropertyAsText("modules." + moduleName + ".enabled")); } + /** + * Checks if an extension has been enabled. Extensions primarily define external resources + * the application can sync with. The UI config must also have the extension enabled in order to use. + */ public static boolean isExtensionEnabled(String extensionName) { - return "true".equals(getConfigPropertyAsText("extensions." + extensionName + ".enabled")); + return hasConfigProperty("extensions." + extensionName) && "true".equals(getExtensionPropertyAsText(extensionName, "enabled")); } + /** + * Check if extension is enabled and, if so, register it. + */ private static void registerExternalResources() { if (isExtensionEnabled("mtc")) { @@ -291,26 +435,36 @@ private static void registerExternalResources() { registerExternalResource(new TransitFeedsFeedResource()); } } - private static void loadConfig (String[] args) throws IOException { - FileInputStream configStream; + + /** + * Load config files from either program arguments or (if no args specified) from + * default configuration file locations. Config fields are retrieved with getConfigProperty. + */ + private static void loadConfig(String[] args) throws IOException { + FileInputStream envConfigStream; FileInputStream serverConfigStream; if (args.length == 0) { LOG.warn("Using default env.yml: {}", DEFAULT_ENV); LOG.warn("Using default server.yml: {}", DEFAULT_CONFIG); - configStream = new FileInputStream(new File(DEFAULT_ENV)); + envConfigStream = new FileInputStream(new File(DEFAULT_ENV)); serverConfigStream = new FileInputStream(new File(DEFAULT_CONFIG)); } else { LOG.info("Loading env.yml: {}", args[0]); LOG.info("Loading server.yml: {}", args[1]); - configStream = new FileInputStream(new File(args[0])); + envConfigStream = new FileInputStream(new File(args[0])); serverConfigStream = new FileInputStream(new File(args[1])); } - config = yamlMapper.readTree(configStream); + envConfig = yamlMapper.readTree(envConfigStream); serverConfig = yamlMapper.readTree(serverConfigStream); } + + /** + * Register external feed resource (e.g., transit.land) with feedResources map. + * This essentially "enables" the syncing and storing feeds from the external resource. + */ private static void registerExternalResource(ExternalFeedResource resource) { feedResources.put(resource.getResourceType(), resource); } diff --git a/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java b/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java new file mode 100644 index 000000000..ac053f67d --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java @@ -0,0 +1,106 @@ +package com.conveyal.datatools.manager; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import static com.conveyal.datatools.manager.DataManager.initializeApplication; +import static com.conveyal.datatools.manager.DataManager.registerRoutes; + +/** + * Main method that performs batch SQL updates on optionally filtered set of namespaces over the GTFS SQL database + * connection specified in the configuration files (env.yml). + * + * For example, this script can add a column for all `routes` tables for schemas that do not have a null value for + * the filename field in the feeds table. In effect, this would alter only those feeds that are editor snapshots. + * + * Argument descriptions: + * 1. path to env.yml + * 2. path to server.yml + * 3. update sql statement to apply to optionally filtered feeds + * 4. field to filter feeds on + * 5. value (corresponding to field in arg 3) to filter feeds on (omit to use NULL as value) + * + * Sample arguments: + * + * "/path/to/config/env.yml" "/path/to/config/server.yml" "alter table %s.routes add column some_column_name int" filename + * + * "/path/to/config/env.yml" "/path/to/config/server.yml" "alter table %s.routes add column some_column_name int" filename /tmp/gtfs.zip + */ +public class UpdateSQLFeedsMain { + + public static void main(String[] args) throws IOException, SQLException { + // First, set up application. + initializeApplication(args); + // Register HTTP endpoints so that the status endpoint is available during migration. + registerRoutes(); + // Load args (first and second args are used for config files). + // Update SQL string should be contained within third argument with %s specifier where namespace should be + // substituted. + String updateSql = args[2]; + // The next arguments will apply a where clause to conditionally to apply the updates. + String field = args.length > 3 ? args[3] : null; + String value = args.length > 4 ? args[4] : null; + List failedNamespace = updateFeedsWhere(updateSql, field, value); + System.out.println("Finished!"); + System.out.println("Failed namespaces: " + String.join(", ", failedNamespace)); + System.exit(0); + } + + /** + * + * @param updateSql + * @param field + * @param value + * @return + * @throws SQLException + */ + private static List updateFeedsWhere(String updateSql, String field, String value)throws SQLException { + if (updateSql == null) throw new RuntimeException("Update SQL must not be null!"); + // Keep track of failed namespaces for convenient printing at end of method. + List failedNamespace = new ArrayList<>(); + // Select feeds migrated from MapDB + String selectFeedsSql = "select namespace from feeds"; + if (field != null) { + // Add where clause if field is not null + // NOTE: if value is null, where clause will be executed accordingly (i.e., WHERE field = null) + String operator = value == null ? "IS NULL" : "= ?"; + selectFeedsSql = String.format("%s where %s %s", selectFeedsSql, field, operator); + } + Connection connection = DataManager.GTFS_DATA_SOURCE.getConnection(); + // Set auto-commit to true. + connection.setAutoCommit(true); + PreparedStatement selectStatement = connection.prepareStatement(selectFeedsSql); + // Set filter value if not null (otherwise, IS NULL has already been populated). + if (value != null) { + selectStatement.setString(1, value); + } + System.out.println(selectStatement.toString()); + ResultSet resultSet = selectStatement.executeQuery(); + int successCount = 0; + while (resultSet.next()) { + String namespace = resultSet.getString(1); + String updateLocationSql = String.format(updateSql, namespace); + Statement statement = connection.createStatement(); + try { + int updated = statement.executeUpdate(updateLocationSql); + System.out.println(updateLocationSql); + System.out.println(String.format("Updated rows: %d", updated)); + successCount++; + } catch (SQLException e) { + // The stops table likely did not exist for the schema. + e.printStackTrace(); + failedNamespace.add(namespace); + } + } + System.out.println(String.format("Updated %d tables.", successCount)); + // No need to commit the transaction because of auto-commit + connection.close(); + return failedNamespace; + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java index 347f82b24..d8c1f4ab4 100644 --- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java +++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java @@ -1,87 +1,195 @@ package com.conveyal.datatools.manager.auth; +import com.auth0.jwt.JWTVerifier; +import com.auth0.jwt.JWTVerifyException; +import com.auth0.jwt.pem.PemReader; import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.Request; -import javax.net.ssl.HttpsURLConnection; -import java.io.BufferedReader; -import java.io.DataOutputStream; -import java.io.InputStreamReader; -import java.net.URL; +import java.io.IOException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.PublicKey; +import java.security.SignatureException; +import java.security.spec.InvalidKeySpecException; +import java.util.Map; -import static spark.Spark.halt; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.conveyal.datatools.manager.DataManager.getConfigPropertyAsText; +import static com.conveyal.datatools.manager.DataManager.hasConfigProperty; /** + * This handles verifying the Auth0 token passed in the Auth header of Spark HTTP requests. + * * Created by demory on 3/22/16. */ public class Auth0Connection { + public static final String APP_METADATA = "app_metadata"; + public static final String USER_METADATA = "user_metadata"; + public static final String SCOPE = "http://datatools"; + public static final String SCOPED_APP_METADATA = String.join("/", SCOPE, APP_METADATA); + public static final String SCOPED_USER_METADATA = String.join("/", SCOPE, USER_METADATA); + private static final ObjectMapper MAPPER = new ObjectMapper(); private static final Logger LOG = LoggerFactory.getLogger(Auth0Connection.class); - public static void checkUser(Request req) { - String token = getToken(req); + private static JWTVerifier verifier; - if(token == null) { - halt(401, "Could not find authorization token"); + /** + * Check the incoming API request for the user token (and verify it) and assign as the "user" attribute on the + * incoming request object for use in downstream controllers. + * @param req Spark request object + */ + public static void checkUser(Request req) { + if (authDisabled()) { + // If in a development environment, assign a mock profile to request attribute and skip authentication. + req.attribute("user", new Auth0UserProfile("mock@example.com", "user_id:string")); + return; } - + // Check that auth header is present and formatted correctly (Authorization: Bearer [token]). + final String authHeader = req.headers("Authorization"); + if (authHeader == null) { + haltWithMessage(req, 401, "Authorization header is missing."); + } + String[] parts = authHeader.split(" "); + if (parts.length != 2 || !"bearer".equals(parts[0].toLowerCase())) { + haltWithMessage(req, 401, String.format("Authorization header is malformed: %s", authHeader)); + } + // Retrieve token from auth header. + String token = parts[1]; + if (token == null) { + haltWithMessage(req, 401, "Could not find authorization token"); + } + // Handle getting the verifier outside of the below verification try/catch, which is intended to catch issues + // with the client request. (getVerifier has its own exception/halt handling). + verifier = getVerifier(req); + // Validate the JWT and cast into the user profile, which will be attached as an attribute on the request object + // for downstream controllers to check permissions. try { - Auth0UserProfile profile = getUserProfile(token); + Map jwt = verifier.verify(token); + remapTokenValues(jwt); + Auth0UserProfile profile = MAPPER.convertValue(jwt, Auth0UserProfile.class); + // The user attribute is used on the server side to check user permissions and does not have all of the + // fields that the raw Auth0 profile string does. req.attribute("user", profile); - } - catch(Exception e) { -// e.printStackTrace(); - LOG.warn("Could not verify user", e); - halt(401, "Could not verify user"); + } catch (Exception e) { + LOG.warn("Login failed to verify with our authorization provider.", e); + haltWithMessage(req, 401, "Could not verify user's token"); } } - public static String getToken(Request req) { - String token = null; - - final String authorizationHeader = req.headers("Authorization"); - if (authorizationHeader == null) return null; - - // check format (Authorization: Bearer [token]) - String[] parts = authorizationHeader.split(" "); - if (parts.length != 2) return null; - - String scheme = parts[0]; - String credentials = parts[1]; - - if (scheme.equals("Bearer")) token = credentials; - return token; + /** + * Choose the correct JWT verification algorithm (based on the values present in env.yml config) and get the + * respective verifier. + */ + private static JWTVerifier getVerifier(Request req) { + if (verifier == null) { + try { + if (hasConfigProperty("AUTH0_SECRET")) { + // Use HS256 algorithm to verify token (uses client secret). + byte[] decodedSecret = new org.apache.commons.codec.binary.Base64().decode(getConfigPropertyAsText("AUTH0_SECRET")); + verifier = new JWTVerifier(decodedSecret); + } else if (hasConfigProperty("AUTH0_PUBLIC_KEY")) { + // Use RS256 algorithm to verify token (uses public key/.pem file). + PublicKey publicKey = PemReader.readPublicKey(getConfigPropertyAsText("AUTH0_PUBLIC_KEY")); + verifier = new JWTVerifier(publicKey); + } else throw new IllegalStateException("Auth0 public key or secret token must be defined in config (env.yml)."); + } catch (IllegalStateException | NullPointerException | NoSuchAlgorithmException | IOException | NoSuchProviderException | InvalidKeySpecException e) { + LOG.error("Auth0 verifier configured incorrectly."); + e.printStackTrace(); + haltWithMessage(req, 500, "Server authentication configured incorrectly.", e); + } + } + return verifier; } - public static Auth0UserProfile getUserProfile(String token) throws Exception { - - URL url = new URL("https://" + DataManager.getConfigPropertyAsText("AUTH0_DOMAIN") + "/tokeninfo"); - HttpsURLConnection con = (HttpsURLConnection) url.openConnection(); - - //add request header - con.setRequestMethod("POST"); + /** + * Handle mapping token values to the expected keys. This accounts for app_metadata and user_metadata that have been + * scoped to conform with OIDC (i.e., how newer Auth0 accounts structure the user profile) as well as the user_id -> + * sub mapping. + */ + private static void remapTokenValues(Map jwt) { + // If token did not contain app_metadata or user_metadata, add the scoped values to the decoded token object. + if (!jwt.containsKey(APP_METADATA) && jwt.containsKey(SCOPED_APP_METADATA)) { + jwt.put(APP_METADATA, jwt.get(SCOPED_APP_METADATA)); + } + if (!jwt.containsKey(USER_METADATA) && jwt.containsKey(SCOPED_USER_METADATA)) { + jwt.put(USER_METADATA, jwt.get(SCOPED_USER_METADATA)); + } + // Do the same for user_id -> sub + if (!jwt.containsKey("user_id") && jwt.containsKey("sub")) { + jwt.put("user_id", jwt.get("sub")); + } + // Remove scoped metadata objects to clean up user profile object. + jwt.remove(SCOPED_APP_METADATA); + jwt.remove(SCOPED_USER_METADATA); + } - String urlParameters = "id_token=" + token; + /** + * Check that the user has edit privileges for the feed ID specified. NOTE: the feed ID provided in the request will + * represent a feed source, not a specific SQL namespace that corresponds to a feed version or specific set of GTFS + * tables in the database. + */ + public static void checkEditPrivileges(Request request) { + if (authDisabled()) { + // If in a development environment, skip privileges check. + return; + } + Auth0UserProfile userProfile = request.attribute("user"); + String feedId = request.queryParams("feedId"); + if (feedId == null) { + // Some editor requests (e.g., update snapshot) specify the feedId as a parameters in the request (not a + // query parameter). + String[] parts = request.pathInfo().split("/"); + feedId = parts[parts.length - 1]; + } + FeedSource feedSource = feedId != null ? Persistence.feedSources.getById(feedId) : null; + if (feedSource == null) { + LOG.warn("feedId {} not found", feedId); + haltWithMessage(request, 400, "Must provide valid feedId parameter"); + } - // Send post request - con.setDoOutput(true); - DataOutputStream wr = new DataOutputStream(con.getOutputStream()); - wr.writeBytes(urlParameters); - wr.flush(); - wr.close(); + if (!request.requestMethod().equals("GET")) { + if (!userProfile.canEditGTFS(feedSource.organizationId(), feedSource.projectId, feedSource.id)) { + LOG.warn("User {} cannot edit GTFS for {}", userProfile.email, feedId); + haltWithMessage(request, 403, "User does not have permission to edit GTFS for feedId"); + } + } + } - BufferedReader in = new BufferedReader(new InputStreamReader(con.getInputStream())); - String inputLine; - StringBuffer response = new StringBuffer(); + /** + * Check whether authentication has been disabled via the DISABLE_AUTH config variable. + */ + public static boolean authDisabled() { + return DataManager.hasConfigProperty("DISABLE_AUTH") && "true".equals(getConfigPropertyAsText("DISABLE_AUTH")); + } - while ((inputLine = in.readLine()) != null) { - response.append(inputLine); + /** + * TODO: Check that user has access to query namespace provided in GraphQL query (see https://github.com/catalogueglobal/datatools-server/issues/94). + */ + public static void checkGTFSPrivileges(Request request) { + Auth0UserProfile userProfile = request.attribute("user"); + String feedId = request.queryParams("feedId"); + if (feedId == null) { + String[] parts = request.pathInfo().split("/"); + feedId = parts[parts.length - 1]; + } + FeedSource feedSource = feedId != null ? Persistence.feedSources.getById(feedId) : null; + if (feedSource == null) { + LOG.warn("feedId {} not found", feedId); + haltWithMessage(request, 400, "Must provide valid feedId parameter"); } - in.close(); - ObjectMapper m = new ObjectMapper(); - return m.readValue(response.toString(), Auth0UserProfile.class); + if (!request.requestMethod().equals("GET")) { + if (!userProfile.canEditGTFS(feedSource.organizationId(), feedSource.projectId, feedSource.id)) { + LOG.warn("User {} cannot edit GTFS for {}", userProfile.email, feedId); + haltWithMessage(request, 403, "User does not have permission to edit GTFS for feedId"); + } + } } } diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java index a6511c256..f6e21918e 100644 --- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java +++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java @@ -1,18 +1,12 @@ package com.conveyal.datatools.manager.auth; import com.conveyal.datatools.manager.DataManager; -import com.fasterxml.jackson.annotation.JsonCreator; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Date; import java.util.List; -import java.util.Map; /** * Created by demory on 1/18/16. @@ -24,10 +18,19 @@ public class Auth0UserProfile { String user_id; AppMetadata app_metadata; - public Auth0UserProfile() { + public Auth0UserProfile() {} + + /** + * Constructor for creating a mock user (app admin) for testing environment. + * @param email + * @param user_id + */ + public Auth0UserProfile(String email, String user_id) { + setEmail(email); + setUser_id(user_id); + setApp_metadata(new AppMetadata()); } - public String getUser_id() { return user_id; } @@ -60,15 +63,15 @@ public void setDatatoolsInfo(DatatoolsInfo datatoolsInfo) { @JsonIgnoreProperties(ignoreUnknown = true) public static class AppMetadata { - ObjectMapper mapper = new ObjectMapper(); @JsonProperty("datatools") List datatools; - public AppMetadata() { - } + public AppMetadata() {} @JsonIgnore public void setDatatoolsInfo(DatatoolsInfo datatools) { + if (Auth0Connection.authDisabled()) return; + for(int i = 0; i < this.datatools.size(); i++) { if (this.datatools.get(i).clientId.equals(DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"))) { this.datatools.set(i, datatools); @@ -77,6 +80,8 @@ public void setDatatoolsInfo(DatatoolsInfo datatools) { } @JsonIgnore public DatatoolsInfo getDatatoolsInfo() { + if (Auth0Connection.authDisabled()) return null; + for(int i = 0; i < this.datatools.size(); i++) { DatatoolsInfo dt = this.datatools.get(i); if (dt.clientId.equals(DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"))) { @@ -95,8 +100,7 @@ public static class DatatoolsInfo { Permission[] permissions; Subscription[] subscriptions; - public DatatoolsInfo() { - } + public DatatoolsInfo() {} public DatatoolsInfo(String clientId, Project[] projects, Permission[] permissions, Organization[] organizations, Subscription[] subscriptions) { this.clientId = clientId; @@ -135,8 +139,7 @@ public static class Project { Permission[] permissions; String[] defaultFeeds; - public Project() { - } + public Project() {} public Project(String project_id, Permission[] permissions, String[] defaultFeeds) { this.project_id = project_id; @@ -161,8 +164,7 @@ public static class Permission { String type; String[] feeds; - public Permission() { - } + public Permission() {} public Permission(String type, String[] feeds) { this.type = type; @@ -208,8 +210,7 @@ public static class Subscription { String type; String[] target; - public Subscription() { - } + public Subscription() {} public Subscription(String type, String[] target) { this.type = type; @@ -244,6 +245,9 @@ public boolean hasProject(String projectID, String organizationId) { } public boolean canAdministerApplication() { + // NOTE: user can administer application by default if running without authentication + if (Auth0Connection.authDisabled()) return true; + if(app_metadata.getDatatoolsInfo() != null && app_metadata.getDatatoolsInfo().permissions != null) { for(Permission permission : app_metadata.getDatatoolsInfo().permissions) { if(permission.type.equals("administer-application")) { @@ -255,8 +259,9 @@ public boolean canAdministerApplication() { } public boolean canAdministerOrganization() { - if(app_metadata.getDatatoolsInfo() != null && app_metadata.getDatatoolsInfo().organizations != null) { - Organization org = app_metadata.getDatatoolsInfo().organizations[0]; + if (canAdministerApplication()) return true; + Organization org = getAuth0Organization(); + if(app_metadata.getDatatoolsInfo() != null && org != null) { for(Permission permission : org.permissions) { if(permission.type.equals("administer-organization")) { return true; @@ -369,17 +374,19 @@ public boolean checkFeedPermission(Project project, String feedID, String permis // check for permission-specific feeds for (Permission permission : project.permissions) { if(permission.type.equals(permissionType)) { + // if specific feeds apply to permission (rather than default set), reassign feeds list if(permission.feeds != null) { feeds = permission.feeds; } + // if permission is found in project, check that it applies to the feed requested + for(String thisFeedID : feeds) { + if (thisFeedID.equals(feedID) || thisFeedID.equals("*")) { + return true; + } + } } } - - for(String thisFeedID : feeds) { - if (thisFeedID.equals(feedID) || thisFeedID.equals("*")) { - return true; - } - } + // if no permissionType + feedID combo was found return false; } @@ -387,7 +394,7 @@ public boolean checkFeedPermission(Project project, String feedID, String permis public com.conveyal.datatools.manager.models.Organization getOrganization () { Organization[] orgs = getApp_metadata().getDatatoolsInfo().organizations; if (orgs != null && orgs.length != 0) { - return orgs[0] != null ? com.conveyal.datatools.manager.models.Organization.get(orgs[0].organizationId) : null; + return orgs[0] != null ? Persistence.organizations.getById(orgs[0].organizationId) : null; } return null; } diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java index e6b35dc8b..e30acb74f 100644 --- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java +++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java @@ -22,17 +22,25 @@ import java.util.HashSet; /** - * Created by landon on 4/26/16. + * This class contains methods for querying Auth0 users using the Auth0 User Management API. Auth0 docs describing the + * searchable fields and query syntax are here: https://auth0.com/docs/api/management/v2/user-search */ public class Auth0Users { - private static String AUTH0_DOMAIN = DataManager.getConfigPropertyAsText("AUTH0_DOMAIN"); - private static String AUTH0_API_TOKEN = DataManager.getConfigPropertyAsText("AUTH0_TOKEN"); - private static ObjectMapper mapper = new ObjectMapper(); + private static final String AUTH0_DOMAIN = DataManager.getConfigPropertyAsText("AUTH0_DOMAIN"); + private static final String AUTH0_API_TOKEN = DataManager.getConfigPropertyAsText("AUTH0_TOKEN"); + private static final String clientId = DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"); + private static final ObjectMapper mapper = new ObjectMapper(); private static final Logger LOG = LoggerFactory.getLogger(Auth0Users.class); + /** + * Constructs a user search query URL. + * @param searchQuery search query to perform (null value implies default query) + * @param page which page of users to return + * @param perPage number of users to return per page + * @param includeTotals whether to include the total number of users in search results + * @return URI to perform the search query + */ private static URI getUrl(String searchQuery, int page, int perPage, boolean includeTotals) { - String clientId = DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"); - // always filter users by datatools client_id String defaultQuery = "app_metadata.datatools.client_id:" + clientId; URIBuilder builder = new URIBuilder(); @@ -63,6 +71,9 @@ private static URI getUrl(String searchQuery, int page, int perPage, boolean inc return uri; } + /** + * Perform user search query, returning results as a JSON string. + */ private static String doRequest(URI uri) { LOG.info("Auth0 getUsers URL=" + uri); String charset = "UTF-8"; @@ -91,12 +102,26 @@ private static String doRequest(URI uri) { return result; } + /** + * Wrapper method for performing user search with default per page count. + * @return JSON string of users matching search query + */ public static String getAuth0Users(String searchQuery, int page) { URI uri = getUrl(searchQuery, page, 10, false); return doRequest(uri); } + /** + * Wrapper method for performing user search with default per page count and page number = 0. + */ + public static String getAuth0Users(String queryString) { + return getAuth0Users(queryString, 0); + } + + /** + * Get all users for this application (using the default search). + */ public static Collection getAll () { Collection users = new HashSet<>(); @@ -111,6 +136,9 @@ public static Collection getAll () { return users; } + /** + * Get a single Auth0 user for the specified ID. + */ public static Auth0UserProfile getUserById(String id) { URIBuilder builder = new URIBuilder(); @@ -133,23 +161,28 @@ public static Auth0UserProfile getUserById(String id) { return user; } + /** + * Get users subscribed to a given target ID. + */ public static String getUsersBySubscription(String subscriptionType, String target) { return getAuth0Users("app_metadata.datatools.subscriptions.type:" + subscriptionType + " AND app_metadata.datatools.subscriptions.target:" + target); } + /** + * Get users belong to a specified organization. + */ public static String getUsersForOrganization(String organizationId) { return getAuth0Users("app_metadata.datatools.organizations.organization_id:" + organizationId); } - public static String getAuth0Users(String queryString) { - return getAuth0Users(queryString, 0); - } - - public static JsonNode getAuth0UserCount(String searchQuery) throws IOException { + /** + * Get number of users for the application. + */ + public static int getAuth0UserCount(String searchQuery) throws IOException { URI uri = getUrl(searchQuery, 0, 1, true); String result = doRequest(uri); JsonNode jsonNode = new ObjectMapper().readTree(result); - return jsonNode.get("total"); + return jsonNode.get("total").asInt(); } } diff --git a/src/main/java/com/conveyal/datatools/manager/codec/IntArrayCodec.java b/src/main/java/com/conveyal/datatools/manager/codec/IntArrayCodec.java new file mode 100644 index 000000000..342c26e51 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/codec/IntArrayCodec.java @@ -0,0 +1,40 @@ +package com.conveyal.datatools.manager.codec; + +import gnu.trove.list.TIntList; +import gnu.trove.list.array.TIntArrayList; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; + +public class IntArrayCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final int[] value, final EncoderContext encoderContext) { + writer.writeStartArray(); + for (int v : value) { + writer.writeInt32(v); + } + writer.writeEndArray(); + } + + @Override + public int[] decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartArray(); + + TIntList intList = new TIntArrayList(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + intList.add(reader.readInt32()); + } + + reader.readEndArray(); + return intList.toArray(); + } + + @Override + public Class getEncoderClass() { + return int[].class; + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java b/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java new file mode 100644 index 000000000..f3969f633 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java @@ -0,0 +1,50 @@ +package com.conveyal.datatools.manager.codec; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; + +/** + * Created by landon on 9/6/17. + */ +public class LocalDateCodec implements Codec { + private static final Logger LOG = LoggerFactory.getLogger(LocalDateCodec.class); + @Override + public void encode(final BsonWriter writer, final LocalDate value, final EncoderContext encoderContext) { + writer.writeString(value.format(DateTimeFormatter.BASIC_ISO_DATE)); + } + + @Override + public LocalDate decode(final BsonReader reader, final DecoderContext decoderContext) { + LocalDate date; + try { + date = LocalDate.parse(reader.readString(), DateTimeFormatter.BASIC_ISO_DATE); + return date; + } catch (Exception jsonException) { + // This is here to catch any loads of database dumps that happen to have the old java.util.Date + // field type in validationResult. God help us. + LOG.error("Error parsing date value, trying legacy java.util.Date date format"); + try { + date = Instant.ofEpochMilli(reader.readInt64()).atZone(ZoneOffset.UTC).toLocalDate(); + return date; + } catch (Exception e) { + e.printStackTrace(); + } + } + return null; + } + + @Override + public Class getEncoderClass() { + return LocalDate.class; + } +} \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/codec/URLCodec.java b/src/main/java/com/conveyal/datatools/manager/codec/URLCodec.java new file mode 100644 index 000000000..6628d2060 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/codec/URLCodec.java @@ -0,0 +1,35 @@ +package com.conveyal.datatools.manager.codec; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; + +import java.net.MalformedURLException; +import java.net.URL; + +/** + * Created by landon on 9/6/17. + */ +public class URLCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final URL value, final EncoderContext encoderContext) { + writer.writeString(value.toString()); + } + + @Override + public URL decode(final BsonReader reader, final DecoderContext decoderContext) { + try { + return new URL(reader.readString()); + } catch (MalformedURLException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public Class getEncoderClass() { + return URL.class; + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java b/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java index 0e2c1ed15..d8c4d94f9 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java @@ -1,17 +1,20 @@ package com.conveyal.datatools.manager.controllers; -import com.conveyal.datatools.manager.auth.Auth0UserProfile; -import com.conveyal.datatools.manager.auth.Auth0Users; +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.jobs.ProcessSingleFeedJob; +import com.conveyal.datatools.manager.jobs.ValidateFeedJob; import com.conveyal.datatools.manager.models.Deployment; import com.conveyal.datatools.manager.models.ExternalFeedSourceProperty; import com.conveyal.datatools.manager.models.FeedSource; -import com.conveyal.datatools.manager.models.FeedValidationResult; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.JsonViews; import com.conveyal.datatools.manager.models.Note; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.conveyal.gtfs.validator.json.LoadStatus; +import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -21,22 +24,31 @@ import spark.Response; import java.io.IOException; +import java.net.MalformedURLException; import java.net.URL; import java.util.Collection; import java.util.Date; import java.util.Iterator; +import java.util.List; import java.util.Map; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; import static spark.Spark.*; /** - * Created by demory on 5/25/16. + * This class contains HTTP endpoints that should ONLY be used in controlled environments (i.e., when the application is + * not accessible on the Internet. The endpoints allow for dumping the entirety of the manager application data + * (projects, feed sources, feed versions, etc.) into a JSON file. NOTE: this does not include actual GTFS feed contents + * stored in PostgreSQL, but rather the metadata about these feeds and how they are organized into feed sources and + * projects. This allows for backing up and restoring the MongoDB data. */ public class DumpController { public static final Logger LOG = LoggerFactory.getLogger(DumpController.class); /** - * Represents a snapshot of the database. This require loading the entire database into RAM. + * Represents a snapshot of the database. This requires loading the entire database into RAM. * This shouldn't be an issue, though, as the feeds are stored separately. This is only metadata. */ public static class DatabaseState { @@ -44,114 +56,196 @@ public static class DatabaseState { public Collection feedSources; public Collection feedVersions; public Collection notes; - // public Collection users; + // Users are maintained in Auth0 database. + // public Collection users; public Collection deployments; public Collection externalProperties; + public Collection snapshots; } - +// private static JsonManager json = - new JsonManager(DatabaseState.class, JsonViews.DataDump.class); + new JsonManager<>(DatabaseState.class, JsonViews.DataDump.class); + + + /** + * Method to handle a web request for a legacy object + */ + private static boolean getLegacy(Request req, Response response) { + try { + return loadLegacy(req.body()); + } catch (IOException e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Error loading legacy JSON", e); + return false; + } + } + /** + * Copies each table containing application data into the database state object and returns entire set of data. This, + * along with the other methods in this class, should only be used in a controlled environment where no outside access + * is permitted (e.g., using a cloned database on a local development machine). Otherwise, application data is + * visible to the entire world. + */ public static DatabaseState dump (Request req, Response res) throws JsonProcessingException { +// // FIXME this appears to be capable of using unbounded amounts of memory (it copies an entire database into memory) DatabaseState db = new DatabaseState(); - db.projects = Project.getAll(); - db.feedSources = FeedSource.getAll(); - db.feedVersions = FeedVersion.getAll(); - db.notes = Note.getAll(); - db.deployments = Deployment.getAll(); - db.externalProperties = ExternalFeedSourceProperty.getAll(); - + db.projects = Persistence.projects.getAll(); + db.feedSources = Persistence.feedSources.getAll(); + db.feedVersions = Persistence.feedVersions.getAll(); + db.notes = Persistence.notes.getAll(); + db.deployments = Persistence.deployments.getAll(); + db.externalProperties = Persistence.externalFeedSourceProperties.getAll(); + db.snapshots = Persistence.snapshots.getAll(); return db; } - + // FIXME: This can now be authenticated because users are stored in Auth0. // this is not authenticated, because it has to happen with a bare database (i.e. no users) // this method in particular is coded to allow up to 500MB of data to be posted // @BodyParser.Of(value=BodyParser.Json.class, maxLength = 500 * 1024 * 1024) - public static boolean load (Request req, Response res) { + + /** + * Load a JSON dump into the manager database. This should be performed with the python script load.py found + * in the datatools-ui/scripts directory. + */ + public static boolean load (String jsonString) { // TODO: really ought to check all tables LOG.info("loading data..."); - DatabaseState db = null; + DatabaseState db; try { - db = json.read(req.body()); + db = json.read(jsonString); LOG.info("data loaded successfully"); } catch (IOException e) { e.printStackTrace(); LOG.error("data load error. check json validity."); return false; } - for (Project c : db.projects) { - LOG.info("loading project {}", c.id); - c.save(false); + for (Project project : db.projects) { + LOG.info("loading project {}", project.id); + Persistence.projects.create(project); + } + + for (FeedSource feedSource : db.feedSources) { + LOG.info("loading feed source {}", feedSource.id); + Persistence.feedSources.create(feedSource); } - Project.commit(); - for (FeedSource s : db.feedSources) { - LOG.info("loading feed source {}", s.id); - s.save(false); + for (FeedVersion feedVersion : db.feedVersions) { + LOG.info("loading version {}", feedVersion.id); + Persistence.feedVersions.create(feedVersion); } - FeedSource.commit(); - for (FeedVersion v : db.feedVersions) { - LOG.info("loading version {}", v.id); - v.save(false); + for (Note note : db.notes) { + LOG.info("loading note {}", note.id); + Persistence.notes.create(note); } - FeedVersion.commit(); - for (Note n : db.notes) { - LOG.info("loading note {}", n.id); - n.save(false); + for (Deployment deployment : db.deployments) { + LOG.info("loading deployment {}", deployment.id); + Persistence.deployments.create(deployment); } - Note.commit(); - for (Deployment d : db.deployments) { - LOG.info("loading deployment {}", d.id); - d.save(false); + for (ExternalFeedSourceProperty externalFeedSourceProperty : db.externalProperties) { + LOG.info("loading external properties {}", externalFeedSourceProperty.id); + Persistence.externalFeedSourceProperties.create(externalFeedSourceProperty); } - Deployment.commit(); - for (ExternalFeedSourceProperty d : db.externalProperties) { - LOG.info("loading external properties {}", d.id); - d.save(false); + for (Snapshot snapshot : db.snapshots) { + LOG.info("loading snapshot {}", snapshot.id); + Persistence.snapshots.create(snapshot); } - ExternalFeedSourceProperty.commit(); LOG.info("load completed."); return true; } - public static boolean loadLegacy (Request req, Response res) throws Exception { - ObjectMapper mapper = new ObjectMapper(); - JsonNode node = mapper.readTree(req.body()); + /** + * Updates snapshots in Mongo database with data from a list of snapshots in a JSON dump file. This is mainly intended + * for a one-off import that did not load in the snapshots from a dump file, but rather generated them directly from + * an editor mapdb. This method also deletes any duplicate snapshots (i.e., where the feedSourceId and version are + * the same), leaving only one snapshot for that feedSourceId/version remaining. + * @param jsonString + * @return + */ + public static boolean updateSnapshotMetadata (String jsonString) { + LOG.info("loading data..."); + DatabaseState db; + try { + db = json.read(jsonString); + LOG.info("data loaded successfully"); + } catch (IOException e) { + e.printStackTrace(); + LOG.error("data load error. check json validity."); + return false; + } + + if (db.snapshots == null || db.snapshots.size() == 0) { + LOG.warn("No snapshots found in JSON!!"); + return false; + } + int updateCount = 0; + int deleteCount = 0; + for (Snapshot snapshotFromJSON : db.snapshots) { + List matchingSnapshots = Persistence.snapshots.getFiltered(and( + eq("version", snapshotFromJSON.version), + eq(Snapshot.FEED_SOURCE_REF, snapshotFromJSON.feedSourceId))); + + Iterator snapshotIterator = matchingSnapshots.iterator(); + while (snapshotIterator.hasNext()) { + Snapshot nextSnapshot = snapshotIterator.next(); + if (snapshotIterator.hasNext()) { + // Remove any duplicates that may have been created during import + LOG.warn("Removing duplicate snapshot for {}.{}", snapshotFromJSON.feedSourceId, snapshotFromJSON.version); + Persistence.snapshots.removeById(nextSnapshot.id); + deleteCount++; + } else { + // Update snapshot from JSON with single remaining snapshot's id, namespace, and feed load result + LOG.info("updating snapshot {}.{}", snapshotFromJSON.feedSourceId, snapshotFromJSON.version); + snapshotFromJSON.id = nextSnapshot.id; + snapshotFromJSON.namespace = nextSnapshot.namespace; + snapshotFromJSON.feedLoadResult = nextSnapshot.feedLoadResult; + // Replace stored snapshot with snapshot from JSON. + Persistence.snapshots.replace(nextSnapshot.id, snapshotFromJSON); + updateCount++; + } + } + } + if (updateCount > 0 || deleteCount > 0) { + LOG.info("{} snapshots updated, {} snapshots deleted (duplicates)", updateCount, deleteCount); + return true; + } + else { + LOG.warn("No snapshots updated or deleted."); + return false; + } + } + /** + * Load a v2 JSON dump (i.e., objects with the class structure immediately before the MongoDB migration). + */ + private static boolean loadLegacy(String jsonString) throws IOException { + ObjectMapper mapper = new ObjectMapper(); + JsonNode node = mapper.readTree(jsonString); Iterator> fieldsIter = node.fields(); while (fieldsIter.hasNext()) { Map.Entry entry = fieldsIter.next(); LOG.info("Loading {} {}...", entry.getValue().size(), entry.getKey()); switch(entry.getKey()) { - case "feedCollections": - for(int i=0; i< entry.getValue().size(); i++) { - loadLegacyProject(entry.getValue().get(i)); - } - Project.commit(); - break; case "projects": for(int i=0; i< entry.getValue().size(); i++) { loadLegacyProject(entry.getValue().get(i)); } - Project.commit(); break; case "feedSources": for(int i=0; i< entry.getValue().size(); i++) { loadLegacyFeedSource(entry.getValue().get(i)); } - FeedSource.commit(); break; case "feedVersions": for(int i=0; i< entry.getValue().size(); i++) { loadLegacyFeedVersion(entry.getValue().get(i)); } - FeedVersion.commit(); break; + // FIXME: add deployments, etc. default: break; } @@ -159,51 +253,61 @@ public static boolean loadLegacy (Request req, Response res) throws Exception { return true; } + /** + * Load a v2 project (i.e., a project with the class structure immediately before the MongoDB migration). + */ private static void loadLegacyProject (JsonNode node) { String name = node.findValue("name").asText(); String id = node.findValue("id").asText(); - if (Project.get(id) == null) { + if (Persistence.projects.getById(id) == null) { LOG.info("load legacy project " + name); Project project = new Project(); project.id = id; project.name = name; - project.save(false); + Persistence.projects.create(project); } else { LOG.warn("legacy project {} already exists... skipping", name); } } - private static void loadLegacyFeedSource (JsonNode node) throws Exception { + /** + * Load a v2 feed source (i.e., a feed source with the class structure immediately before the MongoDB migration). + */ + private static void loadLegacyFeedSource (JsonNode node) { String name = node.findValue("name").asText(); String id = node.findValue("id").asText(); - if (FeedSource.get(id) == null) { + if (Persistence.feedSources.getById(id) == null) { LOG.info("load legacy FeedSource " + name); - FeedSource fs = new FeedSource(); - fs.id = id; - fs.projectId = node.findValue("feedCollectionId").asText(); - fs.name = name; + FeedSource feedSource = new FeedSource(); + feedSource.id = id; + feedSource.projectId = node.findValue("feedCollectionId").asText(); + feedSource.name = name; switch(node.findValue("retrievalMethod").asText()) { case "FETCHED_AUTOMATICALLY": - fs.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; + feedSource.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; break; case "MANUALLY_UPLOADED": - fs.retrievalMethod = FeedSource.FeedRetrievalMethod.MANUALLY_UPLOADED; + feedSource.retrievalMethod = FeedSource.FeedRetrievalMethod.MANUALLY_UPLOADED; break; case "PRODUCED_IN_HOUSE": - fs.retrievalMethod = FeedSource.FeedRetrievalMethod.PRODUCED_IN_HOUSE; + feedSource.retrievalMethod = FeedSource.FeedRetrievalMethod.PRODUCED_IN_HOUSE; break; } - fs.snapshotVersion = node.findValue("snapshotVersion").asText(); + feedSource.snapshotVersion = node.findValue("snapshotVersion").asText(); Object url = node.findValue("url").asText(); - fs.url = url != null && !url.equals("null") ? new URL(url.toString()) : null; + try { + feedSource.url = url != null && !url.equals("null") ? new URL(url.toString()) : null; + } catch (MalformedURLException e) { + e.printStackTrace(); + } //fs.lastFetched = new Date(node.findValue("lastFetched").asText()); //System.out.println("wrote lastFetched"); - fs.deployable = node.findValue("deployable").asBoolean(); - fs.isPublic = node.findValue("isPublic").asBoolean(); - fs.save(false); + feedSource.deployable = node.findValue("deployable").asBoolean(); + feedSource.isPublic = node.findValue("isPublic").asBoolean(); + Persistence.feedSources.create(feedSource); } else { LOG.warn("legacy FeedSource {} already exists... skipping", name); @@ -211,9 +315,12 @@ private static void loadLegacyFeedSource (JsonNode node) throws Exception { } - private static void loadLegacyFeedVersion (JsonNode node) throws Exception { + /** + * Load a v2 feed version (i.e., a feed version with the class structure immediately before the MongoDB migration). + */ + private static void loadLegacyFeedVersion (JsonNode node) { String id = node.findValue("id").asText(); - if (FeedVersion.get(id) == null) { + if (Persistence.feedVersions.getById(id) == null) { LOG.info("load legacy FeedVersion " + node.findValue("id")); FeedVersion version = new FeedVersion(); version.id = node.findValue("id").asText(); @@ -222,40 +329,56 @@ private static void loadLegacyFeedVersion (JsonNode node) throws Exception { version.hash = node.findValue("hash").asText(); version.updated = new Date(node.findValue("updated").asLong()); LOG.info("updated= " + node.findValue("updated").asText()); - version.save(false); + Persistence.feedVersions.create(version); } else { LOG.warn("legacy FeedVersion {} already exists... skipping", id); } } - public static boolean validateAll (Request req, Response res) throws Exception { + /** + * HTTP endpoint that will trigger the initial or re-validation of all feed versions contained in the application. + * The intended use cases here are 1) to validate all versions after a fresh database copy has been loaded in and + * 2) to trigger a revalidation of all feed versions should a new validation stage be added to the validation process + * that needs to be applied to all feeds. + */ + public static boolean validateAll (boolean load, boolean force, String filterFeedId) throws Exception { LOG.info("validating all feeds..."); - Collection allVersions = FeedVersion.getAll(); + Collection allVersions = Persistence.feedVersions.getAll(); for(FeedVersion version: allVersions) { - boolean force = req.queryParams("force") != null ? req.queryParams("force").equals("true") : false; - FeedValidationResult result = version.validationResult; - if(!force && result != null && result.loadStatus.equals(LoadStatus.SUCCESS)) { + ValidationResult result = version.validationResult; + if(!force && result != null && result.fatalException != null) { + // If the force option is not true and the validation result did not fail, re-validate. continue; } - LOG.info("Validating {}", version.id); - try { - version.validate(); - version.save(); - } catch (Exception e) { - LOG.error("Could not validate", e); -// halt(400, "Error validating feed"); + MonitorableJob job; + if (filterFeedId != null && !version.feedSourceId.equals(filterFeedId)) { + // Skip all feeds except Cortland for now. + continue; + } + if (load) { + job = new ProcessSingleFeedJob(version, "system", false); + } else { + job = new ValidateFeedJob(version, "system", false); } + DataManager.heavyExecutor.execute(job); } - LOG.info("Finished validation..."); + // ValidateAllFeedsJob validateAllFeedsJob = new ValidateAllFeedsJob("system", force, load); return true; } + /** + * Enables the HTTP controllers at the specified prefix. + */ public static void register (String apiPrefix) { - post(apiPrefix + "loadLegacy", DumpController::loadLegacy, json::write); - post(apiPrefix + "load", DumpController::load, json::write); - post(apiPrefix + "validateAll", DumpController::validateAll, json::write); + post(apiPrefix + "loadLegacy", DumpController::getLegacy, json::write); + post(apiPrefix + "load", (request, response) -> load(request.body()), json::write); + post(apiPrefix + "validateAll", (request, response) -> { + boolean force = request.queryParams("force") != null && request.queryParams("force").equals("true"); + boolean load = request.queryParams("load") != null && request.queryParams("load").equals("true"); + return validateAll(load, force, null); + }, json::write); get(apiPrefix + "dump", DumpController::dump, json::write); - System.out.println("registered dump w/ prefix " + apiPrefix); + LOG.warn("registered dump w/ prefix " + apiPrefix); } } diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/AppInfoController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/AppInfoController.java new file mode 100644 index 000000000..03d3e11ac --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/AppInfoController.java @@ -0,0 +1,30 @@ +package com.conveyal.datatools.manager.controllers.api; + +import com.conveyal.datatools.manager.utils.json.JsonUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.Request; +import spark.Response; + +import java.util.HashMap; +import java.util.Map; + +import static com.conveyal.datatools.manager.DataManager.commit; +import static com.conveyal.datatools.manager.DataManager.repoUrl; +import static spark.Spark.get; + +public class AppInfoController { + public static final Logger LOG = LoggerFactory.getLogger(AppInfoController.class); + + public static Map getInfo(Request req, Response res) { + // TODO: convert into a POJO if more stuff is needed here + Map json = new HashMap<>(); + json.put("repoUrl", repoUrl); + json.put("commit", commit); + return json; + } + + public static void register (String apiPrefix) { + get(apiPrefix + "public/appinfo", AppInfoController::getInfo, JsonUtil.objectMapper::writeValueAsString); + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java index 31cf7ec22..cc8c60e68 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java @@ -1,5 +1,7 @@ package com.conveyal.datatools.manager.controllers.api; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.common.utils.SparkUtils; import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.jobs.DeployJob; import com.conveyal.datatools.manager.models.Deployment; @@ -8,12 +10,13 @@ import com.conveyal.datatools.manager.models.JsonViews; import com.conveyal.datatools.manager.models.OtpServer; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; +import org.bson.Document; +import org.eclipse.jetty.http.HttpStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import spark.HaltException; import spark.Request; import spark.Response; @@ -21,138 +24,150 @@ import java.io.FileInputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; import static spark.Spark.*; import static spark.Spark.get; /** - * Created by landon on 5/18/16. + * Handlers for HTTP API requests that affect Deployments. + * These methods are mapped to API endpoints by Spark. */ public class DeploymentController { - private static ObjectMapper mapper = new ObjectMapper(); - private static JsonManager json = - new JsonManager(Deployment.class, JsonViews.UserInterface.class); - - private static JsonManager statusJson = - new JsonManager(DeployJob.DeployStatus.class, JsonViews.UserInterface.class); - + private static JsonManager json = new JsonManager<>(Deployment.class, JsonViews.UserInterface.class); private static final Logger LOG = LoggerFactory.getLogger(DeploymentController.class); + private static Map deploymentJobsByServer = new HashMap<>(); - private static HashMap deploymentJobsByServer = new HashMap(); - - public static Object getDeployment (Request req, Response res) { - Auth0UserProfile userProfile = req.attribute("user"); - String id = req.params("id"); - Deployment d = Deployment.get(id); - if (d == null) { - halt(400, "Deployment does not exist."); - return null; - } - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && !userProfile.getUser_id().equals(d.getUser())) - halt(401); - else - return d; - - return null; - } - - public static Object deleteDeployment (Request req, Response res) { + /** + * Gets the deployment specified by the request's id parameter and ensure that user has access to the + * deployment. If the user does not have permission the Spark request is halted with an error. + */ + private static Deployment checkDeploymentPermissions (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); - String id = req.params("id"); - Deployment d = Deployment.get(id); - if (d == null) { - halt(400, "Deployment does not exist."); - return null; + String deploymentId = req.params("id"); + Deployment deployment = Persistence.deployments.getById(deploymentId); + if (deployment == null) { + haltWithMessage(req, HttpStatus.BAD_REQUEST_400, "Deployment does not exist."); } - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && !userProfile.getUser_id().equals(d.getUser())) - halt(401); - else { - d.delete(); - return d; + boolean isProjectAdmin = userProfile.canAdministerProject(deployment.projectId, deployment.organizationId()); + if (!isProjectAdmin && !userProfile.getUser_id().equals(deployment.user())) { + // If user is not a project admin and did not create the deployment, access to the deployment is denied. + haltWithMessage(req, HttpStatus.UNAUTHORIZED_401, "User not authorized for deployment."); } - - return null; + return deployment; } - /** Download all of the GTFS files in the feed */ - public static Object downloadDeployment (Request req, Response res) throws IOException { - Auth0UserProfile userProfile = req.attribute("user"); - String id = req.params("id"); - System.out.println(id); - Deployment d = Deployment.get(id); - - if (d == null) { - halt(400, "Deployment does not exist."); - return null; - } + private static Deployment getDeployment (Request req, Response res) { + return checkDeploymentPermissions(req, res); + } - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && !userProfile.getUser_id().equals(d.getUser())) - halt(401); + private static Deployment deleteDeployment (Request req, Response res) { + Deployment deployment = checkDeploymentPermissions(req, res); + deployment.delete(); + return deployment; + } + /** + * Download all of the GTFS files in the feed. + * + * TODO: Should there be an option to download the OSM network as well? + */ + private static FileInputStream downloadDeployment (Request req, Response res) throws IOException { + Deployment deployment = checkDeploymentPermissions(req, res); + // Create temp file in order to generate input stream. File temp = File.createTempFile("deployment", ".zip"); // just include GTFS, not any of the ancillary information - d.dump(temp, false, false, false); - + deployment.dump(temp, false, false, false); FileInputStream fis = new FileInputStream(temp); - + String cleanName = deployment.name.replaceAll("[^a-zA-Z0-9]", ""); res.type("application/zip"); - res.header("Content-Disposition", "attachment;filename=" + d.name.replaceAll("[^a-zA-Z0-9]", "") + ".zip"); + res.header("Content-Disposition", String.format("attachment;filename=%s.zip", cleanName)); - // will not actually be deleted until download has completed + // Delete temp file to avoid filling up disk space. + // Note: file will not actually be deleted until download has completed. // http://stackoverflow.com/questions/24372279 - temp.delete(); + if (temp.delete()) { + LOG.info("Temp deployment file at {} successfully deleted.", temp.getAbsolutePath()); + } else { + LOG.warn("Temp deployment file at {} could not be deleted. Disk space may fill up!", temp.getAbsolutePath()); + } return fis; } - public static Object getAllDeployments (Request req, Response res) throws JsonProcessingException { + /** + * Spark HTTP controller that returns a list of deployments for the entire application, a single project, or a single + * feed source (test deployments) depending on the query parameters supplied (e.g., projectId or feedSourceId) + */ + private static Collection getAllDeployments (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); String projectId = req.queryParams("projectId"); - Project project = Project.get(projectId); - if (!userProfile.canAdministerProject(projectId, project.organizationId)) - halt(401); - + String feedSourceId = req.queryParams("feedSourceId"); if (projectId != null) { - Project p = Project.get(projectId); - return p.getProjectDeployments(); - } - else { - return Deployment.getAll(); + // Return deployments for project + Project project = Persistence.projects.getById(projectId); + if (project == null) haltWithMessage(req, 400, "Must provide valid projectId value."); + if (!userProfile.canAdministerProject(projectId, project.organizationId)) + haltWithMessage(req, 401, "User not authorized to view project deployments."); + return project.retrieveDeployments(); + } else if (feedSourceId != null) { + // Return test deployments for feed source (note: these only include test deployments specific to the feed + // source and will not include all deployments that reference this feed source). + FeedSource feedSource = Persistence.feedSources.getById(feedSourceId); + if (feedSource == null) haltWithMessage(req, 400, "Must provide valid feedSourceId value."); + Project project = feedSource.retrieveProject(); + if (!userProfile.canViewFeed(project.organizationId, project.id, feedSourceId)) + haltWithMessage(req, 401, "User not authorized to view feed source deployments."); + return feedSource.retrieveDeployments(); + } else { + // If no query parameter is supplied, return all deployments for application. + if (!userProfile.canAdministerApplication()) + haltWithMessage(req, 401, "User not authorized to view application deployments."); + return Persistence.deployments.getAll(); } } - public static Object createDeployment (Request req, Response res) throws IOException { + /** + * Create a new deployment for the project. All feed sources with a valid latest version are added to the new + * deployment. + */ + private static Deployment createDeployment (Request req, Response res) { + // TODO error handling when request is bogus + // TODO factor out user profile fetching, permissions checks etc. Auth0UserProfile userProfile = req.attribute("user"); - JsonNode params = mapper.readTree(req.body()); - - // find the project - Project p = Project.get(params.get("projectId").asText()); - - if (!userProfile.canAdministerProject(p.id, p.organizationId)) - halt(401); - - Deployment d = new Deployment(p); - d.setUser(userProfile); - - applyJsonToDeployment(d, params); - - d.save(); - - return d; + Document newDeploymentFields = Document.parse(req.body()); + String projectId = newDeploymentFields.getString("projectId"); + String organizationId = newDeploymentFields.getString("organizationId"); + + boolean allowedToCreate = userProfile.canAdministerProject(projectId, organizationId); + + if (allowedToCreate) { + Project project = Persistence.projects.getById(projectId); + Deployment newDeployment = new Deployment(project); + + // FIXME: Here we are creating a deployment and updating it with the JSON string (two db operations) + // We do this because there is not currently apply JSON directly to an object (outside of Mongo codec + // operations) + Persistence.deployments.create(newDeployment); + return Persistence.deployments.update(newDeployment.id, req.body()); + } else { + haltWithMessage(req, 403, "Not authorized to create a deployment for project " + projectId); + return null; + } } /** - * Create a deployment for a particular feedsource - * @throws JsonProcessingException + * Create a deployment for a particular feed source. */ - public static Object createDeploymentFromFeedSource (Request req, Response res) throws JsonProcessingException { + private static Deployment createDeploymentFromFeedSource (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); String id = req.params("id"); - FeedSource s = FeedSource.get(id); + FeedSource feedSource = Persistence.feedSources.getById(id); // three ways to have permission to do this: // 1) be an admin @@ -160,158 +175,144 @@ public static Object createDeploymentFromFeedSource (Request req, Response res) // 3) have access to this feed through project permissions // if all fail, the user cannot do this. if ( - !userProfile.canAdministerProject(s.projectId, s.getOrganizationId()) - && !userProfile.getUser_id().equals(s.getUser()) -// && !userProfile.hasWriteAccess(s.id) - ) - halt(401); - - // never loaded - if (s.getLatestVersionId() == null) - halt(400); - - Deployment d = new Deployment(s); - d.setUser(userProfile); - d.save(); - - return d; + !userProfile.canAdministerProject(feedSource.projectId, feedSource.organizationId()) && + !userProfile.getUser_id().equals(feedSource.user()) + ) + haltWithMessage(req, 401, "User not authorized to perform this action"); + + if (feedSource.latestVersionId() == null) + haltWithMessage(req, 400, "Cannot create a deployment from a feed source with no versions."); + + Deployment deployment = new Deployment(feedSource); + deployment.storeUser(userProfile); + Persistence.deployments.create(deployment); + return deployment; } // @BodyParser.Of(value=BodyParser.Json.class, maxLength=1024*1024) - public static Object updateDeployment (Request req, Response res) throws IOException { - Auth0UserProfile userProfile = req.attribute("user"); - String id = req.params("id"); - Deployment d = Deployment.get(id); - - if (d == null) - halt(404); - - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && !userProfile.getUser_id().equals(d.getUser())) - halt(401); - - JsonNode params = mapper.readTree(req.body()); - applyJsonToDeployment(d, params); - - d.save(); - - return d; - } /** - * Apply JSON params to a deployment. - * @param d - * @param params + * Update a single deployment. If the deployment's feed versions are updated, checks to ensure that each + * version exists and is a part of the same parent project are performed before updating. */ - private static void applyJsonToDeployment(Deployment d, JsonNode params) { - Iterator> fieldsIter = params.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - if (entry.getKey() == "feedVersions") { - JsonNode versions = entry.getValue(); - ArrayList versionsToInsert = new ArrayList<>(versions.size()); - for (JsonNode version : versions) { - if (!version.has("id")) { - halt(400, "Version not supplied"); - } - FeedVersion v = FeedVersion.get(version.get("id").asText()); - if (v == null) { - halt(404, "Version not found"); - } - if (v.getFeedSource().projectId.equals(d.projectId)) { - versionsToInsert.add(v); - } + private static Object updateDeployment (Request req, Response res) { + Deployment deploymentToUpdate = checkDeploymentPermissions(req, res); + Document updateDocument = Document.parse(req.body()); + // FIXME use generic update hook, also feedVersions is getting serialized into MongoDB (which is undesirable) + // Check that feed versions in request body are OK to add to deployment, i.e., they exist and are a part of + // this project. + if (updateDocument.containsKey("feedVersions")) { + List versions = (List) updateDocument.get("feedVersions"); + ArrayList versionsToInsert = new ArrayList<>(versions.size()); + for (Document version : versions) { + if (!version.containsKey("id")) { + haltWithMessage(req, 400, "Version not supplied"); + } + FeedVersion feedVersion = null; + try { + feedVersion = Persistence.feedVersions.getById(version.getString("id")); + } catch (Exception e) { + haltWithMessage(req, 404, "Version not found"); + } + if (feedVersion == null) { + haltWithMessage(req, 404, "Version not found"); + } + // check that the version belongs to the correct project + if (feedVersion.parentFeedSource().projectId.equals(deploymentToUpdate.projectId)) { + versionsToInsert.add(feedVersion); } - - d.setFeedVersions(versionsToInsert); - } - if (entry.getKey() == "name") { - d.name = entry.getValue().asText(); } + + // Update deployment feedVersionIds field. + List versionIds = versionsToInsert.stream().map(v -> v.id).collect(Collectors.toList()); + Persistence.deployments.updateField(deploymentToUpdate.id, "feedVersionIds", versionIds); } + Deployment updatedDeployment = Persistence.deployments.update(deploymentToUpdate.id, req.body()); + // TODO: Should updates to the deployment's fields trigger a notification to subscribers? This could get + // very noisy. + // Notify subscribed users of changes to deployment. +// NotifyUsersForSubscriptionJob.createNotification( +// "deployment-updated", +// deploymentId, +// String.format("Deployment %s properties updated.", deploymentToUpdate.name) +// ); + return updatedDeployment; } /** - * Create a deployment bundle, and push it to OTP - * @throws IOException + * Create a deployment bundle, and send it to the specified OTP target servers (or the specified s3 bucket). */ - public static Object deploy (Request req, Response res) throws IOException { - Auth0UserProfile userProfile = req.attribute("user"); - String target = req.params("target"); - String id = req.params("id"); - Deployment d = Deployment.get(id); - Project p = Project.get(d.projectId); - - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && !userProfile.getUser_id().equals(d.getUser())) - halt(401); - - if (!userProfile.canAdministerProject(d.projectId, d.getOrganizationId()) && p.getServer(target).admin) - halt(401); - - // check if we can deploy - if (deploymentJobsByServer.containsKey(target)) { - DeployJob currentJob = deploymentJobsByServer.get(target); - if (currentJob != null && !currentJob.getStatus().completed) { - // send a 503 service unavailable as it is not possible to deploy to this target right now; - // someone else is deploying - halt(202, "Deployment currently in progress for target: " + target); - LOG.warn("Deployment currently in progress for target: " + target); + private static String deploy (Request req, Response res) { + try { + // Check parameters supplied in request for validity. + Auth0UserProfile userProfile = req.attribute("user"); + String target = req.params("target"); + Deployment deployment = checkDeploymentPermissions(req, res); + Project project = Persistence.projects.getById(deployment.projectId); + if (project == null) haltWithMessage(req, 400, "Internal reference error. Deployment's project ID is invalid"); + // FIXME: Currently the otp server to deploy to is determined by the string name field (with special characters + // replaced with underscores). This should perhaps be replaced with an immutable server ID so that there is + // no risk that these values can overlap. This may be over engineering this system though. The user deploying + // a set of feeds would likely not create two deployment targets with the same name (and the name is unlikely + // to change often). + OtpServer otpServer = project.retrieveServer(target); + if (otpServer == null) haltWithMessage(req, 400, "Must provide valid OTP server target ID."); + // Check that permissions of user allow them to deploy to target. + boolean isProjectAdmin = userProfile.canAdministerProject(deployment.projectId, deployment.organizationId()); + if (!isProjectAdmin && otpServer.admin) { + haltWithMessage(req, 401, "User not authorized to deploy to admin-only target OTP server."); + } + // Check that we can deploy to the specified target. (Any deploy job for the target that is presently active will + // cause a halt.) + if (deploymentJobsByServer.containsKey(target)) { + // There is a deploy job for the server. Check if it is active. + DeployJob deployJob = deploymentJobsByServer.get(target); + if (deployJob != null && !deployJob.status.completed) { + // Job for the target is still active! Send a 202 to the requester to indicate that it is not possible + // to deploy to this target right now because someone else is deploying. + String message = String.format( + "Will not process request to deploy %s. Deployment currently in progress for target: %s", + deployment.name, + target); + LOG.warn(message); + haltWithMessage(req, HttpStatus.ACCEPTED_202, message); + } + } + // Get the URLs to deploy to. + List targetUrls = otpServer.internalUrl; + if ((targetUrls == null || targetUrls.isEmpty()) && (otpServer.s3Bucket == null || otpServer.s3Bucket.isEmpty())) { + haltWithMessage(req, 400, String.format("OTP server %s has no internal URL or s3 bucket specified.", otpServer.name)); + } + // For any previous deployments sent to the server/router combination, set deployedTo to null because + // this new one will overwrite it. NOTE: deployedTo for the current deployment will only be updated after the + // successful completion of the deploy job. + for (Deployment oldDeployment : Deployment.retrieveDeploymentForServerAndRouterId(target, deployment.routerId)) { + LOG.info("Setting deployment target to null id={}", oldDeployment.id); + Persistence.deployments.updateField(oldDeployment.id, "deployedTo", null); } - } - OtpServer otpServer = p.getServer(target); - List targetUrls = otpServer.internalUrl; - Deployment oldD = Deployment.getDeploymentForServerAndRouterId(target, d.routerId); - if (oldD != null) { - oldD.deployedTo = null; - oldD.save(); + // Execute the deployment job and keep track of it in the jobs for server map. + DeployJob job = new DeployJob(deployment, userProfile.getUser_id(), otpServer); + DataManager.heavyExecutor.execute(job); + deploymentJobsByServer.put(target, job); + + return SparkUtils.formatJobMessage(job.jobId, "Deployment initiating."); + } catch (HaltException e) { + throw e; + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Could not process deployment request. Please check request parameters and OTP server target fields."); + return null; } - - d.deployedTo = target; - d.save(); - - DeployJob job = new DeployJob(d, userProfile.getUser_id(), targetUrls, otpServer.publicUrl, otpServer.s3Bucket, otpServer.s3Credentials); - deploymentJobsByServer.put(target, job); - - Thread tnThread = new Thread(job); - tnThread.start(); - - halt(200, "{status: \"ok\"}"); - return null; } - /** - * The current status of a deployment, polled to update the progress dialog. - * @throws JsonProcessingException - */ - public static Object deploymentStatus (Request req, Response res) throws JsonProcessingException { - // this is not access-controlled beyond requiring auth, which is fine - // there's no good way to know who should be able to see this. - String target = req.queryParams("target"); - - if (!deploymentJobsByServer.containsKey(target)) - halt(404, "Deployment target '"+target+"' not found"); - - DeployJob j = deploymentJobsByServer.get(target); - - if (j == null) - halt(404, "No active job for " + target + " found"); - - return j.getStatus(); - } - - /** - * The servers that it is possible to deploy to. - */ -// public static Object deploymentTargets (Request req, Response res) { -// Auth0UserProfile userProfile = req.attribute("user"); -// return DeploymentManager.getDeploymentNames(userProfile.canAdministerApplication()); -// } - public static void register (String apiPrefix) { post(apiPrefix + "secure/deployments/:id/deploy/:target", DeploymentController::deploy, json::write); + post(apiPrefix + "secure/deployments/:id/deploy/", ((request, response) -> { + haltWithMessage(request, 400, "Must provide valid deployment target name"); + return null; + }), json::write); options(apiPrefix + "secure/deployments", (q, s) -> ""); - get(apiPrefix + "secure/deployments/status/:target", DeploymentController::deploymentStatus, json::write); -// get(apiPrefix + "secure/deployments/targets", DeploymentController::deploymentTargets, json::write); get(apiPrefix + "secure/deployments/:id/download", DeploymentController::downloadDeployment); get(apiPrefix + "secure/deployments/:id", DeploymentController::getDeployment, json::write); delete(apiPrefix + "secure/deployments/:id", DeploymentController::deleteDeployment, json::write); diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java index 25f817361..81022a90b 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java @@ -1,36 +1,44 @@ package com.conveyal.datatools.manager.controllers.api; -import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.CopyObjectRequest; import com.conveyal.datatools.common.utils.SparkUtils; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.conveyal.datatools.manager.extensions.ExternalFeedResource; import com.conveyal.datatools.manager.jobs.FetchSingleFeedJob; import com.conveyal.datatools.manager.jobs.NotifyUsersForSubscriptionJob; -import com.conveyal.datatools.manager.models.*; -import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.models.ExternalFeedSourceProperty; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.conveyal.datatools.manager.utils.json.JsonUtil; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import org.bson.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.Request; import spark.Response; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; import static com.conveyal.datatools.manager.auth.Auth0Users.getUserById; -import static spark.Spark.*; +import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId; +import static spark.Spark.delete; +import static spark.Spark.get; +import static spark.Spark.post; +import static spark.Spark.put; /** - * Created by demory on 3/21/16. + * Handlers for HTTP API requests that affect FeedSources. + * These methods are mapped to API endpoints by Spark. */ - public class FeedSourceController { public static final Logger LOG = LoggerFactory.getLogger(FeedSourceController.class); public static JsonManager json = @@ -42,16 +50,15 @@ public static FeedSource getFeedSource(Request req, Response res) { } public static Collection getAllFeedSources(Request req, Response res) { - Collection sources = new ArrayList<>(); + Collection feedSourcesToReturn = new ArrayList<>(); Auth0UserProfile requestingUser = req.attribute("user"); - System.out.println(requestingUser.getEmail()); String projectId = req.queryParams("projectId"); Boolean publicFilter = req.pathInfo().contains("public"); String userId = req.queryParams("userId"); if (projectId != null) { - for (FeedSource source: FeedSource.getAll()) { - String orgId = source.getOrganizationId(); + for (FeedSource source: Persistence.feedSources.getAll()) { + String orgId = source.organizationId(); if ( source != null && source.projectId != null && source.projectId.equals(projectId) && requestingUser != null && (requestingUser.canManageFeed(orgId, source.projectId, source.id) || requestingUser.canViewFeed(orgId, source.projectId, source.id)) @@ -59,30 +66,28 @@ public static Collection getAllFeedSources(Request req, Response res // if requesting public sources and source is not public; skip source if (publicFilter && !source.isPublic) continue; - sources.add(source); + feedSourcesToReturn.add(source); } } - } - // request feed sources a specified user has permissions for - else if (userId != null) { + } else if (userId != null) { + // request feed sources a specified user has permissions for Auth0UserProfile user = getUserById(userId); - if (user == null) return sources; + if (user == null) return feedSourcesToReturn; - for (FeedSource source: FeedSource.getAll()) { - String orgId = source.getOrganizationId(); + for (FeedSource source: Persistence.feedSources.getAll()) { + String orgId = source.organizationId(); if ( source != null && source.projectId != null && (user.canManageFeed(orgId, source.projectId, source.id) || user.canViewFeed(orgId, source.projectId, source.id)) ) { - sources.add(source); + feedSourcesToReturn.add(source); } } - } - // request feed sources that are public - else { - for (FeedSource source: FeedSource.getAll()) { - String orgId = source.getOrganizationId(); + } else { + // request feed sources that are public + for (FeedSource source: Persistence.feedSources.getAll()) { + String orgId = source.organizationId(); // if user is logged in and cannot view feed; skip source if ((requestingUser != null && !requestingUser.canManageFeed(orgId, source.projectId, source.id) && !requestingUser.canViewFeed(orgId, source.projectId, source.id))) continue; @@ -90,183 +95,150 @@ else if (userId != null) { // if requesting public sources and source is not public; skip source if (publicFilter && !source.isPublic) continue; - sources.add(source); + feedSourcesToReturn.add(source); } } - return sources; - } - - public static FeedSource createFeedSource(Request req, Response res) throws IOException { - FeedSource source; - /*if (req.queryParams("type") != null){ - //FeedSource.FeedSourceType type = FeedSource.FeedSourceType.TRANSITLAND; - source = new FeedSource("onestop-id"); - applyJsonToFeedSource(source, req.body()); - source.save(); - - return source; - } - else { - source = new FeedSource(); - - }*/ - - source = new FeedSource(); - - applyJsonToFeedSource(source, req.body()); - - // check permissions before saving - requestFeedSource(req, source, "create"); - if (source.projectId == null) { - halt(400, SparkUtils.formatJSON("Must provide project ID for feed source", 400)); - } - source.save(); - - for(String resourceType : DataManager.feedResources.keySet()) { - DataManager.feedResources.get(resourceType).feedSourceCreated(source, req.headers("Authorization")); - } - - return source; - } - - public static FeedSource updateFeedSource(Request req, Response res) throws IOException { - FeedSource source = requestFeedSourceById(req, "manage"); - - applyJsonToFeedSource(source, req.body()); - source.save(); - - // notify users after successful save - NotifyUsersForSubscriptionJob notifyFeedJob = new NotifyUsersForSubscriptionJob("feed-updated", source.id, "Feed property updated for " + source.name); - Thread notifyThread = new Thread(notifyFeedJob); - notifyThread.start(); - - NotifyUsersForSubscriptionJob notifyProjectJob = new NotifyUsersForSubscriptionJob("project-updated", source.projectId, "Project updated (feed source property for " + source.name + ")"); - Thread notifyProjectThread = new Thread(notifyProjectJob); - notifyProjectThread.start(); - - return source; + return feedSourcesToReturn; } - public static void applyJsonToFeedSource(FeedSource source, String json) throws IOException { - JsonNode node = mapper.readTree(json); - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - - if(entry.getKey().equals("projectId")) { - System.out.println("setting fs project"); - source.setProject(Project.get(entry.getValue().asText())); - } - - if(entry.getKey().equals("name")) { - source.name = entry.getValue().asText(); - } - - if(entry.getKey().equals("url")) { - String url = entry.getValue().asText(); - try { - source.url = new URL(url); - - // reset the last fetched date so it can be fetched again - source.lastFetched = null; - - } catch (MalformedURLException e) { - halt(400, "URL '" + url + "' not valid."); - } - - } - - if(entry.getKey().equals("retrievalMethod")) { - source.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY.valueOf(entry.getValue().asText()); - } - - if(entry.getKey().equals("snapshotVersion")) { - source.snapshotVersion = entry.getValue().asText(); - } - - if(entry.getKey().equals("isPublic")) { - source.isPublic = entry.getValue().asBoolean(); - // TODO: set AWS GTFS zips to public/private after "isPublic" change - if (DataManager.useS3) { - if (source.isPublic) { - source.makePublic(); - } - else { - source.makePrivate(); - } + /** + * HTTP endpoint to create a new feed source. + */ + public static FeedSource createFeedSource(Request req, Response res) { + // TODO factor out getting user profile, project ID and organization ID and permissions + Auth0UserProfile userProfile = req.attribute("user"); + Document newFeedSourceFields = Document.parse(req.body()); + String projectId = newFeedSourceFields.getString("projectId"); + String organizationId = newFeedSourceFields.getString("organizationId"); + boolean allowedToCreateFeedSource = userProfile.canAdministerProject(projectId, organizationId); + if (allowedToCreateFeedSource) { + try { + FeedSource newFeedSource = Persistence.feedSources.create(req.body()); + // Communicate to any registered external "resources" (sites / databases) the fact that a feed source has been + // created in our database. + for (String resourceType : DataManager.feedResources.keySet()) { + DataManager.feedResources.get(resourceType).feedSourceCreated(newFeedSource, req.headers("Authorization")); } + // Notify project subscribers of new feed source creation. + Project parentProject = Persistence.projects.getById(projectId); + NotifyUsersForSubscriptionJob.createNotification( + "project-updated", + projectId, + String.format("New feed %s created in project %s.", newFeedSource.name, parentProject.name)); + return newFeedSource; + } catch (Exception e) { + LOG.error("Unknown error creating feed source", e); + haltWithMessage(req, 400, "Unknown error encountered creating feed source", e); + return null; } + } else { + haltWithMessage(req, 400, "Must provide project ID for feed source"); + return null; + } + } - if(entry.getKey().equals("deployable")) { - source.deployable = entry.getValue().asBoolean(); + public static FeedSource updateFeedSource(Request req, Response res) { + String feedSourceId = req.params("id"); + + // call this method just for null and permissions check + // TODO: it's wasteful to request the entire feed source here, need to factor out permissions checks. However, + // we need the URL to see if it has been updated in order to then set the lastFetched value to null. + FeedSource formerFeedSource = requestFeedSourceById(req, "manage"); + Document fieldsToUpdate = Document.parse(req.body()); + if (fieldsToUpdate.containsKey("url") && formerFeedSource.url != null) { + // Reset last fetched timestamp if the URL has been updated. + if (!fieldsToUpdate.get("url").toString().equals(formerFeedSource.url.toString())) { + LOG.info("Feed source fetch URL has been modified. Resetting lastFetched value from {} to {}", formerFeedSource.lastFetched, null); + fieldsToUpdate.put("lastFetched", null); } - } + FeedSource source = Persistence.feedSources.update(feedSourceId, fieldsToUpdate.toJson()); + + // Notify feed- and project-subscribed users after successful save + NotifyUsersForSubscriptionJob.createNotification( + "feed-updated", + source.id, + String.format("Feed property updated for %s.", source.name)); + NotifyUsersForSubscriptionJob.createNotification( + "project-updated", + source.projectId, + String.format("Project updated (feed source property changed for %s).", source.name)); + return source; } + /** + * Update a set of properties for an external feed resource. This updates the local copy of the properties in the + * Mongo database and then triggers the {@link ExternalFeedResource#propertyUpdated} method to update the external + * resource. + * + * FIXME: Should we reconsider how we store external feed source properties now that we are using Mongo document + * storage? This might should be refactored in the future, but it isn't really hurting anything at the moment. + */ public static FeedSource updateExternalFeedResource(Request req, Response res) throws IOException { FeedSource source = requestFeedSourceById(req, "manage"); String resourceType = req.queryParams("resourceType"); JsonNode node = mapper.readTree(req.body()); - Iterator> fieldsIter = node.fields(); - - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - ExternalFeedSourceProperty prop = - ExternalFeedSourceProperty.find(source, resourceType, entry.getKey()); - - if (prop != null) { - // update the property in our DB - String previousValue = prop.value; - prop.value = entry.getValue().asText(); - prop.save(); - - // trigger an event on the external resource - if(DataManager.feedResources.containsKey(resourceType)) { - DataManager.feedResources.get(resourceType).propertyUpdated(prop, previousValue, req.headers("Authorization")); - } - + Iterator> fieldsIterator = node.fields(); + ExternalFeedResource externalFeedResource = DataManager.feedResources.get(resourceType); + if (externalFeedResource == null) { + haltWithMessage(req, 400, String.format("Resource '%s' not registered with server.", resourceType)); + } + // Iterate over fields found in body and update external properties accordingly. + while (fieldsIterator.hasNext()) { + Map.Entry entry = fieldsIterator.next(); + String propertyId = constructId(source, resourceType, entry.getKey()); + ExternalFeedSourceProperty prop = Persistence.externalFeedSourceProperties.getById(propertyId); + + if (prop == null) { + haltWithMessage(req, 400, String.format("Property '%s' does not exist!", propertyId)); } - + // Hold previous value for use when updating third-party resource + String previousValue = prop.value; + // Update the property in our database. + ExternalFeedSourceProperty updatedProp = Persistence.externalFeedSourceProperties.updateField( + propertyId, "value", entry.getValue().asText()); + + // Trigger an event on the external resource + externalFeedResource.propertyUpdated(updatedProp, previousValue, req.headers("Authorization")); } - + // Updated external properties will be included in JSON (FeedSource#externalProperties) return source; } - public static FeedSource deleteFeedSource(Request req, Response res) { + /** + * HTTP endpoint to delete a feed source. + * + * FIXME: Should this just set a "deleted" flag instead of removing from the database entirely? + */ + private static FeedSource deleteFeedSource(Request req, Response res) { FeedSource source = requestFeedSourceById(req, "manage"); try { source.delete(); return source; } catch (Exception e) { - e.printStackTrace(); - halt(400, "Unknown error deleting feed source."); + LOG.error("Could not delete feed source", e); + haltWithMessage(req, 400, "Unknown error deleting feed source."); return null; } } /** - * Refetch this feed - * @throws JsonProcessingException + * Re-fetch this feed from the feed source URL. */ - public static boolean fetch (Request req, Response res) throws JsonProcessingException { + public static String fetch (Request req, Response res) { FeedSource s = requestFeedSourceById(req, "manage"); LOG.info("Fetching feed for source {}", s.name); Auth0UserProfile userProfile = req.attribute("user"); - FetchSingleFeedJob job = new FetchSingleFeedJob(s, userProfile.getUser_id()); - - // Don't run in thread because we want to return the HTTP status of the fetch operation - job.run(); + // Run in heavyExecutor because ProcessSingleFeedJob is chained to this job (if update finds new version). + FetchSingleFeedJob fetchSingleFeedJob = new FetchSingleFeedJob(s, userProfile.getUser_id(), false); + DataManager.lightExecutor.execute(fetchSingleFeedJob); - // WARNING: infinite 2D bounds Jackson error when returning job.result, so this method now returns true - // because we don't need to return the feed immediately anyways. - // return job.result; - - return true; + // Return the jobId so that the requester can track the job's progress. + return formatJobMessage(fetchSingleFeedJob.jobId, "Fetching latest feed source."); } /** @@ -275,33 +247,37 @@ public static boolean fetch (Request req, Response res) throws JsonProcessingExc * @param action action type (either "view" or "manage") * @return feedsource object for ID */ - private static FeedSource requestFeedSourceById(Request req, String action) { + public static FeedSource requestFeedSourceById(Request req, String action) { String id = req.params("id"); if (id == null) { - halt("Please specify id param"); + haltWithMessage(req, 400, "Please specify id param"); } - return requestFeedSource(req, FeedSource.get(id), action); + return checkFeedSourcePermissions(req, Persistence.feedSources.getById(id), action); } - public static FeedSource requestFeedSource(Request req, FeedSource s, String action) { + + public static FeedSource checkFeedSourcePermissions(Request req, FeedSource feedSource, String action) { Auth0UserProfile userProfile = req.attribute("user"); - Boolean publicFilter = Boolean.valueOf(req.queryParams("public")) || req.url().split("/api/manager/")[1].startsWith("public"); -// System.out.println(req.url().split("/api/manager/")[1].startsWith("public")); + Boolean publicFilter = Boolean.valueOf(req.queryParams("public")) || + req.url().split("/api/*/")[1].startsWith("public"); - // check for null feedsource - if (s == null) - halt(400, "Feed source ID does not exist"); - String orgId = s.getOrganizationId(); + // check for null feedSource + if (feedSource == null) + haltWithMessage(req, 400, "Feed source ID does not exist"); + String orgId = feedSource.organizationId(); boolean authorized; switch (action) { case "create": - authorized = userProfile.canAdministerProject(s.projectId, orgId); + authorized = userProfile.canAdministerProject(feedSource.projectId, orgId); break; case "manage": - authorized = userProfile.canManageFeed(orgId, s.projectId, s.id); + authorized = userProfile.canManageFeed(orgId, feedSource.projectId, feedSource.id); + break; + case "edit": + authorized = userProfile.canEditGTFS(orgId, feedSource.projectId, feedSource.id); break; case "view": if (!publicFilter) { - authorized = userProfile.canViewFeed(orgId, s.projectId, s.id); + authorized = userProfile.canViewFeed(orgId, feedSource.projectId, feedSource.id); } else { authorized = false; } @@ -314,34 +290,34 @@ public static FeedSource requestFeedSource(Request req, FeedSource s, String act // if requesting public sources if (publicFilter){ // if feed not public and user not authorized, halt - if (!s.isPublic && !authorized) - halt(403, "User not authorized to perform action on feed source"); - // if feed is public, but action is managerial, halt (we shouldn't ever get here, but just in case) - else if (s.isPublic && action.equals("manage")) - halt(403, "User not authorized to perform action on feed source"); + if (!feedSource.isPublic && !authorized) + haltWithMessage(req, 403, "User not authorized to perform action on feed source"); + // if feed is public, but action is managerial, halt (we shouldn't ever retrieveById here, but just in case) + else if (feedSource.isPublic && action.equals("manage")) + haltWithMessage(req, 403, "User not authorized to perform action on feed source"); } else { if (!authorized) - halt(403, "User not authorized to perform action on feed source"); + haltWithMessage(req, 403, "User not authorized to perform action on feed source"); } // if we make it here, user has permission and it's a valid feedsource - return s; + return feedSource; } + + // FIXME: use generic API controller and return JSON documents via BSON/Mongo public static void register (String apiPrefix) { get(apiPrefix + "secure/feedsource/:id", FeedSourceController::getFeedSource, json::write); - options(apiPrefix + "secure/feedsource", (q, s) -> ""); -// get(apiPrefix + "secure/feedsource/:id/status", FeedSourceController::fetchFeedStatus, json::write); get(apiPrefix + "secure/feedsource", FeedSourceController::getAllFeedSources, json::write); post(apiPrefix + "secure/feedsource", FeedSourceController::createFeedSource, json::write); put(apiPrefix + "secure/feedsource/:id", FeedSourceController::updateFeedSource, json::write); put(apiPrefix + "secure/feedsource/:id/updateExternal", FeedSourceController::updateExternalFeedResource, json::write); delete(apiPrefix + "secure/feedsource/:id", FeedSourceController::deleteFeedSource, json::write); - post(apiPrefix + "secure/feedsource/:id/fetch", FeedSourceController::fetch, JsonUtil.objectMapper::writeValueAsString); + post(apiPrefix + "secure/feedsource/:id/fetch", FeedSourceController::fetch, json::write); // Public routes get(apiPrefix + "public/feedsource/:id", FeedSourceController::getFeedSource, json::write); get(apiPrefix + "public/feedsource", FeedSourceController::getAllFeedSources, json::write); } -} \ No newline at end of file +} diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java index 611cee802..445428f0f 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java @@ -1,373 +1,271 @@ package com.conveyal.datatools.manager.controllers.api; +import com.conveyal.datatools.common.utils.SparkUtils; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.auth.Auth0UserProfile; -import com.conveyal.datatools.manager.jobs.BuildTransportNetworkJob; import com.conveyal.datatools.manager.jobs.CreateFeedVersionFromSnapshotJob; import com.conveyal.datatools.manager.jobs.ProcessSingleFeedJob; -import com.conveyal.datatools.manager.jobs.ReadTransportNetworkJob; import com.conveyal.datatools.manager.models.FeedDownloadToken; import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.HashUtils; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.conveyal.r5.analyst.PointSet; -import com.conveyal.r5.analyst.cluster.AnalystClusterRequest; -import com.conveyal.r5.analyst.cluster.ResultEnvelope; -import com.conveyal.r5.analyst.cluster.TaskStatistics; -import com.conveyal.r5.api.util.LegMode; -import com.conveyal.r5.api.util.TransitModes; -import com.conveyal.r5.profile.ProfileRequest; -import com.conveyal.r5.profile.RepeatedRaptorProfileRouter; -import com.conveyal.r5.profile.StreetMode; -import com.conveyal.r5.streets.LinkedPointSet; -import com.conveyal.r5.transit.TransportNetwork; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonProcessingException; - -import java.io.*; -import java.nio.charset.StandardCharsets; -import java.time.LocalDate; -import java.time.ZoneId; -import java.time.format.DateTimeFormatter; -import java.util.*; -import java.util.stream.Collectors; - import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.io.ByteStreams; +import org.eclipse.jetty.http.HttpStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.Request; import spark.Response; -import javax.servlet.MultipartConfigElement; -import javax.servlet.ServletException; -import javax.servlet.http.Part; +import javax.servlet.ServletInputStream; +import javax.servlet.ServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Collection; +import java.util.Date; +import java.util.EnumSet; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import static com.conveyal.datatools.common.status.MonitorableJob.JobType.BUILD_TRANSPORT_NETWORK; +import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3; import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile; -import static com.conveyal.datatools.manager.controllers.api.FeedSourceController.requestFeedSource; -import static spark.Spark.*; +import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.conveyal.datatools.manager.controllers.api.FeedSourceController.checkFeedSourcePermissions; +import static spark.Spark.delete; +import static spark.Spark.get; +import static spark.Spark.post; +import static spark.Spark.put; public class FeedVersionController { + + // TODO use this instead of stringly typed permissions enum Permission { VIEW, MANAGE } + public static final Logger LOG = LoggerFactory.getLogger(FeedVersionController.class); - private static ObjectMapper mapper = new ObjectMapper(); - public static JsonManager json = - new JsonManager(FeedVersion.class, JsonViews.UserInterface.class); - private static Set readingNetworkVersionList = new HashSet<>(); + public static JsonManager json = new JsonManager<>(FeedVersion.class, JsonViews.UserInterface.class); /** - * Grab this feed version. + * Grab the feed version for the ID supplied in the request. * If you pass in ?summarized=true, don't include the full tree of validation results, only the counts. */ - public static FeedVersion getFeedVersion (Request req, Response res) throws JsonProcessingException { - FeedVersion v = requestFeedVersion(req, "view"); - - return v; + private static FeedVersion getFeedVersion (Request req, Response res) { + return requestFeedVersion(req, "view"); } - public static Collection getAllFeedVersions (Request req, Response res) throws JsonProcessingException { - Auth0UserProfile userProfile = req.attribute("user"); - FeedSource s = requestFeedSourceById(req, "view"); - - return s.getFeedVersions().stream() - .collect(Collectors.toCollection(ArrayList::new)); + /** + * Get all feed versions for a given feedSource (whose ID is specified in the request). + */ + private static Collection getAllFeedVersionsForFeedSource(Request req, Response res) { + // Check permissions and get the FeedSource whose FeedVersions we want. + FeedSource feedSource = requestFeedSourceById(req, "view"); + return feedSource.retrieveFeedVersions(); } - private static FeedSource requestFeedSourceById(Request req, String action) { - String id = req.queryParams("feedSourceId"); + + public static FeedSource requestFeedSourceById(Request req, String action, String paramName) { + String id = req.queryParams(paramName); if (id == null) { - halt("Please specify feedsourceId param"); + haltWithMessage(req, 400, "Please specify feedSourceId param"); } - return requestFeedSource(req, FeedSource.get(id), action); + return checkFeedSourcePermissions(req, Persistence.feedSources.getById(id), action); + } + + private static FeedSource requestFeedSourceById(Request req, String action) { + return requestFeedSourceById(req, action, "feedSourceId"); } /** * Upload a feed version directly. This is done behind Backbone's back, and as such uses * x-multipart-formdata rather than a json blob. This is done because uploading files in a JSON - * blob is not pretty, and we don't really need to get the Backbone object directly; page re-render isn't + * blob is not pretty, and we don't really need to retrieveById the Backbone object directly; page re-render isn't * a problem. * * Auto-fetched feeds are no longer restricted from having directly-uploaded versions, so we're not picky about * that anymore. - * @return - * @throws JsonProcessingException + * + * @return the job ID that allows monitoring progress of the load process */ - public static Boolean createFeedVersion (Request req, Response res) throws IOException, ServletException { + public static String createFeedVersionViaUpload(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); - FeedSource s = requestFeedSourceById(req, "manage"); - - FeedVersion latest = s.getLatest(); - FeedVersion v = new FeedVersion(s); - v.setUser(userProfile); - - if (req.raw().getAttribute("org.eclipse.jetty.multipartConfig") == null) { - MultipartConfigElement multipartConfigElement = new MultipartConfigElement(System.getProperty("java.io.tmpdir")); - req.raw().setAttribute("org.eclipse.jetty.multipartConfig", multipartConfigElement); - } - - Part part = req.raw().getPart("file"); - LOG.info("Saving feed from upload {}", s); + FeedSource feedSource = requestFeedSourceById(req, "manage"); + FeedVersion latestVersion = feedSource.retrieveLatest(); + FeedVersion newFeedVersion = new FeedVersion(feedSource); + newFeedVersion.retrievalMethod = FeedSource.FeedRetrievalMethod.MANUALLY_UPLOADED; - InputStream uploadStream; - File file = null; + // FIXME: Make the creation of new GTFS files generic to handle other feed creation methods, including fetching + // by URL and loading from the editor. + File newGtfsFile = new File(DataManager.getConfigPropertyAsText("application.data.gtfs"), newFeedVersion.id); try { - uploadStream = part.getInputStream(); - - /** - * Set last modified based on value of query param. This is determined/supplied by the client - * request because this data gets lost in the uploadStream otherwise. - */ - file = v.newGtfsFile(uploadStream, Long.valueOf(req.queryParams("lastModified"))); - LOG.info("Last modified: {}", new Date(file.lastModified())); + // Bypass Spark's request wrapper which always caches the request body in memory that may be a very large + // GTFS file. Also, the body of the request is the GTFS file instead of using multipart form data because + // multipart form handling code also caches the request body. + ServletInputStream inputStream = ((ServletRequestWrapper) req.raw()).getRequest().getInputStream(); + FileOutputStream fileOutputStream = new FileOutputStream(newGtfsFile); + // Guava's ByteStreams.copy uses a 4k buffer (no need to wrap output stream), but does not close streams. + ByteStreams.copy(inputStream, fileOutputStream); + fileOutputStream.close(); + inputStream.close(); + if (newGtfsFile.length() == 0) { + throw new IOException("No file found in request body."); + } + // Set last modified based on value of query param. This is determined/supplied by the client + // request because this data gets lost in the uploadStream otherwise. + Long lastModified = req.queryParams("lastModified") != null + ? Long.valueOf(req.queryParams("lastModified")) + : null; + if (lastModified != null) { + newGtfsFile.setLastModified(lastModified); + newFeedVersion.fileTimestamp = lastModified; + } + LOG.info("Last modified: {}", new Date(newGtfsFile.lastModified())); + LOG.info("Saving feed from upload {}", feedSource); } catch (Exception e) { - e.printStackTrace(); - LOG.error("Unable to open input stream from upload"); - halt(400, "Unable to read uploaded feed"); + LOG.error("Unable to open input stream from uploaded file", e); + haltWithMessage(req, 400, "Unable to read uploaded feed"); } - v.hash(); - // TODO: fix hash() call when called in this context. Nothing gets hashed because the file has not been saved yet. - v.hash = HashUtils.hashFile(file); - - // Check that hashes don't match (as long as v and latest are not the same entry) - if (latest != null && latest.hash.equals(v.hash)) { - LOG.error("Upload version {} matches latest version {}.", v.id, latest.id); - File gtfs = v.getGtfsFile(); - if (gtfs != null) { - gtfs.delete(); - } else { - file.delete(); - LOG.warn("File deleted"); - } - // Uploaded feed is same as latest version - v.delete(); - halt(304); + // TODO: fix FeedVersion.hash() call when called in this context. Nothing gets hashed because the file has not been saved yet. + // newFeedVersion.hash(); + newFeedVersion.fileSize = newGtfsFile.length(); + newFeedVersion.hash = HashUtils.hashFile(newGtfsFile); + + // Check that the hashes of the feeds don't match, i.e. that the feed has changed since the last version. + // (as long as there is a latest version, i.e. the feed source is not completely new) + if (latestVersion != null && latestVersion.hash.equals(newFeedVersion.hash)) { + // Uploaded feed matches latest. Delete GTFS file because it is a duplicate. + LOG.error("Upload version {} matches latest version {}.", newFeedVersion.id, latestVersion.id); + newGtfsFile.delete(); + LOG.warn("File deleted"); + + // There is no need to delete the newFeedVersion because it has not yet been persisted to MongoDB. + haltWithMessage(req, 304, "Uploaded feed is identical to the latest version known to the database."); } - v.name = v.getFormattedTimestamp() + " Upload"; -// v.fileTimestamp - v.userId = userProfile.getUser_id(); - v.save(); - new ProcessSingleFeedJob(v, userProfile.getUser_id()).run(); + newFeedVersion.name = newFeedVersion.formattedTimestamp() + " Upload"; + // TODO newFeedVersion.fileTimestamp still exists - /*if (DataManager.config.get("modules").get("validator").get("enabled").asBoolean()) { - BuildTransportNetworkJob btnj = new BuildTransportNetworkJob(v); - Thread tnThread = new Thread(btnj); - tnThread.start(); - }*/ + // Must be handled by executor because it takes a long time. + ProcessSingleFeedJob processSingleFeedJob = new ProcessSingleFeedJob(newFeedVersion, userProfile.getUser_id(), true); + DataManager.heavyExecutor.execute(processSingleFeedJob); - return true; + return formatJobMessage(processSingleFeedJob.jobId, "Feed version is processing."); } - public static Boolean createFeedVersionFromSnapshot (Request req, Response res) throws IOException, ServletException { + /** + * HTTP API handler that converts an editor snapshot into a "published" data manager feed version. + * + * FIXME: How should we handle this for the SQL version of the application. One proposal might be to: + * 1. "Freeze" the feed in the DB (making it read only). + * 2. Run validation on the feed. + * 3. Export a copy of the data to a GTFS file. + * + * OR we could just export the feed to a file and then re-import it per usual. This seems like it's wasting time/energy. + */ + private static boolean createFeedVersionFromSnapshot (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); - // TODO: should this be edit privilege? - FeedSource s = requestFeedSourceById(req, "manage"); - FeedVersion v = new FeedVersion(s); + // TODO: Should the ability to create a feedVersion from snapshot be controlled by the 'edit-gtfs' privilege? + FeedSource feedSource = requestFeedSourceById(req, "manage"); + Snapshot snapshot = Persistence.snapshots.getById(req.queryParams("snapshotId")); + if (snapshot == null) { + haltWithMessage(req, 400, "Must provide valid snapshot ID"); + } + FeedVersion feedVersion = new FeedVersion(feedSource); CreateFeedVersionFromSnapshotJob createFromSnapshotJob = - new CreateFeedVersionFromSnapshotJob(v, req.queryParams("snapshotId"), userProfile.getUser_id()); - createFromSnapshotJob.addNextJob(new ProcessSingleFeedJob(v, userProfile.getUser_id())); - new Thread(createFromSnapshotJob).start(); + new CreateFeedVersionFromSnapshotJob(feedVersion, snapshot, userProfile.getUser_id()); + DataManager.heavyExecutor.execute(createFromSnapshotJob); return true; } - public static FeedVersion deleteFeedVersion(Request req, Response res) { + /** + * Spark HTTP API handler that deletes a single feed version based on the ID in the request. + */ + private static FeedVersion deleteFeedVersion(Request req, Response res) { FeedVersion version = requestFeedVersion(req, "manage"); - version.delete(); - - // renumber the versions - Collection versions = version.getFeedSource().getFeedVersions(); - FeedVersion[] versionArray = versions.toArray(new FeedVersion[versions.size()]); - Arrays.sort(versionArray, (v1, v2) -> v1.updated.compareTo(v2.updated)); - for(int i = 0; i < versionArray.length; i++) { - FeedVersion v = versionArray[i]; - v.version = i + 1; - v.save(); - } - return version; } private static FeedVersion requestFeedVersion(Request req, String action) { - String id = req.params("id"); + return requestFeedVersion(req, action, req.params("id")); + } - FeedVersion version = FeedVersion.get(id); + public static FeedVersion requestFeedVersion(Request req, String action, String feedVersionId) { + FeedVersion version = Persistence.feedVersions.getById(feedVersionId); if (version == null) { - halt(404, "Version ID does not exist"); + haltWithMessage(req, 404, "Feed version ID does not exist"); } - // performs permissions checks for at feed source level and halts if any issues - requestFeedSource(req, version.getFeedSource(), action); + // Performs permissions checks on the feed source this feed version belongs to, and halts if permission is denied. + checkFeedSourcePermissions(req, version.parentFeedSource(), action); return version; } - public static JsonNode getValidationResult(Request req, Response res) { - return getValidationResult(req, res, false); - } - - public static JsonNode getPublicValidationResult(Request req, Response res) { - return getValidationResult(req, res, true); - } - - public static JsonNode getValidationResult(Request req, Response res, boolean checkPublic) { - FeedVersion version = requestFeedVersion(req, "view"); - - return version.getValidationResult(false); - } - - public static JsonNode getIsochrones(Request req, Response res) { - FeedVersion version = requestFeedVersion(req, "view"); - - Auth0UserProfile userProfile = req.attribute("user"); - // if tn is null, check first if it's being built, else try reading in tn - if (version.transportNetwork == null) { - buildOrReadTransportNetwork(version, userProfile); - } - else { - // remove version from list of reading network - if (readingNetworkVersionList.contains(version.id)) { - readingNetworkVersionList.remove(version.id); - } - AnalystClusterRequest clusterRequest = buildProfileRequest(req); - return getRouterResult(version.transportNetwork, clusterRequest); - } - return null; - } - - private static void buildOrReadTransportNetwork(FeedVersion version, Auth0UserProfile userProfile) { - InputStream is = null; - try { - if (!readingNetworkVersionList.contains(version.id)) { - is = new FileInputStream(version.getTransportNetworkPath()); - readingNetworkVersionList.add(version.id); - try { -// version.transportNetwork = TransportNetwork.read(is); - ReadTransportNetworkJob rtnj = new ReadTransportNetworkJob(version, userProfile.getUser_id()); - Thread readThread = new Thread(rtnj); - readThread.start(); - } catch (Exception e) { - e.printStackTrace(); - } - } - halt(202, "Try again later. Reading transport network"); - } - // Catch exception if transport network not built yet - catch (Exception e) { - if (DataManager.isModuleEnabled("validator") && !readingNetworkVersionList.contains(version.id)) { - LOG.warn("Transport network not found. Beginning build.", e); - readingNetworkVersionList.add(version.id); - BuildTransportNetworkJob btnj = new BuildTransportNetworkJob(version, userProfile.getUser_id()); - Thread tnThread = new Thread(btnj); - tnThread.start(); - } - halt(202, "Try again later. Building transport network"); - } - } - - private static JsonNode getRouterResult(TransportNetwork transportNetwork, AnalystClusterRequest clusterRequest) { - PointSet targets; - if (transportNetwork.gridPointSet == null) { - transportNetwork.rebuildLinkedGridPointSet(); - } - targets = transportNetwork.gridPointSet; - StreetMode mode = StreetMode.WALK; - final LinkedPointSet linkedTargets = targets.link(transportNetwork.streetLayer, mode); - RepeatedRaptorProfileRouter router = new RepeatedRaptorProfileRouter(transportNetwork, clusterRequest, linkedTargets, new TaskStatistics()); - ResultEnvelope result = router.route(); - - ByteArrayOutputStream out = new ByteArrayOutputStream(); - try { - JsonGenerator jgen = new JsonFactory().createGenerator(out); - jgen.writeStartObject(); - result.avgCase.writeIsochrones(jgen); - jgen.writeEndObject(); - jgen.close(); - out.close(); - String outString = new String( out.toByteArray(), StandardCharsets.UTF_8 ); - return mapper.readTree(outString); - } catch (IOException e) { - e.printStackTrace(); - } - return null; - } - - private static AnalystClusterRequest buildProfileRequest(Request req) { - // required fields? - Double fromLat = Double.valueOf(req.queryParams("fromLat")); - Double fromLon = Double.valueOf(req.queryParams("fromLon")); - Double toLat = Double.valueOf(req.queryParams("toLat")); - Double toLon = Double.valueOf(req.queryParams("toLon")); - LocalDate date = req.queryParams("date") != null ? LocalDate.parse(req.queryParams("date"), DateTimeFormatter.ISO_LOCAL_DATE) : LocalDate.now(); // 2011-12-03 - - // optional with defaults - Integer fromTime = req.queryParams("fromTime") != null ? Integer.valueOf(req.queryParams("fromTime")) : 9 * 3600; - Integer toTime = req.queryParams("toTime") != null ? Integer.valueOf(req.queryParams("toTime")) : 10 * 3600; - - // build request with transit as default mode - AnalystClusterRequest clusterRequest = new AnalystClusterRequest(); - clusterRequest.profileRequest = new ProfileRequest(); - clusterRequest.profileRequest.transitModes = EnumSet.of(TransitModes.TRANSIT); - clusterRequest.profileRequest.accessModes = EnumSet.of(LegMode.WALK); - clusterRequest.profileRequest.date = date; - clusterRequest.profileRequest.fromLat = fromLat; - clusterRequest.profileRequest.fromLon = fromLon; - clusterRequest.profileRequest.toLat = toLat; - clusterRequest.profileRequest.toLon = toLon; - clusterRequest.profileRequest.fromTime = fromTime; - clusterRequest.profileRequest.toTime = toTime; - clusterRequest.profileRequest.egressModes = EnumSet.of(LegMode.WALK); - clusterRequest.profileRequest.zoneId = ZoneId.of("America/New_York"); - - return clusterRequest; - } - - public static Boolean renameFeedVersion (Request req, Response res) throws JsonProcessingException { + private static boolean renameFeedVersion (Request req, Response res) { FeedVersion v = requestFeedVersion(req, "manage"); String name = req.queryParams("name"); if (name == null) { - halt(400, "Name parameter not specified"); + haltWithMessage(req, 400, "Name parameter not specified"); } - v.name = name; - v.save(); + Persistence.feedVersions.updateField(v.id, "name", name); return true; } private static Object downloadFeedVersionDirectly(Request req, Response res) { FeedVersion version = requestFeedVersion(req, "view"); - return downloadFile(version.getGtfsFile(), res); + return downloadFile(version.retrieveGtfsFile(), version.id, req, res); } - public static FeedDownloadToken getDownloadToken (Request req, Response res) { + /** + * Returns credentials that a client may use to then download a feed version. Functionality + * changes depending on whether application.data.use_s3_storage config property is true. + */ + private static Object getFeedDownloadCredentials(Request req, Response res) { FeedVersion version = requestFeedVersion(req, "view"); - FeedDownloadToken token = new FeedDownloadToken(version); - token.save(); - return token; - } - private static FeedDownloadToken getPublicDownloadToken (Request req, Response res) { - FeedVersion version = requestFeedVersion(req, "view"); - if(!version.getFeedSource().isPublic) { - halt(401, "Not a public feed"); - return null; + if (DataManager.useS3) { + // Return presigned download link if using S3. + return downloadFromS3(FeedStore.s3Client, DataManager.feedBucket, FeedStore.s3Prefix + version.id, false, res); + } else { + // when feeds are stored locally, single-use download token will still be used + FeedDownloadToken token = new FeedDownloadToken(version); + Persistence.tokens.create(token); + return token; } - FeedDownloadToken token = new FeedDownloadToken(version); - token.save(); - return token; } + /** + * API endpoint that instructs application to validate a feed if validation does not exist for version. + * FIXME! + */ private static JsonNode validate (Request req, Response res) { FeedVersion version = requestFeedVersion(req, "manage"); - return version.getValidationResult(true); + haltWithMessage(req, 400, "Validate endpoint not currently configured!"); + // FIXME: Update for sql-loader validation process? + return null; +// return version.retrieveValidationResult(true); } private static FeedVersion publishToExternalResource (Request req, Response res) { @@ -377,42 +275,60 @@ private static FeedVersion publishToExternalResource (Request req, Response res) for(String resourceType : DataManager.feedResources.keySet()) { DataManager.feedResources.get(resourceType).feedVersionCreated(version, null); } - FeedSource fs = version.getFeedSource(); - fs.publishedVersionId = version.id; - fs.save(); - return version; + if (!DataManager.isExtensionEnabled("mtc")) { + // update published version ID on feed source + Persistence.feedSources.updateField(version.feedSourceId, "publishedVersionId", version.namespace); + return version; + } else { + // NOTE: If the MTC extension is enabled, the parent feed source's publishedVersionId will not be updated to the + // version's namespace until the FeedUpdater has successfully downloaded the feed from the share S3 bucket. + return Persistence.feedVersions.updateField(version.id, "processing", true); + } } - private static Object downloadFeedVersionWithToken (Request req, Response res) { - FeedDownloadToken token = FeedDownloadToken.get(req.params("token")); + + /** + * Download locally stored feed version with token supplied by this application. This method is only used when + * useS3 is set to false. Otherwise, a direct download from s3 should be used. + */ + private static HttpServletResponse downloadFeedVersionWithToken (Request req, Response res) { + String tokenValue = req.params("token"); + FeedDownloadToken token = Persistence.tokens.getById(tokenValue); if(token == null || !token.isValid()) { - halt(400, "Feed download token not valid"); + LOG.error("Feed download token is invalid: {}", token); + haltWithMessage(req, 400, "Feed download token not valid"); } - - FeedVersion version = token.getFeedVersion(); - - token.delete(); - - return downloadFile(version.getGtfsFile(), res); + // Fetch feed version to download. + FeedVersion version = token.retrieveFeedVersion(); + if (version == null) { + haltWithMessage(req, 400, "Could not retrieve version to download"); + } + LOG.info("Using token {} to download feed version {}", token.id, version.id); + // Remove token so that it cannot be used again for feed download + Persistence.tokens.removeById(tokenValue); + File file = version.retrieveGtfsFile(); + return downloadFile(file, version.id, req, res); } public static void register (String apiPrefix) { + // TODO: Might it be easier down the road to create a separate JSON view to request a "detailed" feed version + // which would contain the full validationResult, so that a request for all versions does not become too large? + // This might not be an issue because validation issues are queried separately. + // TODO: We might need an endpoint to download a csv of all validation issues. This was supported in the + // previous version of data tools. get(apiPrefix + "secure/feedversion/:id", FeedVersionController::getFeedVersion, json::write); get(apiPrefix + "secure/feedversion/:id/download", FeedVersionController::downloadFeedVersionDirectly); - get(apiPrefix + "secure/feedversion/:id/downloadtoken", FeedVersionController::getDownloadToken, json::write); - get(apiPrefix + "secure/feedversion/:id/validation", FeedVersionController::getValidationResult, json::write); + get(apiPrefix + "secure/feedversion/:id/downloadtoken", FeedVersionController::getFeedDownloadCredentials, json::write); post(apiPrefix + "secure/feedversion/:id/validate", FeedVersionController::validate, json::write); - get(apiPrefix + "secure/feedversion/:id/isochrones", FeedVersionController::getIsochrones, json::write); - get(apiPrefix + "secure/feedversion", FeedVersionController::getAllFeedVersions, json::write); - post(apiPrefix + "secure/feedversion", FeedVersionController::createFeedVersion, json::write); + get(apiPrefix + "secure/feedversion", FeedVersionController::getAllFeedVersionsForFeedSource, json::write); + post(apiPrefix + "secure/feedversion", FeedVersionController::createFeedVersionViaUpload, json::write); post(apiPrefix + "secure/feedversion/fromsnapshot", FeedVersionController::createFeedVersionFromSnapshot, json::write); put(apiPrefix + "secure/feedversion/:id/rename", FeedVersionController::renameFeedVersion, json::write); post(apiPrefix + "secure/feedversion/:id/publish", FeedVersionController::publishToExternalResource, json::write); delete(apiPrefix + "secure/feedversion/:id", FeedVersionController::deleteFeedVersion, json::write); - get(apiPrefix + "public/feedversion", FeedVersionController::getAllFeedVersions, json::write); - get(apiPrefix + "public/feedversion/:id/validation", FeedVersionController::getPublicValidationResult, json::write); - get(apiPrefix + "public/feedversion/:id/downloadtoken", FeedVersionController::getPublicDownloadToken, json::write); + get(apiPrefix + "public/feedversion", FeedVersionController::getAllFeedVersionsForFeedSource, json::write); + get(apiPrefix + "public/feedversion/:id/downloadtoken", FeedVersionController::getFeedDownloadCredentials, json::write); get(apiPrefix + "downloadfeed/:token", FeedVersionController::downloadFeedVersionWithToken); diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsApiController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsApiController.java deleted file mode 100644 index 516411e95..000000000 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsApiController.java +++ /dev/null @@ -1,131 +0,0 @@ -package com.conveyal.datatools.manager.controllers.api; - -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.persistence.FeedStore; -import com.conveyal.datatools.manager.jobs.FeedUpdater; -import com.conveyal.gtfs.api.ApiMain; -import com.conveyal.gtfs.api.Routes; -import com.fasterxml.jackson.databind.JsonNode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.HashMap; -import java.util.Map; - -/** - * Created by landon on 4/12/16. - */ -public class GtfsApiController { - public static final Logger LOG = LoggerFactory.getLogger(GtfsApiController.class); - public static String feedBucket; - public static FeedUpdater feedUpdater; - private static AmazonS3Client s3 = new AmazonS3Client(); - public static ApiMain gtfsApi; - public static String bucketFolder; - public static void register (String apiPrefix) throws IOException { - - // store list of GTFS feed eTags here - Map eTagMap = new HashMap<>(); - - // uses bucket, folder, and local cache according to main app config - gtfsApi.initialize(DataManager.gtfsCache); - - // check for use of extension... - String extensionType = DataManager.getConfigPropertyAsText("modules.gtfsapi.use_extension"); - switch (extensionType) { - case "mtc": - LOG.info("Using extension " + extensionType + " for service alerts module"); - feedBucket = DataManager.getConfigPropertyAsText("extensions." + extensionType + ".s3_bucket"); - bucketFolder = DataManager.getConfigPropertyAsText("extensions." + extensionType + ".s3_download_prefix"); - - // Adds feeds on startup - eTagMap.putAll(registerS3Feeds(null, feedBucket, bucketFolder)); - break; - default: - LOG.warn("No extension provided for GTFS API"); - // use application s3 bucket and s3Prefix - if ("true".equals(DataManager.getConfigPropertyAsText("application.data.use_s3_storage"))) { - feedBucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket"); - bucketFolder = FeedStore.s3Prefix; - } - else { - feedBucket = null; - } - break; - } - - // check for update interval (in seconds) and initialize feedUpdater - JsonNode updateFrequency = DataManager.getConfigProperty("modules.gtfsapi.update_frequency"); - if (updateFrequency != null) { - feedUpdater = new FeedUpdater(eTagMap, 0, updateFrequency.asInt()); - } - - // set gtfs-api routes with apiPrefix - Routes.routes(apiPrefix); - } - - public static Map registerS3Feeds (Map eTags, String bucket, String dir) { - if (eTags == null) { - eTags = new HashMap<>(); - } - Map newTags = new HashMap<>(); - // iterate over feeds in download_prefix folder and register to gtfsApi (MTC project) - ObjectListing gtfsList = s3.listObjects(bucket, dir); - for (S3ObjectSummary objSummary : gtfsList.getObjectSummaries()) { - - String eTag = objSummary.getETag(); - if (!eTags.containsValue(eTag)) { - String keyName = objSummary.getKey(); - - // don't add object if it is a dir - if (keyName.equals(dir)){ - continue; - } - String filename = keyName.split("/")[1]; - String feedId = filename.replace(".zip", ""); - try { - LOG.warn("New version found for " + keyName + " is null. Downloading from s3..."); - S3Object object = s3.getObject(bucket, keyName); - InputStream in = object.getObjectContent(); - byte[] buf = new byte[1024]; - File file = new File(FeedStore.basePath, filename); - OutputStream out = new FileOutputStream(file); - int count; - while( (count = in.read(buf)) != -1) - { - if( Thread.interrupted() ) - { - throw new InterruptedException(); - } - out.write(buf, 0, count); - } - out.close(); - in.close(); - - // delete old mapDB files - String[] dbFiles = {".db", ".db.p"}; - for (String type : dbFiles) { - File db = new File(FeedStore.basePath, feedId + type); - db.delete(); - } - newTags.put(feedId, eTag); - - // initiate load of feed source into API with get call - gtfsApi.getFeedSource(feedId); - } catch (Exception e) { - LOG.warn("Could not load feed " + keyName, e); - } - } - } - return newTags; - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java index c2599177f..c0038d7a0 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java @@ -1,9 +1,11 @@ package com.conveyal.datatools.manager.controllers.api; -import com.conveyal.datatools.common.utils.SparkUtils; +import com.conveyal.datatools.common.utils.Consts; import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonUtil; import com.conveyal.gtfs.GTFSFeed; import com.fasterxml.jackson.databind.JsonNode; @@ -15,15 +17,31 @@ import javax.servlet.MultipartConfigElement; import javax.servlet.ServletException; +import javax.servlet.http.HttpServletResponse; import javax.servlet.http.Part; -import java.io.*; -import java.util.*; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import java.util.zip.ZipOutputStream; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; import static spark.Spark.get; -import static spark.Spark.halt; import static spark.Spark.post; /** @@ -38,7 +56,7 @@ public class GtfsPlusController { public static Boolean uploadGtfsPlusFile (Request req, Response res) throws IOException, ServletException { - //FeedSource s = FeedSource.get(req.queryParams("feedSourceId")); + //FeedSource s = FeedSource.retrieveById(req.queryParams("feedSourceId")); String feedVersionId = req.params("versionid"); if (req.raw().getAttribute("org.eclipse.jetty.multipartConfig") == null) { @@ -57,28 +75,28 @@ public static Boolean uploadGtfsPlusFile (Request req, Response res) throws IOEx gtfsPlusStore.newFeed(feedVersionId, uploadStream, null); } catch (Exception e) { LOG.error("Unable to open input stream from upload"); - halt("Unable to read uploaded feed"); + haltWithMessage(req, 500, "an unexpected error occurred", e); } return true; } - private static Object getGtfsPlusFile(Request req, Response res) { + private static HttpServletResponse getGtfsPlusFile(Request req, Response res) { String feedVersionId = req.params("versionid"); LOG.info("Downloading GTFS+ file for FeedVersion " + feedVersionId); // check for saved File file = gtfsPlusStore.getFeed(feedVersionId); if(file == null) { - return getGtfsPlusFromGtfs(feedVersionId, res); + return getGtfsPlusFromGtfs(feedVersionId, req, res); } LOG.info("Returning updated GTFS+ data"); - return downloadGtfsPlusFile(file, res); + return downloadGtfsPlusFile(file, req, res); } - private static Object getGtfsPlusFromGtfs(String feedVersionId, Response res) { + private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Request req, Response res) { LOG.info("Extracting GTFS+ data from main GTFS feed"); - FeedVersion version = FeedVersion.get(feedVersionId); + FeedVersion version = Persistence.feedVersions.getById(feedVersionId); File gtfsPlusFile = null; @@ -96,7 +114,7 @@ private static Object getGtfsPlusFromGtfs(String feedVersionId, Response res) { ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(gtfsPlusFile)); // iterate through the existing GTFS file, copying any GTFS+ tables - ZipFile gtfsFile = new ZipFile(version.getGtfsFile()); + ZipFile gtfsFile = new ZipFile(version.retrieveGtfsFile()); final Enumeration entries = gtfsFile.entries(); byte[] buffer = new byte[512]; while (entries.hasMoreElements()) { @@ -117,13 +135,13 @@ private static Object getGtfsPlusFromGtfs(String feedVersionId, Response res) { zos.close(); } catch (Exception e) { - halt(500, "Error getting GTFS+ file from GTFS"); + haltWithMessage(req, 500, "an unexpected error occurred", e); } - return downloadGtfsPlusFile(gtfsPlusFile, res); + return downloadGtfsPlusFile(gtfsPlusFile, req, res); } - private static Object downloadGtfsPlusFile(File file, Response res) { + private static HttpServletResponse downloadGtfsPlusFile(File file, Request req, Response res) { res.raw().setContentType("application/octet-stream"); res.raw().setHeader("Content-Disposition", "attachment; filename=" + file.getName() + ".zip"); @@ -139,8 +157,10 @@ private static Object downloadGtfsPlusFile(File file, Response res) { bufferedOutputStream.flush(); bufferedOutputStream.close(); - } catch (Exception e) { - halt(500, "Error serving GTFS+ file"); + } catch (IOException e) { + LOG.error("An error occurred while trying to download a gtfs plus file"); + e.printStackTrace(); + haltWithMessage(req, 500, "An error occurred while trying to download a gtfs plus file", e); } return res.raw(); @@ -151,26 +171,33 @@ private static Long getGtfsPlusFileTimestamp(Request req, Response res) { // check for saved GTFS+ data File file = gtfsPlusStore.getFeed(feedVersionId); - if(file == null) { - FeedVersion feedVersion = FeedVersion.get(feedVersionId); - if (feedVersion == null) { - halt(400, SparkUtils.formatJSON("Feed version ID is not valid", 400)); + if (file == null) { + FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId); + if (feedVersion != null) { + file = feedVersion.retrieveGtfsFile(); + } else { + haltWithMessage(req, 400, "Feed version ID is not valid"); } - file = feedVersion.getGtfsFile(); } - return file.lastModified(); + if (file != null) { + return file.lastModified(); + } else { + haltWithMessage(req, 404, "Feed version file not found"); + return null; + } } private static Boolean publishGtfsPlusFile(Request req, Response res) { + Auth0UserProfile profile = req.attribute("user"); String feedVersionId = req.params("versionid"); LOG.info("Publishing GTFS+ for " + feedVersionId); File plusFile = gtfsPlusStore.getFeed(feedVersionId); if(plusFile == null || !plusFile.exists()) { - halt(400, "No saved GTFS+ data for version"); + haltWithMessage(req, 400, "No saved GTFS+ data for version"); } - FeedVersion feedVersion = FeedVersion.get(feedVersionId); + FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId); // create a set of valid GTFS+ table names Set gtfsPlusTables = new HashSet<>(); @@ -188,7 +215,7 @@ private static Boolean publishGtfsPlusFile(Request req, Response res) { ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(newFeed)); // iterate through the existing GTFS file, copying all non-GTFS+ tables - ZipFile gtfsFile = new ZipFile(feedVersion.getGtfsFile()); + ZipFile gtfsFile = new ZipFile(feedVersion.retrieveGtfsFile()); final Enumeration entries = gtfsFile.entries(); byte[] buffer = new byte[512]; while (entries.hasMoreElements()) { @@ -228,28 +255,24 @@ private static Boolean publishGtfsPlusFile(Request req, Response res) { } catch (Exception e) { e.printStackTrace(); - halt(500, "Error merging GTFS+ data with GTFS"); + haltWithMessage(req, 500, "an unexpected error occurred", e); } - FeedVersion newFeedVersion = new FeedVersion(feedVersion.getFeedSource()); + FeedVersion newFeedVersion = new FeedVersion(feedVersion.parentFeedSource()); try { newFeedVersion.newGtfsFile(new FileInputStream(newFeed)); } catch (Exception e) { e.printStackTrace(); - halt(500, "Error creating new FeedVersion from combined GTFS/GTFS+"); + haltWithMessage(req, 500, "Error creating new FeedVersion from combined GTFS/GTFS+", e); } newFeedVersion.hash(); // validation for the main GTFS content hasn't changed newFeedVersion.validationResult = feedVersion.validationResult; - - newFeedVersion.save(); - - for(String resourceType : DataManager.feedResources.keySet()) { - DataManager.feedResources.get(resourceType).feedVersionCreated(newFeedVersion, null); - } + newFeedVersion.storeUser(profile); + Persistence.feedVersions.create(newFeedVersion); return true; } @@ -257,18 +280,19 @@ private static Boolean publishGtfsPlusFile(Request req, Response res) { private static Collection getGtfsPlusValidation(Request req, Response res) { String feedVersionId = req.params("versionid"); LOG.info("Validating GTFS+ for " + feedVersionId); - FeedVersion feedVersion = FeedVersion.get(feedVersionId); + FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId); List issues = new LinkedList<>(); // load the main GTFS - GTFSFeed gtfsFeed = feedVersion.getGtfsFeed(); + // FIXME: fix gtfs+ loading/validating for sql-load + GTFSFeed gtfsFeed = null; // feedVersion.retrieveFeed(); // check for saved GTFS+ data File file = gtfsPlusStore.getFeed(feedVersionId); if (file == null) { LOG.warn("GTFS+ file not found, loading from main version GTFS."); - file = feedVersion.getGtfsFile(); + file = feedVersion.retrieveGtfsFile(); } int gtfsPlusTableCount = 0; try { @@ -288,7 +312,7 @@ private static Collection getGtfsPlusValidation(Request req, Re } catch(Exception e) { e.printStackTrace(); - halt(500); + haltWithMessage(req, 500, "an unexpected error occurred", e); } LOG.info("GTFS+ tables found: {}/{}", gtfsPlusTableCount, DataManager.gtfsPlusConfig.size()); return issues; @@ -316,7 +340,7 @@ private static void validateTable(Collection issues, JsonNode t int rowIndex = 0; while((line = in.readLine()) != null) { - String[] values = line.split(",(?=([^\"]*\"[^\"]*\")*[^\"]*$)", -1); + String[] values = line.split(Consts.COLUMN_SPLIT, -1); for(int v=0; v < values.length; v++) { validateTableValue(issues, tableId, rowIndex, values[v], fieldNodes[v], gtfsFeed); } @@ -341,18 +365,21 @@ private static void validateTableValue(Collection issues, Strin for (JsonNode option : options) { String optionValue = option.get("value").asText(); + // NOTE: per client's request, this check has been made case insensitive + boolean valuesAreEqual = optionValue.equalsIgnoreCase(value); + // if value is found in list of options, break out of loop - if (optionValue.equals(value) || !fieldNode.get("required").asBoolean() && value.equals("")) { + if (valuesAreEqual || (!fieldNode.get("required").asBoolean() && value.equals(""))) { invalid = false; break; } } if (invalid) { - System.out.println("invalid: " + " " + value); issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Value: " + value + " is not a valid option.")); } break; case "TEXT": + // check if value exceeds max length requirement if(fieldNode.get("maxLength") != null) { int maxLength = fieldNode.get("maxLength").asInt(); if(value.length() > maxLength) { diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java index 79d706a56..4e08f780a 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java @@ -2,9 +2,13 @@ import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.jobs.NotifyUsersForSubscriptionJob; -import com.conveyal.datatools.manager.models.*; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.models.JsonViews; +import com.conveyal.datatools.manager.models.Model; +import com.conveyal.datatools.manager.models.Note; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import spark.Request; @@ -14,7 +18,9 @@ import java.util.Collection; import java.util.Date; -import static spark.Spark.*; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Updates.push; import static spark.Spark.get; import static spark.Spark.post; @@ -26,36 +32,36 @@ public class NoteController { private static JsonManager json = new JsonManager(Note.class, JsonViews.UserInterface.class); - public static Collection getAllNotes (Request req, Response res) throws JsonProcessingException { + public static Collection getAllNotes (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); - if(userProfile == null) halt(401); + if (userProfile == null) haltWithMessage(req, 401, "User not authorized to perform this action"); String typeStr = req.queryParams("type"); String objectId = req.queryParams("objectId"); if (typeStr == null || objectId == null) { - halt(400, "Please specify objectId and type"); + haltWithMessage(req, 400, "Please specify objectId and type"); } Note.NoteType type = null; try { type = Note.NoteType.valueOf(typeStr); } catch (IllegalArgumentException e) { - halt(400, "Please specify a valid type"); + haltWithMessage(req, 400, "Please specify a valid type"); } Model model = null; switch (type) { case FEED_SOURCE: - model = FeedSource.get(objectId); + model = Persistence.feedSources.getById(objectId); break; case FEED_VERSION: - model = FeedVersion.get(objectId); + model = Persistence.feedVersions.getById(objectId); break; default: // this shouldn't ever happen, but Java requires that every case be covered somehow so model can't be used uninitialized - halt(400, "Unsupported type for notes"); + haltWithMessage(req, 400, "Unsupported type for notes"); } FeedSource s; @@ -64,15 +70,15 @@ public static Collection getAllNotes (Request req, Response res) throws Js s = (FeedSource) model; } else { - s = ((FeedVersion) model).getFeedSource(); + s = ((FeedVersion) model).parentFeedSource(); } - String orgId = s.getOrganizationId(); + String orgId = s.organizationId(); // check if the user has permission if (userProfile.canAdministerProject(s.projectId, orgId) || userProfile.canViewFeed(orgId, s.projectId, s.id)) { - return model.getNotes(); + return model.retrieveNotes(); } else { - halt(401); + haltWithMessage(req, 401, "User not authorized to perform this action"); } return null; @@ -80,7 +86,7 @@ public static Collection getAllNotes (Request req, Response res) throws Js public static Note createNote (Request req, Response res) throws IOException { Auth0UserProfile userProfile = req.attribute("user"); - if(userProfile == null) halt(401); + if(userProfile == null) haltWithMessage(req, 401, "User not authorized to perform this action"); String typeStr = req.queryParams("type"); String objectId = req.queryParams("objectId"); @@ -89,57 +95,71 @@ public static Note createNote (Request req, Response res) throws IOException { try { type = Note.NoteType.valueOf(typeStr); } catch (IllegalArgumentException e) { - halt(400, "Please specify a valid type"); + haltWithMessage(req, 400, "Please specify a valid type"); } - Model model = null; + Model objectWithNote = null; switch (type) { case FEED_SOURCE: - model = FeedSource.get(objectId); + objectWithNote = Persistence.feedSources.getById(objectId); break; case FEED_VERSION: - model = FeedVersion.get(objectId); + objectWithNote = Persistence.feedVersions.getById(objectId); break; default: // this shouldn't ever happen, but Java requires that every case be covered somehow so model can't be used uninitialized - halt(400, "Unsupported type for notes"); + haltWithMessage(req, 400, "Unsupported type for notes"); } - FeedSource s; + FeedSource feedSource; - if (model instanceof FeedSource) { - s = (FeedSource) model; - } - else { - s = ((FeedVersion) model).getFeedSource(); + if (objectWithNote instanceof FeedSource) { + feedSource = (FeedSource) objectWithNote; + } else { + feedSource = ((FeedVersion) objectWithNote).parentFeedSource(); } - String orgId = s.getOrganizationId(); - // check if the user has permission - if (userProfile.canAdministerProject(s.projectId, orgId) || userProfile.canViewFeed(orgId, s.projectId, s.id)) { - Note n = new Note(); - n.setUser(userProfile); + String orgId = feedSource.organizationId(); + boolean allowedToCreate = userProfile.canAdministerProject(feedSource.projectId, orgId) || + userProfile.canViewFeed(orgId, feedSource.projectId, feedSource.id); + if (allowedToCreate) { + Note note = new Note(); + note.storeUser(userProfile); ObjectMapper mapper = new ObjectMapper(); JsonNode node = mapper.readTree(req.body()); - n.body = node.get("body").asText(); - - n.userEmail = userProfile.getEmail(); - n.date = new Date(); - n.type = type; - model.addNote(n); - n.save(); - model.save(); - - // send notifications - NotifyUsersForSubscriptionJob notifyFeedJob = new NotifyUsersForSubscriptionJob("feed-commented-on", s.id, n.userEmail + " commented on " + s.name + " at " + n.date.toString() + ":
" + n.body + "
"); - Thread notifyThread = new Thread(notifyFeedJob); - notifyThread.start(); - - return n; + note.body = node.get("body").asText(); + + note.userEmail = userProfile.getEmail(); + note.date = new Date(); + note.type = type; + + Persistence.notes.create(note); + + // TODO: figure out a cleaner way to handle this update + if (objectWithNote instanceof FeedSource) { + Persistence.feedSources.getMongoCollection().updateOne(eq(objectWithNote.id), push("noteIds", note.id)); + } else { + Persistence.feedVersions.getMongoCollection().updateOne(eq(objectWithNote.id), push("noteIds", note.id)); + } + String message = String.format( + "%s commented on %s at %s:
%s
", + note.userEmail, + feedSource.name, + note.date.toString(), + note.body); + // Send notifications to comment subscribers. + // TODO: feed-commented-on has been merged into feed-updated subscription type. This should be clarified + // in the subject line/URL of the notification email. + NotifyUsersForSubscriptionJob.createNotification( + "feed-updated", + feedSource.id, + message + ); + return note; } else { - halt(401); + haltWithMessage(req, 401, "User not authorized to perform this action"); } return null; diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java index 972e7b565..46c6a3d3f 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java @@ -1,13 +1,11 @@ package com.conveyal.datatools.manager.controllers.api; -import com.conveyal.datatools.common.utils.SparkUtils; import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.models.JsonViews; import com.conveyal.datatools.manager.models.Organization; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.Request; @@ -16,16 +14,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Date; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.Set; -import static com.conveyal.datatools.common.utils.SparkUtils.formatJSON; -import static spark.Spark.*; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static spark.Spark.delete; import static spark.Spark.get; +import static spark.Spark.post; +import static spark.Spark.put; /** * Created by landon on 1/30/17. @@ -33,14 +28,13 @@ public class OrganizationController { public static JsonManager json = new JsonManager<>(Organization.class, JsonViews.UserInterface.class); public static final Logger LOG = LoggerFactory.getLogger(OrganizationController.class); - private static ObjectMapper mapper = new ObjectMapper(); public static Organization getOrganization (Request req, Response res) { String id = req.params("id"); if (id == null) { - halt(400, "Must specify valid organization id"); + haltWithMessage(req, 400, "Must specify valid organization id"); } - Organization org = Organization.get(id); + Organization org = Persistence.organizations.getById(id); return org; } @@ -48,107 +42,75 @@ public static Collection getAllOrganizations (Request req, Respons Auth0UserProfile userProfile = req.attribute("user"); boolean isOrgAdmin = userProfile.canAdministerOrganization(); if (userProfile.canAdministerApplication()) { - return Organization.getAll(); + return Persistence.organizations.getAll(); } else if (isOrgAdmin) { List orgs = new ArrayList<>(); orgs.add(userProfile.getOrganization()); LOG.info("returning org {}", orgs); return orgs; } else { - halt(401, "Must be application admin to view organizations"); + haltWithMessage(req, 401, "Must be application admin to view organizations"); } return null; } + // TODO Fix organization controllers to properly write beginDate/endDate to database as DATE_TIME type, and not string (or some other) public static Organization createOrganization (Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); if (userProfile.canAdministerApplication()) { - Organization org = null; - try { - org = mapper.readValue(req.body(), Organization.class); - org.save(); - } catch (IOException e) { - LOG.warn("Could not create organization", e); - halt(400, e.getMessage()); - } + Organization org = Persistence.organizations.create(req.body()); return org; } else { - halt(401, "Must be application admin to view organizations"); + haltWithMessage(req, 401, "Must be application admin to view organizations"); } return null; } public static Organization updateOrganization (Request req, Response res) throws IOException { - Organization org = requestOrganizationById(req); - applyJsonToOrganization(org, req.body()); - org.save(); - return org; - } + String organizationId = req.params("id"); + requestOrganizationById(req); + Organization organization = Persistence.organizations.update(organizationId, req.body()); - private static void applyJsonToOrganization(Organization org, String json) throws IOException { - JsonNode node = mapper.readTree(json); - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); + // FIXME: Add back in hook after organization is updated. + // See https://github.com/catalogueglobal/datatools-server/issues/111 +// JsonNode projects = entry.getValue(); +// Collection projectsToInsert = new ArrayList<>(projects.size()); +// Collection existingProjects = org.projects(); +// +// // set projects orgId for all valid projects in list +// for (JsonNode project : projects) { +// if (!project.has("id")) { +// halt(400, "Project not supplied"); +// } +// Project p = Project.retrieve(project.get("id").asText()); +// if (p == null) { +// halt(404, "Project not found"); +// } +// Organization previousOrg = p.retrieveOrganization(); +// if (previousOrg != null && !previousOrg.id.equals(org.id)) { +// halt(400, SparkUtils.formatJSON(String.format("Project %s cannot be reassigned while belonging to org %s", p.id, previousOrg.id), 400)); +// } +// projectsToInsert.add(p); +// p.organizationId = org.id; +// p.save(); +// } +// // assign remaining previously assigned projects to null +// existingProjects.removeAll(projectsToInsert); +// for (Project p : existingProjects) { +// p.organizationId = null; +// p.save(); +// } - if(entry.getKey().equals("name")) { - org.name = entry.getValue().asText(); - } else if(entry.getKey().equals("logoUrl")) { - org.logoUrl = entry.getValue().asText(); - } else if(entry.getKey().equals("usageTier")) { - org.usageTier = Organization.UsageTier.valueOf(entry.getValue().asText()); - } else if(entry.getKey().equals("active")) { - org.active = entry.getValue().asBoolean(); - } else if(entry.getKey().equals("extensions")) { - JsonNode extensions = entry.getValue(); - Set newExtensions = new HashSet<>(); - for (JsonNode extension : extensions) { - newExtensions.add(Organization.Extension.valueOf(extension.asText())); - } - org.extensions = newExtensions; - } else if(entry.getKey().equals("projects")) { - JsonNode projects = entry.getValue(); - Collection projectsToInsert = new ArrayList<>(projects.size()); - Collection existingProjects = org.getProjects(); - - // set projects orgId for all valid projects in list - for (JsonNode project : projects) { - if (!project.has("id")) { - halt(400, "Project not supplied"); - } - Project p = Project.get(project.get("id").asText()); - if (p == null) { - halt(404, "Project not found"); - } - Organization previousOrg = p.getOrganization(); - if (previousOrg != null && !previousOrg.id.equals(org.id)) { - halt(400, SparkUtils.formatJSON(String.format("Project %s cannot be reassigned while belonging to org %s", p.id, previousOrg.id), 400)); - } - projectsToInsert.add(p); - p.organizationId = org.id; - p.save(); - } - // assign remaining previously assigned projects to null - existingProjects.removeAll(projectsToInsert); - for (Project p : existingProjects) { - p.organizationId = null; - p.save(); - } - } else if(entry.getKey().equals("subscriptionBeginDate")) { - org.subscriptionBeginDate = new Date(entry.getValue().asLong()); - } else if(entry.getKey().equals("subscriptionEndDate")) { - org.subscriptionEndDate = new Date(entry.getValue().asLong()); - } - } + return organization; } public static Organization deleteOrganization (Request req, Response res) { Organization org = requestOrganizationById(req); - - if (org.getProjects().size() > 0) { - halt(400, formatJSON("Cannot delete organization that is referenced by projects.", 400)); + Collection organizationProjects = org.projects(); + if (organizationProjects != null && organizationProjects.size() > 0) { + haltWithMessage(req, 400, "Cannot delete organization that is referenced by projects."); } - org.delete(); + Persistence.organizations.removeById(org.id); return org; } @@ -156,23 +118,21 @@ private static Organization requestOrganizationById(Request req) { Auth0UserProfile userProfile = req.attribute("user"); String id = req.params("id"); if (id == null) { - halt(400, "Must specify valid organization id"); + haltWithMessage(req, 400, "Must specify valid organization id"); } if (userProfile.canAdministerApplication()) { - Organization org = Organization.get(id); + Organization org = Persistence.organizations.getById(id); if (org == null) { - halt(400, "Organization does not exist"); + haltWithMessage(req, 400, "Organization does not exist"); } return org; } else { - halt(401, "Must be application admin to modify organization"); + haltWithMessage(req, 401, "Must be application admin to modify organization"); } return null; } public static void register (String apiPrefix) { - options(apiPrefix + "secure/organization", (q, s) -> ""); - options(apiPrefix + "secure/organization/:id", (q, s) -> ""); get(apiPrefix + "secure/organization/:id", OrganizationController::getOrganization, json::write); get(apiPrefix + "secure/organization", OrganizationController::getAllOrganizations, json::write); post(apiPrefix + "secure/organization", OrganizationController::createOrganization, json::write); diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java index b1c01ea70..86f78ca40 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java @@ -4,34 +4,22 @@ import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.jobs.FetchProjectFeedsJob; import com.conveyal.datatools.manager.jobs.MakePublicJob; -import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.jobs.MergeProjectFeedsJob; +import com.conveyal.datatools.manager.models.FeedDownloadToken; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.models.Organization; -import com.conveyal.datatools.manager.models.OtpBuildConfig; -import com.conveyal.datatools.manager.models.OtpRouterConfig; -import com.conveyal.datatools.manager.models.OtpServer; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ArrayNode; -import org.apache.http.concurrent.Cancellable; +import org.bson.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import spark.Request; +import spark.Response; -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.BufferedReader; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; @@ -39,680 +27,325 @@ import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.*; +import java.util.Collection; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.zip.ZipEntry; -import java.util.zip.ZipFile; -import java.util.zip.ZipOutputStream; -import spark.Request; -import spark.Response; - -import static spark.Spark.*; +import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3; +import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile; +import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; +import static com.conveyal.datatools.manager.DataManager.publicPath; +import static spark.Spark.delete; +import static spark.Spark.get; +import static spark.Spark.post; +import static spark.Spark.put; /** - * Created by demory on 3/14/16. + * Handlers for HTTP API requests that affect Projects. + * These methods are mapped to API endpoints by Spark. + * TODO we could probably have one generic controller for all data types, and use path elements from the URL to route to different typed persistence instances. */ - +@SuppressWarnings({"unused", "ThrowableNotThrown"}) public class ProjectController { + // TODO We can probably replace this with something from Mongo so we use one JSON serializer / deserializer throughout public static JsonManager json = new JsonManager<>(Project.class, JsonViews.UserInterface.class); public static final Logger LOG = LoggerFactory.getLogger(ProjectController.class); - private static ObjectMapper mapper = new ObjectMapper(); - public static Collection getAllProjects(Request req, Response res) throws JsonProcessingException { + /** + * @return a list of all projects that are public or visible given the current user and organization. + */ + private static Collection getAllProjects(Request req, Response res) throws JsonProcessingException { Auth0UserProfile userProfile = req.attribute("user"); -// Organization org = userProfile.getOrganization(); -// if (!userProfile.canAdministerOrganization()) { -// -// } -// org.getProjects(); - Collection filteredProjects = new ArrayList(); -// Collection projects = Project.getAll(); -// orgProjects = - LOG.info("found projects: " + Project.getAll().size()); - for (Project proj : Project.getAll()) { - // Get feedSources if making a public call -// Supplier> supplier = () -> new LinkedList(); - if (req.pathInfo().contains("public")) { - proj.feedSources = proj.getProjectFeedSources().stream().filter(fs -> fs != null && fs.isPublic).collect(Collectors.toList()); - } - else { - proj.feedSources = null; - } - if (req.pathInfo().contains("public") || userProfile.canAdministerApplication() || userProfile.hasProject(proj.id, proj.organizationId)) { - filteredProjects.add(proj); - } - } - - return filteredProjects; - } - - public static Project getProject(Request req, Response res) { - String id = req.params("id"); - Project proj = Project.get(id); - if (proj == null) { -// return new MissingResourceException("No project found", Project.class.getSimpleName(), id); - halt(404, "No project with id: " + id); - } - // Get feedSources if making a public call - if (req.pathInfo().contains("public")) { - Collection feeds = proj.getProjectFeedSources().stream().filter(fs -> fs.isPublic).collect(Collectors.toList()); - proj.feedSources = feeds; - } - else { - proj.feedSources = null; - } - return proj; - } - - public static Project createProject(Request req, Response res) throws IOException { - Project proj = new Project(); - - applyJsonToProject(proj, req.body()); - proj.save(); - - return proj; - } - - public static Project updateProject(Request req, Response res) throws IOException { - Project proj = requestProjectById(req, "manage"); - - applyJsonToProject(proj, req.body()); - proj.save(); - - return proj; + // TODO: move this filtering into database query to reduce traffic / memory + return Persistence.projects.getAll().stream() + .filter(p -> req.pathInfo().matches(publicPath) || userProfile.hasProject(p.id, p.organizationId)) + .map(p -> checkProjectPermissions(req, p, "view")) + .collect(Collectors.toList()); } - public static Project deleteProject(Request req, Response res) throws IOException { - Project proj = requestProjectById(req, "manage"); - proj.delete(); - - return proj; - + /** + * @return a Project object for the UUID included in the request. + */ + private static Project getProject(Request req, Response res) { + return requestProjectById(req, "view"); } - public static Boolean fetch(Request req, Response res) { + /** + * Create a new Project and store it, setting fields according to the JSON in the request body. + * @return the newly created Project with all the supplied fields, as it appears in the database. + */ + private static Project createProject(Request req, Response res) { + // TODO error handling when request is bogus + // TODO factor out user profile fetching, permissions checks etc. Auth0UserProfile userProfile = req.attribute("user"); - Project proj = requestProjectById(req, "manage"); - - FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(proj, userProfile.getUser_id()); - new Thread(fetchProjectFeedsJob).start(); - return true; - } - - public static void applyJsonToProject(Project proj, String json) throws IOException { - JsonNode node = mapper.readTree(json); - boolean updateFetchSchedule = false; - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - if(entry.getKey().equals("name")) { - proj.name = entry.getValue().asText(); - } - else if(entry.getKey().equals("defaultLocationLat")) { - proj.defaultLocationLat = entry.getValue().asDouble(); - LOG.info("updating default lat"); - } - else if(entry.getKey().equals("defaultLocationLon")) { - proj.defaultLocationLon = entry.getValue().asDouble(); - LOG.info("updating default lon"); - } - else if(entry.getKey().equals("north")) { - proj.north = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("south")) { - proj.south = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("east")) { - proj.east = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("west")) { - proj.west = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("organizationId")) { - proj.organizationId = entry.getValue().asText(); - } - else if(entry.getKey().equals("osmNorth")) { - proj.osmNorth = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("osmSouth")) { - proj.osmSouth = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("osmEast")) { - proj.osmEast = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("osmWest")) { - proj.osmWest = entry.getValue().asDouble(); - } - else if(entry.getKey().equals("useCustomOsmBounds")) { - proj.useCustomOsmBounds = entry.getValue().asBoolean(); - } - else if(entry.getKey().equals("defaultLanguage")) { - proj.defaultLanguage = entry.getValue().asText(); - } - else if(entry.getKey().equals("defaultTimeZone")) { - proj.defaultTimeZone = entry.getValue().asText(); - } - else if(entry.getKey().equals("autoFetchHour")) { - proj.autoFetchHour = entry.getValue().asInt(); - updateFetchSchedule = true; - } - else if(entry.getKey().equals("autoFetchMinute")) { - proj.autoFetchMinute = entry.getValue().asInt(); - updateFetchSchedule = true; - } - else if(entry.getKey().equals("autoFetchFeeds")) { - proj.autoFetchFeeds = entry.getValue().asBoolean(); - updateFetchSchedule = true; - } - else if(entry.getKey().equals("otpServers")) { - updateOtpServers(proj, entry.getValue()); - } - else if (entry.getKey().equals("buildConfig")) { - updateBuildConfig(proj, entry.getValue()); - } - else if (entry.getKey().equals("routerConfig")) { - updateRouterConfig(proj, entry.getValue()); - } - } - if (updateFetchSchedule) { - // If auto fetch flag is turned on - if (proj.autoFetchFeeds){ - int interval = 1; // once per day interval - DataManager.autoFetchMap.put(proj.id, scheduleAutoFeedFetch(proj.id, proj.autoFetchHour, proj.autoFetchMinute, interval, proj.defaultTimeZone)); - } - // otherwise, cancel any existing task for this id - else{ - cancelAutoFetch(proj.id); - } + Document newProjectFields = Document.parse(req.body()); + String organizationId = (String) newProjectFields.get("organizationId"); + boolean allowedToCreate = userProfile.canAdministerApplication() || userProfile.canAdministerOrganization(organizationId); + // Data manager can operate without organizations for now, so we (hackishly/insecurely) deactivate permissions here + if (organizationId == null) allowedToCreate = true; + if (allowedToCreate) { + Project newlyStoredProject = Persistence.projects.create(req.body()); + return newlyStoredProject; + } else { + haltWithMessage(req, 403, "Not authorized to create a project on organization " + organizationId); + return null; } } - private static void updateOtpServers(Project proj, JsonNode otpServers) { - if (otpServers.isArray()) { - proj.otpServers = new ArrayList<>(); - for (int i = 0; i < otpServers.size(); i++) { - JsonNode otpServer = otpServers.get(i); - - OtpServer otpServerObj = new OtpServer(); - if (otpServer.has("name")) { - JsonNode name = otpServer.get("name"); - otpServerObj.name = name.isNull() ? null : name.asText(); - } - if (otpServer.has("admin")) { - JsonNode admin = otpServer.get("admin"); - otpServerObj.admin = admin.isNull() ? false : admin.asBoolean(); - } - if (otpServer.has("publicUrl")) { - JsonNode publicUrl = otpServer.get("publicUrl"); - otpServerObj.publicUrl = publicUrl.isNull() ? null : publicUrl.asText(); - } - if (otpServer.has("s3Bucket")) { - JsonNode s3Bucket = otpServer.get("s3Bucket"); - otpServerObj.s3Bucket = s3Bucket.isNull() ? null : s3Bucket.asText(); - } - if (otpServer.has("s3Credentials")) { - JsonNode s3Credentials = otpServer.get("s3Credentials"); - otpServerObj.s3Credentials = s3Credentials.isNull() ? null : s3Credentials.asText(); - } - if (otpServer.has("internalUrl") && otpServer.get("internalUrl").isArray()) { - JsonNode internalUrl = otpServer.get("internalUrl"); - for (int j = 0; j < internalUrl.size(); j++) { - if (internalUrl.get(j).isNull()) { - continue; - } - String url = internalUrl.get(j).asText(); - if (otpServerObj.internalUrl == null) { - otpServerObj.internalUrl = new ArrayList<>(); - } - otpServerObj.internalUrl.add(url); - } + /** + * Update fields in the Project with the given UUID. The fields to be updated are supplied as JSON in the request + * body. + * @return the Project as it appears in the database after the update. + */ + private static Project updateProject(Request req, Response res) throws IOException { + // Fetch the project once to check permissions + requestProjectById(req, "manage"); + try { + String id = req.params("id"); + Document updateDocument = Document.parse(req.body()); + Project updatedProject = Persistence.projects.update(id, req.body()); + // Catch updates to auto-fetch params, and update the autofetch schedule accordingly. + // TODO factor out into generic update hooks, or at least separate method + if (updateDocument.containsKey("autoFetchHour") + || updateDocument.containsKey("autoFetchMinute") + || updateDocument.containsKey("autoFetchFeeds") + || updateDocument.containsKey("defaultTimeZone")) { + // If auto fetch flag is turned on + if (updatedProject.autoFetchFeeds) { + ScheduledFuture fetchAction = scheduleAutoFeedFetch(updatedProject, 1); + DataManager.autoFetchMap.put(updatedProject.id, fetchAction); + } else { + // otherwise, cancel any existing task for this id + cancelAutoFetch(updatedProject.id); } - proj.otpServers.add(otpServerObj); } + return updatedProject; + } catch (Exception e) { + e.printStackTrace(); + haltWithMessage(req, 400, "Error updating project"); + return null; } } - private static void updateBuildConfig(Project proj, JsonNode buildConfig) { - if(proj.buildConfig == null) proj.buildConfig = new OtpBuildConfig(); - if(buildConfig.has("subwayAccessTime")) { - JsonNode subwayAccessTime = buildConfig.get("subwayAccessTime"); - // allow client to un-set option via 'null' value - proj.buildConfig.subwayAccessTime = subwayAccessTime.isNull() || subwayAccessTime.asText().equals("") ? null : subwayAccessTime.asDouble(); - } - if(buildConfig.has("fetchElevationUS")) { - JsonNode fetchElevationUS = buildConfig.get("fetchElevationUS"); - proj.buildConfig.fetchElevationUS = fetchElevationUS.isNull() || fetchElevationUS.asText().equals("") ? null : fetchElevationUS.asBoolean(); - } - if(buildConfig.has("stationTransfers")) { - JsonNode stationTransfers = buildConfig.get("stationTransfers"); - proj.buildConfig.stationTransfers = stationTransfers.isNull() || stationTransfers.asText().equals("") ? null : stationTransfers.asBoolean(); - } - if (buildConfig.has("fares")) { - JsonNode fares = buildConfig.get("fares"); - proj.buildConfig.fares = fares.isNull() || fares.asText().equals("") ? null : fares.asText(); + /** + * Delete the project for the UUID given in the request. + */ + private static Project deleteProject(Request req, Response res) { + // Fetch project first to check permissions, and so we can return the deleted project after deletion. + Project project = requestProjectById(req, "manage"); + boolean successfullyDeleted = project.delete(); + if (!successfullyDeleted) { + haltWithMessage(req, 400, "Did not delete project."); } + return project; } /** - * Helper function returns feed source if user has permission for specified action. + * Manually fetch a feed all feeds in the project as a one-off operation, when the user clicks a button to request it. + */ + public static Boolean fetch(Request req, Response res) { + Auth0UserProfile userProfile = req.attribute("user"); + Project p = requestProjectById(req, "manage"); + FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(p, userProfile.getUser_id()); + // This job is runnable because sometimes we schedule the task for a later time, but here we call it immediately + // because it is short lived and just cues up more work. + fetchProjectFeedsJob.run(); + return true; + } + + /** + * Public helper function that returns the requested object if the user has permissions for the specified action. + * FIXME why can't we do this checking by passing in the project ID rather than the whole request? + * FIXME: eliminate all stringly typed variables (action) * @param req spark Request object from API request * @param action action type (either "view" or "manage") - * @return + * @return requested project */ - private static Project requestProjectById(Request req, String action) { + private static Project requestProjectById (Request req, String action) { String id = req.params("id"); if (id == null) { - halt("Please specify id param"); + haltWithMessage(req, 400, "Please specify id param"); } - return requestProject(req, Project.get(id), action); + return checkProjectPermissions(req, Persistence.projects.getById(id), action); } - public static Project requestProject(Request req, Project p, String action) { + + /** + * Given a project object, this checks the user's permissions to take some specific action on it. + * If the user does not have permission the Spark request is halted with an error. + * TODO: remove all Spark halt calls from data manipulation functions, API implementation is leaking into data model + * If the user does have permission we return the same project object that was input, but with the feedSources nulled out. + * In the special case that the user is not logged in and is therefore only looking at public objects, the feed + * sources list is replaced with one that only contains publicly visible feed sources. + * This is because the UI only uses Project objects with embedded feedSources in the landing page, nowhere else. + * That fetch with embedded feedSources should be done with something like GraphQL generating multiple backend + * fetches, not with a field that's only populated and returned in special cases. + * FIXME: this is a method with side effects and no clear single purpose, in terms of transformation of input to output. + */ + private static Project checkProjectPermissions(Request req, Project project, String action) { + Auth0UserProfile userProfile = req.attribute("user"); - Boolean publicFilter = Boolean.valueOf(req.queryParams("public")); + // Check if request was made by a user that is not logged in + boolean publicFilter = req.pathInfo().matches(publicPath); - // check for null feedsource - if (p == null) - halt(400, "Feed source ID does not exist"); + // check for null project + if (project == null) { + haltWithMessage(req, 400, "Project ID does not exist"); + return null; + } boolean authorized; switch (action) { + // TODO: limit create action to app/org admins? see code currently in createProject. +// case "create": +// authorized = userProfile.canAdministerOrganization(p.organizationId); +// break; case "manage": - authorized = userProfile.canAdministerProject(p.id, p.organizationId); + authorized = userProfile.canAdministerProject(project.id, project.organizationId); break; case "view": - authorized = false; // userProfile.canViewProject(p.id, p.id); + // request only authorized if not via public path and user can view + authorized = !publicFilter && userProfile.hasProject(project.id, project.organizationId); break; default: authorized = false; break; } - // if requesting public sources -// if (publicFilter){ -// // if feed not public and user not authorized, halt -// if (!p.isPublic && !authorized) -// halt(403, "User not authorized to perform action on feed source"); -// // if feed is public, but action is managerial, halt (we shouldn't ever get here, but just in case) -// else if (p.isPublic && action.equals("manage")) -// halt(403, "User not authorized to perform action on feed source"); -// -// } -// else { -// if (!authorized) -// halt(403, "User not authorized to perform action on feed source"); -// } - - // if we make it here, user has permission and it's a valid feedsource - return p; - } - - private static void updateRouterConfig(Project proj, JsonNode routerConfig) { - if (proj.routerConfig == null) proj.routerConfig = new OtpRouterConfig(); - - if (routerConfig.has("numItineraries")) { - JsonNode numItineraries = routerConfig.get("numItineraries"); - proj.routerConfig.numItineraries = numItineraries.isNull() ? null : numItineraries.asInt(); - } - - if (routerConfig.has("walkSpeed")) { - JsonNode walkSpeed = routerConfig.get("walkSpeed"); - proj.routerConfig.walkSpeed = walkSpeed.isNull() ? null : walkSpeed.asDouble(); - } - - if (routerConfig.has("carDropoffTime")) { - JsonNode carDropoffTime = routerConfig.get("carDropoffTime"); - proj.routerConfig.carDropoffTime = carDropoffTime.isNull() ? null : carDropoffTime.asDouble(); - } - - if (routerConfig.has("stairsReluctance")) { - JsonNode stairsReluctance = routerConfig.get("stairsReluctance"); - proj.routerConfig.stairsReluctance = stairsReluctance.isNull() ? null : stairsReluctance.asDouble(); - } - - if (routerConfig.has("requestLogFile")) { - JsonNode requestLogFile = routerConfig.get("requestLogFile"); - proj.routerConfig.requestLogFile = requestLogFile.isNull() || requestLogFile.asText().equals("") ? null : requestLogFile.asText(); - } - - if (routerConfig.has("updaters")) { - updateProjectUpdaters(proj, routerConfig.get("updaters")); - } - } - - private static void updateProjectUpdaters(Project proj, JsonNode updaters) { - if (updaters.isArray()) { - proj.routerConfig.updaters = new ArrayList<>(); - for (int i = 0; i < updaters.size(); i++) { - JsonNode updater = updaters.get(i); - - OtpRouterConfig.Updater updaterObj = new OtpRouterConfig.Updater(); - if(updater.has("type")) { - JsonNode type = updater.get("type"); - updaterObj.type = type.isNull() ? null : type.asText(); - } - - if(updater.has("sourceType")) { - JsonNode sourceType = updater.get("sourceType"); - updaterObj.sourceType = sourceType.isNull() ? null : sourceType.asText(); - } - - if(updater.has("defaultAgencyId")) { - JsonNode defaultAgencyId = updater.get("defaultAgencyId"); - updaterObj.defaultAgencyId = defaultAgencyId.isNull() ? null : defaultAgencyId.asText(); - } - - if(updater.has("url")) { - JsonNode url = updater.get("url"); - updaterObj.url = url.isNull() ? null : url.asText(); - } - - if(updater.has("frequencySec")) { - JsonNode frequencySec = updater.get("frequencySec"); - updaterObj.frequencySec = frequencySec.isNull() ? null : frequencySec.asInt(); - } - - proj.routerConfig.updaters.add(updaterObj); + // If the user is not logged in, include only public feed sources + if (publicFilter){ + project.feedSources = project.retrieveProjectFeedSources().stream() + .filter(fs -> fs.isPublic) + .collect(Collectors.toList()); + } else { + project.feedSources = null; + if (!authorized) { + haltWithMessage(req, 403, "User not authorized to perform action on project"); + return null; } } + // if we make it here, user has permission and this is a valid project. + return project; } -// private static Object downloadFeedVersionWithToken (Request req, Response res) { -// FeedDownloadToken token = FeedDownloadToken.get(req.params("token")); -// -// if(token == null || !token.isValid()) { -// halt(400, "Feed download token not valid"); -// } -// -// FeedVersion version = token.getFeedVersion(); -// -// token.delete(); -// -// return downloadMergedFeed(project, res); -// } - - private static Object downloadMergedFeed(Request req, Response res) throws IOException { - String id = req.params("id"); - Project p = Project.get(id); - - if(p == null) halt(500, "Project is null"); - - // get feed sources in project - Collection feeds = p.getProjectFeedSources(); - - // create temp merged zip file to add feed content to - File mergedFile; - try { - mergedFile = File.createTempFile(p.id + "-merged", ".zip"); -// mergedFile.deleteOnExit(); - - } catch (IOException e) { - LOG.error("Could not create temp file"); - e.printStackTrace(); - - // // TODO: 5/29/16 add status of download job, move downloadMergedFeed to job... -// synchronized (status) { -// status.error = true; -// status.completed = true; -// status.message = "app.deployment.error.dump"; -// } - - return null; - } - - // create the zipfile - ZipOutputStream out; - try { - out = new ZipOutputStream(new FileOutputStream(mergedFile)); - } catch (FileNotFoundException e) { - throw new RuntimeException(e); - } - - LOG.info("Created project merge file: " + mergedFile.getAbsolutePath()); - - // map of feed versions to table entries contained within version's GTFS - Map feedSourceMap = new HashMap<>(); - - for (FeedSource fs : feeds) { - - // check if feed source has version (use latest) - FeedVersion version = fs.getLatest(); - if (version == null) { - LOG.info("Skipping {} because it has no feed versions", fs.name); - continue; - } - // modify feed version to use prepended feed id - LOG.info("Adding {} feed to merged zip", fs.name); - try { - File file = version.getGtfsFile(); - ZipFile zipFile = new ZipFile(file); - feedSourceMap.put(fs, zipFile); - } catch(Exception e) { - e.printStackTrace(); -// halt(500); - LOG.error("Zipfile for version {} not found", version.id); - } - } - - // loop through GTFS tables - for(int i = 0; i < DataManager.gtfsConfig.size(); i++) { - JsonNode tableNode = DataManager.gtfsConfig.get(i); - byte[] tableOut = mergeTables(tableNode, feedSourceMap); - - // if at least one feed has the table, include it - if (tableOut != null) { - String tableName = tableNode.get("name").asText(); - - // create entry for zip file - ZipEntry tableEntry = new ZipEntry(tableName); - out.putNextEntry(tableEntry); - LOG.info("Writing {} to merged feed", tableEntry.getName()); - out.write(tableOut); - out.closeEntry(); - } - } - out.close(); - - - -// FileInputStream fis = new FileInputStream(mergedFile); - -// res.type("application/zip"); -// res.header("Content-Disposition", "attachment;filename=" + p.name.replaceAll("[^a-zA-Z0-9]", "") + "-gtfs.zip"); - - // will not actually be deleted until download has completed - // http://stackoverflow.com/questions/24372279 -// mergedFile.delete(); - -// return fis; - -// // Deliver zipfile - res.raw().setContentType("application/octet-stream"); - res.raw().setHeader("Content-Disposition", "attachment; filename=" + mergedFile.getName()); - - - try { - BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(res.raw().getOutputStream()); - BufferedInputStream bufferedInputStream = new BufferedInputStream(new FileInputStream(mergedFile)); - - byte[] buffer = new byte[1024]; - int len; - while ((len = bufferedInputStream.read(buffer)) > 0) { - bufferedOutputStream.write(buffer, 0, len); - } - - bufferedOutputStream.flush(); - bufferedOutputStream.close(); - } catch (Exception e) { - halt(500, "Error serving GTFS file"); - } - - return res.raw(); + /** + * HTTP endpoint to initialize a merge project feeds operation. Client should check the job status endpoint for the + * completion of merge project feeds job. On successful completion of the job, the client should make a GET request + * to getFeedDownloadCredentials with the project ID to obtain either temporary S3 credentials or a download token + * (depending on application configuration "application.data.use_s3_storage") to download the zip file. + */ + private static String downloadMergedFeed(Request req, Response res) { + Project project = requestProjectById(req, "view"); + Auth0UserProfile userProfile = req.attribute("user"); + // TODO: make this an authenticated call? + MergeProjectFeedsJob mergeProjectFeedsJob = new MergeProjectFeedsJob(project, userProfile.getUser_id()); + DataManager.heavyExecutor.execute(mergeProjectFeedsJob); + // Return job ID to requester for monitoring job status. + return formatJobMessage(mergeProjectFeedsJob.jobId, "Merge operation is processing."); } - private static byte[] mergeTables(JsonNode tableNode, Map feedSourceMap) throws IOException { - - String tableName = tableNode.get("name").asText(); - ByteArrayOutputStream tableOut = new ByteArrayOutputStream(); - - int feedIndex = 0; - - ArrayNode fieldsNode = (ArrayNode) tableNode.get("fields"); -// fieldsNode. - List headers = new ArrayList<>(); - for (int i = 0; i < fieldsNode.size(); i++) { - JsonNode fieldNode = fieldsNode.get(i); - String fieldName = fieldNode.get("name").asText(); - Boolean notInSpec = fieldNode.has("datatools") && fieldNode.get("datatools").asBoolean(); - if (notInSpec) { - fieldsNode.remove(i); - } - headers.add(fieldName); - } - - // write headers to table - tableOut.write(String.join(",", headers).getBytes()); - tableOut.write("\n".getBytes()); - - for ( Map.Entry mapEntry : feedSourceMap.entrySet()) { - FeedSource fs = mapEntry.getKey(); - ZipFile zipFile = mapEntry.getValue(); - final Enumeration entries = zipFile.entries(); - while (entries.hasMoreElements()) { - final ZipEntry entry = entries.nextElement(); - if(tableName.equals(entry.getName())) { - LOG.info("Adding {} table for {}", entry.getName(), fs.name); - - InputStream inputStream = zipFile.getInputStream(entry); - - BufferedReader in = new BufferedReader(new InputStreamReader(inputStream)); - String line = in.readLine(); - String[] fields = line.split(","); - - List fieldList = Arrays.asList(fields); - - int rowIndex = 0; - while((line = in.readLine()) != null) { - String[] newValues = new String[fieldsNode.size()]; - String[] values = line.split(",", -1); - if (values.length == 1) { - LOG.warn("Found blank line. Skipping..."); - continue; - } - for(int v = 0; v < fieldsNode.size(); v++) { - JsonNode fieldNode = fieldsNode.get(v); - String fieldName = fieldNode.get("name").asText(); - - // get index of field from GTFS spec as it appears in feed - int index = fieldList.indexOf(fieldName); - String val = ""; - try { - index = fieldList.indexOf(fieldName); - if(index != -1) { - val = values[index]; - } - } catch (ArrayIndexOutOfBoundsException e) { - LOG.warn("Index {} out of bounds for file {} and feed {}", index, entry.getName(), fs.name); - continue; - } - - String fieldType = fieldNode.get("inputType").asText(); - - // if field is a gtfs identifier, prepend with feed id/name - if (fieldType.contains("GTFS") && !val.isEmpty()) { -// LOG.info("Adding feed id {} to entity {}: {}", fs.name, fieldName, val); - newValues[v] = fs.name + ":" + val; - } - else { - newValues[v] = val; - } - } - String newLine = String.join(",", newValues); - tableOut.write(newLine.getBytes()); - tableOut.write("\n".getBytes()); - rowIndex++; - } - } - } - feedIndex++; + /** + * Returns credentials that a client may use to then download a feed version. Functionality + * changes depending on whether application.data.use_s3_storage config property is true. + */ + public static Object getFeedDownloadCredentials(Request req, Response res) { + Project project = requestProjectById(req, "view"); + + // if storing feeds on s3, return temporary s3 credentials for that zip file + if (DataManager.useS3) { + // Return presigned download link if using S3. + String key = String.format("project/%s.zip", project.id); + return downloadFromS3(FeedStore.s3Client, DataManager.feedBucket, key, false, res); + } else { + // when feeds are stored locally, single-use download token will still be used + FeedDownloadToken token = new FeedDownloadToken(project); + Persistence.tokens.create(token); + return token; } - return tableOut.toByteArray(); } - public static boolean deployPublic (Request req, Response res) { + /** + * Copy all the latest feed versions for all public feed sources in this project to a bucket on S3. + * Updates the index.html document that serves as a listing of those objects on S3. + * This is often referred to as "deploying" the project. + */ + private static boolean publishPublicFeeds(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); String id = req.params("id"); if (id == null) { - halt(400, "must provide project id!"); + haltWithMessage(req, 400, "must provide project id!"); } - Project proj = Project.get(id); - - if (proj == null) - halt(400, "no such project!"); - - // run as sync job; if it gets too slow change to async - new MakePublicJob(proj, userProfile.getUser_id()).run(); + Project p = Persistence.projects.getById(id); + if (p == null) { + haltWithMessage(req, 400, "no such project!"); + } + // Run this as a synchronous job; if it proves to be too slow we will change to asynchronous. + new MakePublicJob(p, userProfile.getUser_id()).run(); return true; } - public static Project thirdPartySync(Request req, Response res) throws Exception { + /** + * Spark endpoint to synchronize this project's feed sources with another website or service that maintains an + * index of GTFS data. This action is triggered manually by a UI button and for now never happens automatically. + * An ExternalFeedResource of the specified type must be present in DataManager.feedResources + */ + private static Project thirdPartySync(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); String id = req.params("id"); - Project proj = Project.get(id); + Project proj = Persistence.projects.getById(id); String syncType = req.params("type"); - if (!userProfile.canAdministerProject(proj.id, proj.organizationId)) - halt(403); + if (!userProfile.canAdministerProject(proj.id, proj.organizationId)) { + haltWithMessage(req, 403, "Third-party sync not permitted for user."); + } LOG.info("syncing with third party " + syncType); - if(DataManager.feedResources.containsKey(syncType)) { DataManager.feedResources.get(syncType).importFeedsForProject(proj, req.headers("Authorization")); return proj; } - halt(404); + haltWithMessage(req, 404, syncType + " sync type not enabled for application."); return null; } - public static ScheduledFuture scheduleAutoFeedFetch (String id, int hour, int minute, int intervalInDays, String timezoneId){ + + /** + * Schedule an action that fetches all the feeds in the given project according to the autoFetch fields of that project. + * Currently feeds are not auto-fetched independently, they must be all fetched together as part of a project. + * This method is called when a Project's auto-fetch settings are updated, and when the system starts up to populate + * the auto-fetch scheduler. + */ + public static ScheduledFuture scheduleAutoFeedFetch (Project project, int intervalInDays) { TimeUnit minutes = TimeUnit.MINUTES; try { // First cancel any already scheduled auto fetch task for this project id. - cancelAutoFetch(id); - - Project p = Project.get(id); - if (p == null) - return null; - - Cancellable task = null; + cancelAutoFetch(project.id); ZoneId timezone; try { - timezone = ZoneId.of(timezoneId); + timezone = ZoneId.of(project.defaultTimeZone); }catch(Exception e){ timezone = ZoneId.of("America/New_York"); } - LOG.info("Scheduling autofetch for projectID: {}", p.id); - - long delayInMinutes = 0; - + LOG.info("Scheduling auto-fetch for projectID: {}", project.id); // NOW in default timezone ZonedDateTime now = ZonedDateTime.ofInstant(Instant.now(), timezone); - // SCHEDULED START TIME - ZonedDateTime startTime = LocalDateTime.of(LocalDate.now(), LocalTime.of(hour, minute)).atZone(timezone); + // Scheduled start time + ZonedDateTime startTime = LocalDateTime.of(LocalDate.now(), + LocalTime.of(project.autoFetchHour, project.autoFetchMinute)).atZone(timezone); LOG.info("Now: {}", now.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)); LOG.info("Scheduled start time: {}", startTime.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)); // Get diff between start time and current time long diffInMinutes = (startTime.toEpochSecond() - now.toEpochSecond()) / 60; + long delayInMinutes; if ( diffInMinutes >= 0 ){ delayInMinutes = diffInMinutes; // delay in minutes } @@ -722,24 +355,33 @@ public static ScheduledFuture scheduleAutoFeedFetch (String id, int hour, int mi LOG.info("Auto fetch begins in {} hours and runs every {} hours", String.valueOf(delayInMinutes / 60.0), TimeUnit.DAYS.toHours(intervalInDays)); - FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(p, null); - - return DataManager.scheduler.scheduleAtFixedRate(fetchProjectFeedsJob, delayInMinutes, TimeUnit.DAYS.toMinutes(intervalInDays), minutes); + // system is defined as owner because owner field must not be null + FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(project, "system"); + return DataManager.scheduler.scheduleAtFixedRate(fetchProjectFeedsJob, + delayInMinutes, TimeUnit.DAYS.toMinutes(intervalInDays), minutes); } catch (Exception e) { e.printStackTrace(); return null; } } - public static void cancelAutoFetch(String id){ - Project p = Project.get(id); + + /** + * Cancel an existing auto-fetch job that is scheduled for the given project ID. + * There is only one auto-fetch job per project, not one for each feedSource within the project. + */ + private static void cancelAutoFetch(String projectId){ + Project p = Persistence.projects.getById(projectId); if ( p != null && DataManager.autoFetchMap.get(p.id) != null) { - LOG.info("Cancelling autofetch for projectID: {}", p.id); + LOG.info("Cancelling auto-fetch for projectID: {}", p.id); DataManager.autoFetchMap.get(p.id).cancel(true); } } + + /** + * This connects all the above HTTP API handlers to URL paths (registers them with the Spark framework). + * A bit too static/global for an OO language, but that's how Spark works. + */ public static void register (String apiPrefix) { - options(apiPrefix + "secure/project", (q, s) -> ""); - options(apiPrefix + "secure/project/:id", (q, s) -> ""); get(apiPrefix + "secure/project/:id", ProjectController::getProject, json::write); get(apiPrefix + "secure/project", ProjectController::getAllProjects, json::write); post(apiPrefix + "secure/project", ProjectController::createProject, json::write); @@ -747,12 +389,32 @@ public static void register (String apiPrefix) { delete(apiPrefix + "secure/project/:id", ProjectController::deleteProject, json::write); get(apiPrefix + "secure/project/:id/thirdPartySync/:type", ProjectController::thirdPartySync, json::write); post(apiPrefix + "secure/project/:id/fetch", ProjectController::fetch, json::write); - post(apiPrefix + "secure/project/:id/deployPublic", ProjectController::deployPublic, json::write); + post(apiPrefix + "secure/project/:id/deployPublic", ProjectController::publishPublicFeeds, json::write); - get(apiPrefix + "public/project/:id/download", ProjectController::downloadMergedFeed); + get(apiPrefix + "secure/project/:id/download", ProjectController::downloadMergedFeed); + get(apiPrefix + "secure/project/:id/downloadtoken", ProjectController::getFeedDownloadCredentials, json::write); get(apiPrefix + "public/project/:id", ProjectController::getProject, json::write); get(apiPrefix + "public/project", ProjectController::getAllProjects, json::write); + get(apiPrefix + "downloadprojectfeed/:token", ProjectController::downloadMergedFeedWithToken); + } + + /** + * HTTP endpoint that allows the requester to download a merged project feeds file stored locally (it should only + * be invoked if the application is not using S3 storage) given that the requester supplies a valid token. + */ + private static Object downloadMergedFeedWithToken(Request req, Response res) { + FeedDownloadToken token = Persistence.tokens.getById(req.params("token")); + + if(token == null || !token.isValid()) { + haltWithMessage(req, 400, "Feed download token not valid"); + } + + Project project = token.retrieveProject(); + + Persistence.tokens.removeById(token.id); + String fileName = project.id + ".zip"; + return downloadFile(FeedVersion.feedStore.getFeed(fileName), fileName, req, res); } } diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/RegionController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/RegionController.java deleted file mode 100644 index 6759e90a1..000000000 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/RegionController.java +++ /dev/null @@ -1,210 +0,0 @@ -package com.conveyal.datatools.manager.controllers.api; - -import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.models.Region; -import com.conveyal.datatools.manager.models.JsonViews; -import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.vividsolutions.jts.geom.Envelope; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Point; -import org.apache.commons.io.FilenameUtils; -import org.geotools.geojson.geom.GeometryJSON; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import spark.Request; -import spark.Response; - -import java.io.IOException; -import java.io.InputStream; -import java.io.Reader; -import java.io.StringReader; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.Map; - -import static spark.Spark.*; -import static spark.Spark.get; - -/** - * Created by landon on 4/15/16. - */ -public class RegionController { - public static final Logger LOG = LoggerFactory.getLogger(ProjectController.class); - - public static JsonManager json = - new JsonManager<>(Region.class, JsonViews.UserInterface.class); - - public static Region getRegion(Request req, Response res) { - String id = req.params("id"); - return Region.get(id); - } - - public static Collection getAllRegions(Request req, Response res) throws JsonProcessingException { - Collection regions = new ArrayList<>(); - - String projectId = req.queryParams("projectId"); - System.out.println(req.pathInfo()); - regions = Region.getAll(); -// Boolean publicFilter = Boolean.valueOf(req.queryParams("public")); -// if(projectId != null) { -// for (Region region: Region.getAll()) { -// if(region.projectId.equals(projectId)) { -// // if requesting public regions and region is not public; skip region -// if (publicFilter && !region.isPublic) -// continue; -// regions.add(region); -// } -// } -// } -// else { -// for (Region region: Region.getAll()) { -// // if requesting public regions and region is not public; skip region -// if (publicFilter && !region.isPublic) -// continue; -// regions.add(region); -// } -// } - - return regions; - } - - public static Region createRegion(Request req, Response res) throws IOException { - Region region; - - region = new Region(); - - applyJsonToRegion(region, req.body()); - region.save(); - - return region; - } - - public static Region updateRegion(Request req, Response res) throws IOException { - String id = req.params("id"); - Region region = Region.get(id); - - applyJsonToRegion(region, req.body()); - region.save(); - - return region; - } - - public static void applyJsonToRegion(Region region, String json) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - JsonNode node = mapper.readTree(json); - Iterator> fieldsIter = node.fields(); - while (fieldsIter.hasNext()) { - Map.Entry entry = fieldsIter.next(); - - if(entry.getKey().equals("name")) { - region.name = entry.getValue().asText(); - } - - if(entry.getKey().equals("order")) { - region.order = entry.getValue().asText(); - } - - if(entry.getKey().equals("geometry")) { - region.geometry = entry.getValue().asText(); - } - - if(entry.getKey().equals("isPublic")) { - region.isPublic = entry.getValue().asBoolean(); - } - - } - } - - public static Collection seedRegions(Request req, Response res) throws IOException { - Region.deleteAll(); - Collection regions = new ArrayList<>(); - String regionsDir = DataManager.getConfigPropertyAsText("application.data.regions"); - LOG.info(regionsDir); - GeometryJSON gjson = new GeometryJSON(); - Files.walk(Paths.get(regionsDir)).forEach(filePath -> { - if (Files.isRegularFile(filePath) && FilenameUtils.getExtension(filePath.toString()).equalsIgnoreCase("geojson")) { - LOG.info(String.valueOf(filePath)); - ObjectMapper mapper = new ObjectMapper(); - JsonNode root; - try { - root = mapper.readTree(filePath.toFile()); - JsonNode features = root.get("features"); - for (JsonNode feature : features){ - Region region = new Region(); - String name; - if (feature.get("properties").has("NAME")) - name = feature.get("properties").get("NAME").asText(); - else if (feature.get("properties").has("name")) - name = feature.get("properties").get("name").asText(); - else - continue; - - region.name = name; -// LOG.info(region.name); - if (feature.get("properties").has("featurecla")) - region.order = feature.get("properties").get("featurecla").asText(); - -// LOG.info("getting geometry"); - if (feature.has("geometry")) { - region.geometry = feature.get("geometry").toString(); - Reader reader = new StringReader(feature.toString()); - MultiPolygon poly = gjson.readMultiPolygon(reader); - Point center = poly.getCentroid(); - region.lon = center.getX(); - region.lat = center.getY(); - Envelope envelope = poly.getEnvelopeInternal(); - region.east = envelope.getMaxX(); - region.west = envelope.getMinX(); - region.north = envelope.getMaxY(); - region.south = envelope.getMinY(); - } - else { - LOG.info("no geometry for " + region.name); - } - - region.isPublic = true; - region.save(); - regions.add(region); - } - } catch (IOException e) { - e.printStackTrace(); - } catch (Exception e) { - e.printStackTrace(); - } - } - else { - LOG.warn(filePath.getFileName() + " is not geojson"); - } - }); - return regions; - } - public static Region deleteRegion(Request req, Response res) { - String id = req.params("id"); - Region region = Region.get(id); - region.delete(); - return region; - } - - public static void register (String apiPrefix) { - get(apiPrefix + "secure/region/:id", RegionController::getRegion, json::write); - options(apiPrefix + "secure/region", (q, s) -> ""); - get(apiPrefix + "secure/region", RegionController::getAllRegions, json::write); - post(apiPrefix + "secure/region", RegionController::createRegion, json::write); - put(apiPrefix + "secure/region/:id", RegionController::updateRegion, json::write); - delete(apiPrefix + "secure/region/:id", RegionController::deleteRegion, json::write); - - // Public routes - get(apiPrefix + "public/region/:id", RegionController::getRegion, json::write); - get(apiPrefix + "public/region", RegionController::getAllRegions, json::write); - - get(apiPrefix + "public/seedregions", RegionController::seedRegions, json::write); - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java index 83b897e1e..355ac8fe1 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java @@ -1,20 +1,23 @@ package com.conveyal.datatools.manager.controllers.api; +import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.auth.Auth0UserProfile; -import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.manager.models.JsonViews; import com.conveyal.datatools.manager.utils.json.JsonManager; +import org.eclipse.jetty.util.ConcurrentHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import spark.Request; import spark.Response; -import java.util.HashSet; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.Set; +import java.util.stream.Collectors; -import static spark.Spark.*; -import static spark.Spark.delete; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; import static spark.Spark.get; /** @@ -26,23 +29,107 @@ public class StatusController { public static JsonManager json = new JsonManager<>(MonitorableJob.Status.class, JsonViews.UserInterface.class); - /*public static Object getStatus(Request req, Response res) { -// Auth0UserProfile userProfile = req.attribute("user"); - String userId = req.params("id"); - System.out.println("getting status for: " + userId); - return DataManager.userJobsMap.get(userId); - }*/ + // TODO: Admin API route to return active jobs for all application users. + private static Set getAllJobsRoute(Request req, Response res) { + Auth0UserProfile userProfile = req.attribute("user"); + if (!userProfile.canAdministerApplication()) { + haltWithMessage(req, 401, "User not authorized to view all jobs"); + } + return getAllJobs(); + } + + public static Set getAllJobs() { + return DataManager.userJobsMap.values().stream() + .flatMap(Collection::stream) + .collect(Collectors.toSet()); + } + + /** + * API route that returns single job by ID from among the jobs for the currently authenticated user. + */ + private static MonitorableJob getOneJobRoute(Request req, Response res) { + String jobId = req.params("jobId"); + Auth0UserProfile userProfile = req.attribute("user"); + // FIXME: refactor underscore in user_id methods + String userId = userProfile.getUser_id(); + return getJobById(userId, jobId, true); + } + + /** + * Gets a job by user ID and job ID. + * @param clearCompleted if true, remove requested job if it has completed or errored + */ + public static MonitorableJob getJobById(String userId, String jobId, boolean clearCompleted) { + // Get jobs set directly from userJobsMap because we may remove an element from it below. + Set userJobs = DataManager.userJobsMap.get(userId); + if (userJobs == null) { + return null; + } + for (MonitorableJob job : userJobs) { + if (job.jobId.equals(jobId)) { + if (clearCompleted && (job.status.completed || job.status.error)) { + // remove job if completed or errored + userJobs.remove(job); + } + return job; + } + } + // if job is not found (because it doesn't exist or was completed). + return null; + } - public static Set getUserJobs(Request req, Response res) { + /** + * API route that returns a set of active jobs for the currently authenticated user. + */ + public static Set getUserJobsRoute(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); + // FIXME: refactor underscore in user_id methods String userId = userProfile.getUser_id(); - return DataManager.userJobsMap.containsKey(userId) - ? DataManager.userJobsMap.get(userId) - : new HashSet<>(); + // Get a copy of all existing jobs before we purge the completed ones. + return getJobsByUserId(userId, true); + } + + public static Set filterJobsByType (MonitorableJob.JobType ...jobType) { + return getAllJobs().stream() + .filter(job -> Arrays.asList(jobType).contains(job.type)) + .collect(Collectors.toSet()); + } + + /** + * Get set of active jobs by user ID. + * + * @param clearCompleted if true, remove all completed and errored jobs for this user. + */ + private static Set getJobsByUserId(String userId, boolean clearCompleted) { + Set allJobsForUser = DataManager.userJobsMap.get(userId); + if (allJobsForUser == null) { + return Collections.EMPTY_SET; + } + if (clearCompleted) { + // Any active jobs will still have their status updated, so they need to be retrieved again with any status + // updates. All completed or errored jobs are in their final state and will not be updated any longer, so we + // remove them once the client has seen them. + ConcurrentHashSet jobsStillActive = filterActiveJobs(allJobsForUser); + + DataManager.userJobsMap.put(userId, jobsStillActive); + } + return allJobsForUser; + } + + public static ConcurrentHashSet filterActiveJobs(Set jobs) { + ConcurrentHashSet jobsStillActive = new ConcurrentHashSet<>(); + jobs.stream() + .filter(job -> !job.status.completed && !job.status.error) + .forEach(jobsStillActive::add); + return jobsStillActive; } public static void register (String apiPrefix) { - options(apiPrefix + "public/status", (q, s) -> ""); - get(apiPrefix + "secure/status/jobs", StatusController::getUserJobs, json::write); + + // These endpoints return all jobs for the current user, all application jobs, or a specific job + get(apiPrefix + "secure/status/jobs", StatusController::getUserJobsRoute, json::write); + // FIXME Change endpoint for all jobs (to avoid overlap with jobId param)? + get(apiPrefix + "secure/status/jobs/all", StatusController::getAllJobsRoute, json::write); + get(apiPrefix + "secure/status/jobs/:jobId", StatusController::getOneJobRoute, json::write); } } diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java index 05896b077..eb6ff7793 100644 --- a/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java +++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java @@ -1,19 +1,24 @@ package com.conveyal.datatools.manager.controllers.api; +import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.conveyal.datatools.manager.auth.Auth0Users; import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.JsonViews; import com.conveyal.datatools.manager.models.Note; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.json.JsonManager; -import com.conveyal.datatools.manager.DataManager; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.*; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; @@ -22,23 +27,26 @@ import spark.Request; import spark.Response; -import java.io.*; +import java.io.IOException; +import java.io.Serializable; import java.net.URLEncoder; -import java.time.Instant; -import java.time.LocalDate; -import java.time.ZoneId; +import java.time.ZoneOffset; import java.time.ZonedDateTime; -import java.util.*; - -import com.conveyal.datatools.manager.auth.Auth0Users; - -import javax.persistence.Entity; +import java.util.ArrayList; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; import static com.conveyal.datatools.manager.auth.Auth0Users.getUserById; -import static spark.Spark.*; +import static spark.Spark.delete; +import static spark.Spark.get; +import static spark.Spark.post; +import static spark.Spark.put; /** - * Created by landon on 3/29/16. + * Handles the HTTP endpoints related to CRUD operations for Auth0 users. */ public class UserController { @@ -50,7 +58,11 @@ public class UserController { public static JsonManager json = new JsonManager<>(Project.class, JsonViews.UserInterface.class); - public static Object getUser(Request req, Response res) throws IOException { + /** + * HTTP endpoint to get a single Auth0 user for the application (by specified ID param). Note, this uses a different + * Auth0 API (get user) than the other get methods (user search query). + */ + private static String getUser(Request req, Response res) throws IOException { String url = "https://" + AUTH0_DOMAIN + "/api/v2/users/" + URLEncoder.encode(req.params("id"), "UTF-8"); String charset = "UTF-8"; @@ -65,42 +77,62 @@ public static Object getUser(Request req, Response res) throws IOException { return result; } - public static Object getAllUsers(Request req, Response res) throws IOException { + /** + * HTTP endpoint to get all users for the application (using a filtered search on all users for the Auth0 tenant). + */ + private static String getAllUsers(Request req, Response res) throws IOException { res.type("application/json"); int page = Integer.parseInt(req.queryParams("page")); - String queryString = getUserQuery(req); - Object users = mapper.readTree(Auth0Users.getAuth0Users(queryString, page)); + String queryString = filterUserSearchQuery(req); + String users = Auth0Users.getAuth0Users(queryString, page); return users; } - private static String getUserQuery(Request req) { + /** + * Filters a search query for users by the query string and the requesting user's permissions. For example, an + * organization admin is only permitted to view the users assigned to that organization, whereas an application + * admin can view all users for all organizations. + */ + private static String filterUserSearchQuery(Request req) { Auth0UserProfile userProfile = req.attribute("user"); String queryString = req.queryParams("queryString"); if(queryString != null) queryString = "email:" + queryString + "*"; if (userProfile.canAdministerApplication()) { - // do nothing, proceed with search - } - else if (userProfile.canAdministerOrganization()) { + // do not filter further based on permissions, proceed with search + return queryString; + } else if (userProfile.canAdministerOrganization()) { + String organizationId = userProfile.getOrganizationId(); // filter by organization_id if (queryString == null) { - queryString = "app_metadata.datatools.organizations.organization_id:" + userProfile.getOrganizationId(); + queryString = "app_metadata.datatools.organizations.organization_id:" + organizationId; } else { - queryString += " AND app_metadata.datatools.organizations.organization_id:" + userProfile.getOrganizationId(); + queryString += " AND app_metadata.datatools.organizations.organization_id:" + organizationId; } + return queryString; } else { - halt(401, "Must be application or organization admin to view users"); + haltWithMessage(req, 401, "Must be application or organization admin to view users"); + // Return statement cannot be reached due to halt. + return null; } - return queryString; } - public static Object getUserCount(Request req, Response res) throws IOException { + /** + * Gets the total count of users that match the filtered user search query. + */ + private static int getUserCount(Request req, Response res) throws IOException { res.type("application/json"); - String queryString = getUserQuery(req); + String queryString = filterUserSearchQuery(req); return Auth0Users.getAuth0UserCount(queryString); } - public static Object createPublicUser(Request req, Response res) throws IOException { + /** + * HTTP endpoint to create a "public user" that has no permissions to access projects in the application. + * + * Note, this passes a "blank" app_metadata object to the newly created user, so there is no risk of someone + * injecting permissions somehow into the create user request. + */ + private static String createPublicUser(Request req, Response res) throws IOException { String url = "https://" + AUTH0_DOMAIN + "/api/v2/users"; String charset = "UTF-8"; @@ -109,7 +141,12 @@ public static Object createPublicUser(Request req, Response res) throws IOExcept request.setHeader("Accept-Charset", charset); request.setHeader("Content-Type", "application/json"); JsonNode jsonNode = mapper.readTree(req.body()); - String json = String.format("{ \"connection\": \"Username-Password-Authentication\", \"email\": %s, \"password\": %s, \"app_metadata\": {\"datatools\": [{\"permissions\": [], \"projects\": [], \"subscriptions\": [], \"client_id\": \"%s\" }] } }", jsonNode.get("email"), jsonNode.get("password"), AUTH0_CLIENT_ID); + String json = String.format("{" + + "\"connection\": \"Username-Password-Authentication\"," + + "\"email\": %s," + + "\"password\": %s," + + "\"app_metadata\": {\"datatools\": [{\"permissions\": [], \"projects\": [], \"subscriptions\": [], \"client_id\": \"%s\" }] } }", + jsonNode.get("email"), jsonNode.get("password"), AUTH0_CLIENT_ID); HttpEntity entity = new ByteArrayEntity(json.getBytes(charset)); request.setEntity(entity); @@ -117,12 +154,17 @@ public static Object createPublicUser(Request req, Response res) throws IOExcept HttpResponse response = client.execute(request); String result = EntityUtils.toString(response.getEntity()); int statusCode = response.getStatusLine().getStatusCode(); - if(statusCode >= 300) halt(statusCode, response.toString()); + if(statusCode >= 300) haltWithMessage(req, statusCode, response.toString()); return result; } - public static Object createUser(Request req, Response res) throws IOException { + /** + * HTTP endpoint to create new Auth0 user for the application. + * + * FIXME: This endpoint fails if the user's email already exists in the Auth0 tenant. + */ + private static String createUser(Request req, Response res) throws IOException { String url = "https://" + AUTH0_DOMAIN + "/api/v2/users"; String charset = "UTF-8"; @@ -131,7 +173,12 @@ public static Object createUser(Request req, Response res) throws IOException { request.setHeader("Accept-Charset", charset); request.setHeader("Content-Type", "application/json"); JsonNode jsonNode = mapper.readTree(req.body()); - String json = String.format("{ \"connection\": \"Username-Password-Authentication\", \"email\": %s, \"password\": %s, \"app_metadata\": {\"datatools\": [%s] } }", jsonNode.get("email"), jsonNode.get("password"), jsonNode.get("permissions")); + String json = String.format("{" + + "\"connection\": \"Username-Password-Authentication\"," + + "\"email\": %s," + + "\"password\": %s," + + "\"app_metadata\": {\"datatools\": [%s] } }" + , jsonNode.get("email"), jsonNode.get("password"), jsonNode.get("permissions")); HttpEntity entity = new ByteArrayEntity(json.getBytes(charset)); request.setEntity(entity); @@ -140,14 +187,14 @@ public static Object createUser(Request req, Response res) throws IOException { String result = EntityUtils.toString(response.getEntity()); int statusCode = response.getStatusLine().getStatusCode(); - if(statusCode >= 300) halt(statusCode, response.toString()); + if(statusCode >= 300) haltWithMessage(req, statusCode, response.toString()); System.out.println(result); return result; } - public static Object updateUser(Request req, Response res) throws IOException { + private static Object updateUser(Request req, Response res) throws IOException { String userId = req.params("id"); Auth0UserProfile user = getUserById(userId); @@ -164,7 +211,7 @@ public static Object updateUser(Request req, Response res) throws IOException { request.setHeader("Content-Type", "application/json"); JsonNode jsonNode = mapper.readTree(req.body()); -// JsonNode data = mapper.readValue(jsonNode.get("data"), Auth0UserProfile.DatatoolsInfo.class); //jsonNode.get("data"); +// JsonNode data = mapper.readValue(jsonNode.retrieveById("data"), Auth0UserProfile.DatatoolsInfo.class); //jsonNode.retrieveById("data"); JsonNode data = jsonNode.get("data"); System.out.println(data.asText()); Iterator> fieldsIter = data.fields(); @@ -173,7 +220,7 @@ public static Object updateUser(Request req, Response res) throws IOException { System.out.println(entry.getValue()); } // if (!data.has("client_id")) { -// ((ObjectNode)data).put("client_id", DataManager.config.get("auth0").get("client_id").asText()); +// ((ObjectNode)data).put("client_id", DataManager.config.retrieveById("auth0").retrieveById("client_id").asText()); // } String json = "{ \"app_metadata\": { \"datatools\" : " + data + " }}"; System.out.println(json); @@ -187,7 +234,7 @@ public static Object updateUser(Request req, Response res) throws IOException { return mapper.readTree(result); } - public static Object deleteUser(Request req, Response res) throws IOException { + private static Object deleteUser(Request req, Response res) throws IOException { String url = "https://" + AUTH0_DOMAIN + "/api/v2/users/" + URLEncoder.encode(req.params("id"), "UTF-8"); String charset = "UTF-8"; @@ -198,59 +245,176 @@ public static Object deleteUser(Request req, Response res) throws IOException { HttpClient client = HttpClientBuilder.create().build(); HttpResponse response = client.execute(request); int statusCode = response.getStatusLine().getStatusCode(); - if(statusCode >= 300) halt(statusCode, response.getStatusLine().getReasonPhrase()); + if(statusCode >= 300) haltWithMessage(req, statusCode, response.getStatusLine().getReasonPhrase()); return true; } - public static Object getRecentActivity(Request req, Response res) { + private static Object getRecentActivity(Request req, Response res) { Auth0UserProfile userProfile = req.attribute("user"); - String from = req.queryParams("from"); - String to = req.queryParams("to"); -// if (from == null || to == null) { -// halt(400, "Please provide valid from/to dates"); -// } - List activity = new ArrayList<>(); - for (Auth0UserProfile.Subscription sub : userProfile.getApp_metadata().getDatatoolsInfo().getSubscriptions()) { + /* TODO: Allow custom from/to range + String fromStr = req.queryParams("from"); + String toStr = req.queryParams("to"); */ + + // Default range: past 7 days + ZonedDateTime from = ZonedDateTime.now(ZoneOffset.UTC).minusDays(7); + ZonedDateTime to = ZonedDateTime.now(ZoneOffset.UTC); + + List activityList = new ArrayList<>(); + Auth0UserProfile.DatatoolsInfo datatools = userProfile.getApp_metadata().getDatatoolsInfo(); + if (datatools == null) { + // NOTE: this condition will also occur if DISABLE_AUTH is set to true + haltWithMessage(req, 403, "User does not have permission to access to this application"); + } + + Auth0UserProfile.Subscription[] subscriptions = datatools.getSubscriptions(); + if (subscriptions == null) return activityList; + + /* NOTE: as of May-08-2018 we decided to limit subscriptions to two types: + * 'feed-updated' and 'project-updated'. Comment subscriptions are now always + * assumed if the containing 'feed-updated' subscription is active + */ + for (Auth0UserProfile.Subscription sub : subscriptions) { switch (sub.getType()) { - // TODO: add all activity types - case "feed-commented-on": + case "feed-updated": + for (String targetId : sub.getTarget()) { + FeedSource fs = Persistence.feedSources.getById(targetId); + if (fs == null) continue; + + // FeedSource comments + for (Note note : fs.retrieveNotes()) { + ZonedDateTime datePosted = toZonedDateTime(note.date); + if (datePosted.isBefore(from) || datePosted.isAfter(to)) continue; + activityList.add(new FeedSourceCommentActivity(note, fs)); + } + + // Iterate through this Feed's FeedVersions + for(FeedVersion version : fs.retrieveFeedVersions()) { + // FeedVersion creation event + ZonedDateTime dateCreated = toZonedDateTime(fs.dateCreated); + if (dateCreated.isAfter(from) && dateCreated.isBefore(to)) { + activityList.add(new FeedVersionCreationActivity(version, fs)); + } + + // FeedVersion comments + for (Note note : version.retrieveNotes()) { + ZonedDateTime datePosted = toZonedDateTime(note.date); + if (datePosted.isBefore(from) || datePosted.isAfter(to)) continue; + activityList.add(new FeedVersionCommentActivity(note, fs, version)); + } + } + } + break; + + case "project-updated": + // Iterate through Project IDs, skipping any that don't resolve to actual projects for (String targetId : sub.getTarget()) { - FeedSource fs = FeedSource.get(targetId); - if(fs == null) continue; - for (Note note : fs.getNotes()) { - // TODO: Check if actually recent -// if (note.date.after(Date.from(Instant.ofEpochSecond(from))) && note.date.before(Date.from(Instant.ofEpochSecond(to)))) { - Activity act = new Activity(); - act.type = sub.getType(); - act.userId = note.userId; - act.userName = note.userEmail; - act.body = note.body; - act.date = note.date; - act.targetId = targetId; - act.targetName = fs.name; - activity.add(act); -// } + Project proj = Persistence.projects.getById(targetId); + if (proj == null) continue; + + // Iterate through Project's FeedSources, creating "Feed created" items as needed + for (FeedSource fs : proj.retrieveProjectFeedSources()) { + ZonedDateTime dateCreated = toZonedDateTime(fs.dateCreated); + if (dateCreated.isBefore(from) || dateCreated.isAfter(to)) continue; + activityList.add(new FeedSourceCreationActivity(fs, proj)); } } break; } } - return activity; + return activityList; + } + + private static ZonedDateTime toZonedDateTime (Date date) { + return ZonedDateTime.ofInstant(date.toInstant(), ZoneOffset.UTC); } - static class Activity implements Serializable { + static abstract class Activity implements Serializable { + private static final long serialVersionUID = 1L; public String type; public String userId; public String userName; - public String body; - public String targetId; - public String targetName; public Date date; } + static class FeedSourceCreationActivity extends Activity { + private static final long serialVersionUID = 1L; + public String feedSourceId; + public String feedSourceName; + public String projectId; + public String projectName; + + public FeedSourceCreationActivity(FeedSource fs, Project proj) { + this.type = "feed-created"; + this.date = fs.dateCreated; + this.userId = fs.userId; + this.userName = fs.userEmail; + this.feedSourceId = fs.id; + this.feedSourceName = fs.name; + this.projectId = proj.id; + this.projectName = proj.name; + } + } + + static class FeedVersionCreationActivity extends Activity { + private static final long serialVersionUID = 1L; + public Integer feedVersionIndex; + public String feedVersionName; + public String feedSourceId; + public String feedSourceName; + + public FeedVersionCreationActivity(FeedVersion version, FeedSource fs) { + this.type = "version-created"; + this.date = version.dateCreated; + this.userId = version.userId; + this.userName = version.userEmail; + this.feedVersionIndex = version.version; + this.feedVersionName = version.name; + this.feedSourceId = fs.id; + this.feedSourceName = fs.name; + } + } + + static abstract class CommentActivity extends Activity { + private static final long serialVersionUID = 1L; + public String body; + + public CommentActivity (Note note) { + this.date = note.date; + this.userId = note.userId; + this.userName = note.userEmail; + this.body = note.body; + } + } + + static class FeedSourceCommentActivity extends CommentActivity { + private static final long serialVersionUID = 1L; + public String feedSourceId; + public String feedSourceName; + + public FeedSourceCommentActivity(Note note, FeedSource feedSource) { + super(note); + this.type = "feed-commented-on"; + this.feedSourceId = feedSource.id; + this.feedSourceName = feedSource.name; + } + } + + static class FeedVersionCommentActivity extends FeedSourceCommentActivity { + private static final long serialVersionUID = 1L; + public Integer feedVersionIndex; + public String feedVersionName; + + public FeedVersionCommentActivity(Note note, FeedSource feedSource, FeedVersion version) { + super(note, feedSource); + this.type = "version-commented-on"; + this.feedVersionIndex = version.version; + this.feedVersionName = version.name; + } + } + public static void register (String apiPrefix) { get(apiPrefix + "secure/user/:id", UserController::getUser, json::write); get(apiPrefix + "secure/user/:id/recentactivity", UserController::getRecentActivity, json::write); diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java index dbdbf9d63..c70d83baa 100644 --- a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java +++ b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java @@ -1,10 +1,5 @@ package com.conveyal.datatools.manager.extensions.mtc; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.auth.profile.ProfileCredentialsProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.model.PutObjectRequest; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.extensions.ExternalFeedResource; @@ -12,11 +7,14 @@ import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.BufferedReader; +import java.io.File; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.lang.reflect.Field; @@ -24,6 +22,8 @@ import java.net.MalformedURLException; import java.net.URL; +import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId; + /** * Created by demory on 3/30/16. */ @@ -33,16 +33,18 @@ public class MtcFeedResource implements ExternalFeedResource { private String rtdApi, s3Bucket, s3Prefix, s3CredentialsFilename; + public static final String AGENCY_ID = "AgencyId"; + public static final String RESOURCE_TYPE = "MTC"; public MtcFeedResource() { - rtdApi = DataManager.getConfigPropertyAsText("extensions.mtc.rtd_api"); - s3Bucket = DataManager.getConfigPropertyAsText("extensions.mtc.s3_bucket"); - s3Prefix = DataManager.getConfigPropertyAsText("extensions.mtc.s3_prefix"); - //s3CredentialsFilename = DataManager.config.get("extensions").get("mtc").get("s3_credentials_file").asText(); + rtdApi = DataManager.getExtensionPropertyAsText(RESOURCE_TYPE, "rtd_api"); + s3Bucket = DataManager.getExtensionPropertyAsText(RESOURCE_TYPE, "s3_bucket"); + s3Prefix = DataManager.getExtensionPropertyAsText(RESOURCE_TYPE, "s3_prefix"); + //s3CredentialsFilename = DataManager.config.retrieveById("extensions").retrieveById("mtc").retrieveById("s3_credentials_file").asText(); } @Override public String getResourceType() { - return "MTC"; + return RESOURCE_TYPE; } @Override @@ -53,7 +55,7 @@ public void importFeedsForProject(Project project, String authHeader) { try { url = new URL(rtdApi + "/Carrier"); } catch(MalformedURLException ex) { - LOG.error("Could not construct URL for RTD API: " + rtdApi); + LOG.error("Could not construct URL for RTD API: {}", rtdApi); return; } @@ -67,12 +69,12 @@ public void importFeedsForProject(Project project, String authHeader) { con.setRequestProperty("User-Agent", "User-Agent"); // add auth header - System.out.println("authHeader="+authHeader); + LOG.info("authHeader="+authHeader); con.setRequestProperty("Authorization", authHeader); int responseCode = con.getResponseCode(); - System.out.println("\nSending 'GET' request to URL : " + url); - System.out.println("Response Code : " + responseCode); + LOG.info("Sending 'GET' request to URL : " + url); + LOG.info("Response Code : " + responseCode); BufferedReader in = new BufferedReader( new InputStreamReader(con.getInputStream())); @@ -90,16 +92,16 @@ public void importFeedsForProject(Project project, String authHeader) { // String className = "RtdCarrier"; // Object car = Class.forName(className).newInstance(); RtdCarrier car = results[i]; - //System.out.println("car id=" + car.AgencyId + " name=" + car.AgencyName); + //LOG.info("car id=" + car.AgencyId + " name=" + car.AgencyName); FeedSource source = null; // check if a FeedSource with this AgencyId already exists - for (FeedSource existingSource : project.getProjectFeedSources()) { - ExternalFeedSourceProperty agencyIdProp = - ExternalFeedSourceProperty.find(existingSource, this.getResourceType(), "AgencyId"); + for (FeedSource existingSource : project.retrieveProjectFeedSources()) { + ExternalFeedSourceProperty agencyIdProp; + agencyIdProp = Persistence.externalFeedSourceProperties.getById(constructId(existingSource, this.getResourceType(), AGENCY_ID)); if (agencyIdProp != null && agencyIdProp.value != null && agencyIdProp.value.equals(car.AgencyId)) { - //System.out.println("already exists: " + car.AgencyId); + //LOG.info("already exists: " + car.AgencyId); source = existingSource; } } @@ -118,16 +120,21 @@ public void importFeedsForProject(Project project, String authHeader) { } else source.name = feedName; - source.setProject(project); - - source.save(); + source.projectId = project.id; + // Store the feed source. + Persistence.feedSources.create(source); // create / update the properties for(Field carrierField : car.getClass().getDeclaredFields()) { String fieldName = carrierField.getName(); String fieldValue = carrierField.get(car) != null ? carrierField.get(car).toString() : null; - ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), fieldName, fieldValue); + ExternalFeedSourceProperty prop = new ExternalFeedSourceProperty(source, this.getResourceType(), fieldName, fieldValue); + if (Persistence.externalFeedSourceProperties.getById(prop.id) == null) { + Persistence.externalFeedSourceProperties.create(prop); + } else { + Persistence.externalFeedSourceProperties.updateField(prop.id, fieldName, fieldValue); + } } } } catch(Exception ex) { @@ -136,90 +143,65 @@ public void importFeedsForProject(Project project, String authHeader) { } } + /** + * Do nothing for now. Creating a new agency for RTD requires adding the AgencyId property (when it was previously + * null. See {@link #propertyUpdated(ExternalFeedSourceProperty, String, String)}. + */ @Override public void feedSourceCreated(FeedSource source, String authHeader) { - LOG.info("Processing new FeedSource " + source.name + " for RTD"); - - RtdCarrier carrier = new RtdCarrier(); - carrier.AgencyName = source.name; - - try { - for (Field carrierField : carrier.getClass().getDeclaredFields()) { - String fieldName = carrierField.getName(); - String fieldValue = carrierField.get(carrier) != null ? carrierField.get(carrier).toString() : null; - ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), fieldName, fieldValue); - } - } catch (Exception e) { - LOG.error("Error creating external properties for new FeedSource"); - } + LOG.info("Processing new FeedSource {} for RTD. (No action taken.)", source.name); } + /** + * Sync an updated property with the RTD database. Note: if the property is AgencyId and the value was previously + * null create/register a new carrier with RTD. + */ @Override - public void propertyUpdated(ExternalFeedSourceProperty property, String previousValue, String authHeader) { - LOG.info("Update property in MTC carrier table: " + property.name); - - // sync w/ RTD - RtdCarrier carrier = new RtdCarrier(); - String feedSourceId = property.getFeedSourceId(); - FeedSource source = FeedSource.get(feedSourceId); - - carrier.AgencyId = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyId").value; - carrier.AgencyPhone = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyPhone").value; - carrier.AgencyName = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyName").value; - carrier.RttAgencyName = ExternalFeedSourceProperty.find(source, this.getResourceType(), "RttAgencyName").value; - carrier.RttEnabled = ExternalFeedSourceProperty.find(source, this.getResourceType(), "RttEnabled").value; - carrier.AgencyShortName = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyShortName").value; - carrier.AgencyPublicId = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyPublicId").value; - carrier.AddressLat = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AddressLat").value; - carrier.AddressLon = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AddressLon").value; - carrier.DefaultRouteType = ExternalFeedSourceProperty.find(source, this.getResourceType(), "DefaultRouteType").value; - carrier.CarrierStatus = ExternalFeedSourceProperty.find(source, this.getResourceType(), "CarrierStatus").value; - carrier.AgencyAddress = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyAddress").value; - carrier.AgencyEmail = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyEmail").value; - carrier.AgencyUrl = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyUrl").value; - carrier.AgencyFareUrl = ExternalFeedSourceProperty.find(source, this.getResourceType(), "AgencyFareUrl").value; - - if(property.name.equals("AgencyId") && previousValue == null) { + public void propertyUpdated(ExternalFeedSourceProperty updatedProperty, String previousValue, String authHeader) { + LOG.info("Update property in MTC carrier table: " + updatedProperty.name); + String feedSourceId = updatedProperty.feedSourceId; + FeedSource source = Persistence.feedSources.getById(feedSourceId); + RtdCarrier carrier = new RtdCarrier(source); + + if(updatedProperty.name.equals(AGENCY_ID) && previousValue == null) { + // If the property being updated is the agency ID field and it previously was null, this indicates that a + // new carrier should be written to the RTD. writeCarrierToRtd(carrier, true, authHeader); - } - else { + } else { + // Otherwise, this is just a standard prop update. writeCarrierToRtd(carrier, false, authHeader); } } + /** + * When feed version is created/published, write the feed to the shared S3 bucket. + */ @Override public void feedVersionCreated(FeedVersion feedVersion, String authHeader) { - LOG.info("Pushing to MTC S3 Bucket " + s3Bucket); - - if(s3Bucket == null) return; - - AWSCredentials creds; - if (this.s3CredentialsFilename != null) { - creds = new ProfileCredentialsProvider(this.s3CredentialsFilename, "default").getCredentials(); - LOG.info("Writing to S3 using supplied credentials file"); - } - else { - // default credentials providers, e.g. IAM role - creds = new DefaultAWSCredentialsProviderChain().getCredentials(); + if(s3Bucket == null) { + LOG.error("Cannot push {} to S3 bucket. No bucket name specified.", feedVersion.id); + return; } + // Construct agency ID from feed source and retrieve from MongoDB. + ExternalFeedSourceProperty agencyIdProp = Persistence.externalFeedSourceProperties.getById( + constructId(feedVersion.parentFeedSource(), this.getResourceType(), AGENCY_ID) + ); - ExternalFeedSourceProperty agencyIdProp = - ExternalFeedSourceProperty.find(feedVersion.getFeedSource(), this.getResourceType(), "AgencyId"); - - if(agencyIdProp == null || agencyIdProp.equals("null")) { - LOG.error("Could not read AgencyId for FeedSource " + feedVersion.feedSourceId); + if(agencyIdProp == null || agencyIdProp.value.equals("null")) { + LOG.error("Could not read {} for FeedSource {}", AGENCY_ID, feedVersion.feedSourceId); return; } - String keyName = this.s3Prefix + agencyIdProp.value + ".zip"; + String keyName = String.format("%s%s.zip", this.s3Prefix, agencyIdProp.value); LOG.info("Pushing to MTC S3 Bucket: " + keyName); - - AmazonS3 s3client = new AmazonS3Client(creds); - s3client.putObject(new PutObjectRequest( - s3Bucket, keyName, feedVersion.getGtfsFile())); + File file = feedVersion.retrieveGtfsFile(); + FeedStore.s3Client.putObject(new PutObjectRequest(s3Bucket, keyName, file)); } + /** + * Update or create a carrier and its properties with an HTTP request to the RTD. + */ private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String authHeader) { try { @@ -228,7 +210,7 @@ private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String aut String carrierJson = mapper.writeValueAsString(carrier); URL rtdUrl = new URL(rtdApi + "/Carrier/" + (createNew ? "" : carrier.AgencyId)); - LOG.info("Writing to RTD URL: " + rtdUrl); + LOG.info("Writing to RTD URL: {}", rtdUrl); HttpURLConnection connection = (HttpURLConnection) rtdUrl.openConnection(); connection.setRequestMethod(createNew ? "POST" : "PUT"); @@ -241,10 +223,9 @@ private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String aut osw.write(carrierJson); osw.flush(); osw.close(); - LOG.info("RTD API response: " + connection.getResponseCode() + " / " + connection.getResponseMessage()); + LOG.info("RTD API response: {}/{}", connection.getResponseCode(), connection.getResponseMessage()); } catch (Exception e) { - LOG.error("error writing to RTD"); - e.printStackTrace(); + LOG.error("Error writing to RTD", e); } } } diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java index fe2d6c39f..87bd30cdc 100644 --- a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java +++ b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java @@ -1,8 +1,11 @@ package com.conveyal.datatools.manager.extensions.mtc; import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.annotation.JsonProperty; +import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId; + /** * Created by demory on 3/30/16. */ @@ -63,23 +66,36 @@ public class RtdCarrier { public RtdCarrier() { } - /*public void mapFeedSource(FeedSource source){ - source.defaultGtfsId = this.AgencyId; - source.shortName = this.AgencyShortName; - source.AgencyPhone = this.AgencyPhone; - source.RttAgencyName = this.RttAgencyName; - source.RttEnabled = this.RttEnabled; - source.AgencyShortName = this.AgencyShortName; - source.AgencyPublicId = this.AgencyPublicId; - source.AddressLat = this.AddressLat; - source.AddressLon = this.AddressLon; - source.DefaultRouteType = this.DefaultRouteType; - source.CarrierStatus = this.CarrierStatus; - source.AgencyAddress = this.AgencyAddress; - source.AgencyEmail = this.AgencyEmail; - source.AgencyUrl = this.AgencyUrl; - source.AgencyFareUrl = this.AgencyFareUrl; - - source.save(); - }*/ + /** + * Construct an RtdCarrier given the provided feed source. + * @param source + */ + public RtdCarrier(FeedSource source) { + AgencyId = getValueForField(source, MtcFeedResource.AGENCY_ID); + AgencyPhone = getValueForField(source, "AgencyPhone"); + AgencyName = getValueForField(source, "AgencyName"); + RttAgencyName = getValueForField(source, "RttAgencyName"); + RttEnabled = getValueForField(source, "RttEnabled"); + AgencyShortName = getValueForField(source, "AgencyShortName"); + AgencyPublicId = getValueForField(source, "AgencyPublicId"); + AddressLat = getValueForField(source, "AddressLat"); + AddressLon = getValueForField(source, "AddressLon"); + DefaultRouteType = getValueForField(source, "DefaultRouteType"); + CarrierStatus = getValueForField(source, "CarrierStatus"); + AgencyAddress = getValueForField(source, "AgencyAddress"); + AgencyEmail = getValueForField(source, "AgencyEmail"); + AgencyUrl = getValueForField(source, "AgencyUrl"); + AgencyFareUrl = getValueForField(source, "AgencyFareUrl"); + } + + private String getPropId(FeedSource source, String fieldName) { + return constructId(source, MtcFeedResource.RESOURCE_TYPE, fieldName); + } + + /** + * FIXME: Are there cases where this might throw NPEs? + */ + private String getValueForField (FeedSource source, String fieldName) { + return Persistence.externalFeedSourceProperties.getById(getPropId(source, fieldName)).value; + } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java index fc04f5f5a..421e6a314 100644 --- a/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java +++ b/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java @@ -6,6 +6,7 @@ import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; @@ -18,6 +19,8 @@ import java.net.MalformedURLException; import java.net.URL; +import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId; + /** * Created by demory on 3/31/16. */ @@ -101,10 +104,10 @@ public void importFeedsForProject(Project project, String authHeader) { } // test that feed falls in bounding box (if box exists) - if (project.north != null) { + if (project.bounds != null) { Double lat = feed.get("l").get("lat").asDouble(); Double lng = feed.get("l").get("lng").asDouble(); - if (lat < project.south || lat > project.north || lng < project.west || lng > project.east) { + if (lat < project.bounds.south || lat > project.bounds.north || lng < project.bounds.west || lng > project.bounds.east) { continue; } } @@ -113,9 +116,9 @@ public void importFeedsForProject(Project project, String authHeader) { String tfId = feed.get("id").asText(); // check if a feed already exists with this id - for (FeedSource existingSource : project.getProjectFeedSources()) { + for (FeedSource existingSource : project.retrieveProjectFeedSources()) { ExternalFeedSourceProperty idProp = - ExternalFeedSourceProperty.find(existingSource, this.getResourceType(), "id"); + Persistence.externalFeedSourceProperties.getById(constructId(existingSource, this.getResourceType(), "id")); if (idProp != null && idProp.value.equals(tfId)) { source = existingSource; } @@ -128,7 +131,7 @@ public void importFeedsForProject(Project project, String authHeader) { else source.name = feedName; source.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; - source.setName(feedName); + source.name = feedName; System.out.println(source.name); try { @@ -143,11 +146,13 @@ public void importFeedsForProject(Project project, String authHeader) { LOG.error("Error constructing URLs from TransitFeeds API response"); } - source.setProject(project); - source.save(); + source.projectId = project.id; + // FIXME: Store feed source +// source.save(); // create/update the external props - ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), "id", tfId); + // FIXME: Add this back in +// ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), "id", tfId); } if (transitFeedNode.get("results").get("page") == transitFeedNode.get("results").get("numPages")){ diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeed.java b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeed.java index e5cfcc40b..67adae9a8 100644 --- a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeed.java +++ b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeed.java @@ -125,35 +125,22 @@ public TransitLandFeed(JsonNode jsonMap){ this.license_attribution_text = jsonMap.get("license_attribution_text").asText(); this.last_fetched_at = jsonMap.get("last_fetched_at").asText(); this.last_imported_at = jsonMap.get("last_imported_at").asText(); - this.latest_fetch_exception_log = jsonMap.get("latest_fetch_exception_log").asText(); +// this.latest_fetch_exception_log = jsonMap.retrieveById("latest_fetch_exception_log").asText(); this.import_status = jsonMap.get("import_status").asText(); this.created_at = jsonMap.get("created_at").asText(); this.updated_at = jsonMap.get("updated_at").asText(); this.feed_versions_count = jsonMap.get("feed_versions_count").asText(); this.feed_versions_url = jsonMap.get("feed_versions_url").asText(); -// this.feed_versions = jsonMap.get("feed_versions").asText(); +// this.feed_versions = jsonMap.retrieveById("feed_versions").asText(); this.active_feed_version = jsonMap.get("active_feed_version").asText(); this.import_level_of_active_feed_version = jsonMap.get("import_level_of_active_feed_version").asText(); this.created_or_updated_in_changeset_id = jsonMap.get("created_or_updated_in_changeset_id").asText(); this.changesets_imported_from_this_feed = jsonMap.get("changesets_imported_from_this_feed").asText(); this.operators_in_feed = jsonMap.get("operators_in_feed").asText(); -// this.gtfs_agency_id = jsonMap.get("gtfs_agency_id").asText(); -// this.operator_onestop_id = jsonMap.get("operator_onestop_id").asText(); -// this.feed_onestop_id = jsonMap.get("feed_onestop_id").asText(); -// this.operator_url = jsonMap.get("operator_url").asText(); -// this.feed_url = jsonMap.get("feed_url").asText(); - } - - public void mapFeedSource(FeedSource source){ - - // set the - source.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; - try { - source.url = new URL(this.url); - } catch (MalformedURLException e) { - e.printStackTrace(); - } - - source.save(); +// this.gtfs_agency_id = jsonMap.retrieveById("gtfs_agency_id").asText(); +// this.operator_onestop_id = jsonMap.retrieveById("operator_onestop_id").asText(); +// this.feed_onestop_id = jsonMap.retrieveById("feed_onestop_id").asText(); +// this.operator_url = jsonMap.retrieveById("operator_url").asText(); +// this.feed_url = jsonMap.retrieveById("feed_url").asText(); } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java index 37ca9d10c..7d1404966 100644 --- a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java +++ b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java @@ -6,19 +6,21 @@ import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.BufferedReader; -import java.io.IOException; import java.io.InputStreamReader; import java.lang.reflect.Field; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; +import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId; + /** * Created by demory on 3/31/16. */ @@ -43,83 +45,116 @@ public void importFeedsForProject(Project project, String authHeader) { LOG.info("Importing TransitLand feeds"); URL url = null; ObjectMapper mapper = new ObjectMapper(); + int perPage = 10000; + int count = 0; + int offset; + int total = 0; String locationFilter = ""; - if (project.north != null && project.south != null && project.east != null && project.west != null) - locationFilter = "&bbox=" + project.west + "," + + project.south + "," + project.east + "," + project.north; + boolean nextPage = true; - try { - url = new URL(api + "?per_page=10000" + locationFilter); - } catch (MalformedURLException ex) { - LOG.error("Error constructing TransitLand API URL"); + if (project.bounds != null) { + locationFilter = "&bbox=" + project.bounds.toTransitLandString(); } - try { - HttpURLConnection con = (HttpURLConnection) url.openConnection(); - - // optional default is GET - con.setRequestMethod("GET"); - - //add request header - con.setRequestProperty("User-Agent", "User-Agent"); - - int responseCode = con.getResponseCode(); - System.out.println("\nSending 'GET' request to URL : " + url); - System.out.println("Response Code : " + responseCode); - - BufferedReader in = new BufferedReader( - new InputStreamReader(con.getInputStream())); - String inputLine; - StringBuffer response = new StringBuffer(); - - while ((inputLine = in.readLine()) != null) { - response.append(inputLine); + do { + offset = perPage * count; + try { + url = new URL(api + "?total=true&per_page=" + perPage + "&offset=" + offset + locationFilter); + } catch (MalformedURLException ex) { + LOG.error("Error constructing TransitLand API URL"); } - in.close(); - - String json = response.toString(); - JsonNode node = mapper.readTree(json); - for (JsonNode feed : node.get("feeds")) { - TransitLandFeed tlFeed = new TransitLandFeed(feed); + try { + HttpURLConnection con = (HttpURLConnection) url.openConnection(); - FeedSource source = null; + // optional default is GET + con.setRequestMethod("GET"); - // check if a feed already exists with this id - for (FeedSource existingSource : project.getProjectFeedSources()) { - ExternalFeedSourceProperty onestopIdProp = - ExternalFeedSourceProperty.find(existingSource, this.getResourceType(), "onestop_id"); - if (onestopIdProp != null && onestopIdProp.value.equals(tlFeed.onestop_id)) { - source = existingSource; - } - } + //add request header + con.setRequestProperty("User-Agent", "User-Agent"); - String feedName; - feedName = tlFeed.onestop_id; + int responseCode = con.getResponseCode(); + System.out.println("\nSending 'GET' request to URL : " + url); + System.out.println("Response Code : " + responseCode); - if (source == null) source = new FeedSource(feedName); - else source.name = feedName; - tlFeed.mapFeedSource(source); + BufferedReader in = new BufferedReader( + new InputStreamReader(con.getInputStream())); + String inputLine; + StringBuffer response = new StringBuffer(); - source.setName(feedName); - System.out.println(source.name); - - source.setProject(project); + while ((inputLine = in.readLine()) != null) { + response.append(inputLine); + } + in.close(); + + String json = response.toString(); + JsonNode node = mapper.readTree(json); + total = node.get("meta").get("total").asInt(); + for (JsonNode feed : node.get("feeds")) { + TransitLandFeed tlFeed = new TransitLandFeed(feed); + + FeedSource source = null; + + // Check if a feed source already exists in the project with this id, i.e., a sync + // has already occurred in the past and most feed sources may already exist + for (FeedSource existingSource : project.retrieveProjectFeedSources()) { + ExternalFeedSourceProperty onestopIdProp = + Persistence.externalFeedSourceProperties.getById(constructId(existingSource, this.getResourceType(), "onestop_id")); + if (onestopIdProp != null && onestopIdProp.value.equals(tlFeed.onestop_id)) { + source = existingSource; + } + } - source.save(); + String feedName; + feedName = tlFeed.onestop_id; + + // FIXME: lots of duplicated code here, but I'm not sure if Mongo has an updateOrCreate function. + // Feed source is new, let's store a new one. + if (source == null) { + source = new FeedSource(feedName); + source.projectId = project.id; + source.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; + try { + source.url = new URL(tlFeed.url); + } catch (MalformedURLException e) { + e.printStackTrace(); + } + Persistence.feedSources.create(source); + LOG.info("Creating new feed source: {}", source.name); + } else { + // Feed source already existed. Let's just sync it. + URL feedUrl; + source.retrievalMethod = FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY; + try { + feedUrl = new URL(tlFeed.url); + Persistence.feedSources.updateField(source.id, "url", feedUrl); + } catch (MalformedURLException e) { + e.printStackTrace(); + } + // FIXME: These shouldn't be separate updates. + Persistence.feedSources.updateField(source.id, "name", feedName); + Persistence.feedSources.updateField(source.id, "retrievalMethod", FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY); + LOG.info("Syncing properties: {}", source.name); + } - // create / update the properties + // create / update the properties - for(Field tlField : tlFeed.getClass().getDeclaredFields()) { - String fieldName = tlField.getName(); - String fieldValue = tlField.get(tlFeed) != null ? tlField.get(tlFeed).toString() : null; + for(Field tlField : tlFeed.getClass().getDeclaredFields()) { + String fieldName = tlField.getName(); + String fieldValue = tlField.get(tlFeed) != null ? tlField.get(tlFeed).toString() : null; - ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), fieldName, fieldValue); + // FIXME +// ExternalFeedSourceProperty.updateOrCreate(source, this.getResourceType(), fieldName, fieldValue); + } } + } catch (Exception ex) { + LOG.error("Error reading from TransitLand API"); + ex.printStackTrace(); } - } catch (Exception ex) { - LOG.error("Error reading from TransitLand API"); - ex.printStackTrace(); + count++; } + // iterate over results until most recent total exceeds total feeds in TransitLand + while(offset + perPage < total); } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/BuildTransportNetworkJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/BuildTransportNetworkJob.java deleted file mode 100644 index 6f136fec2..000000000 --- a/src/main/java/com/conveyal/datatools/manager/jobs/BuildTransportNetworkJob.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.conveyal.datatools.manager.jobs; - -import com.conveyal.datatools.common.status.MonitorableJob; -import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.models.FeedSource; -import com.conveyal.datatools.manager.models.FeedVersion; -import com.conveyal.r5.point_to_point.builder.TNBuilderConfig; -import com.conveyal.r5.transit.TransportNetwork; -import org.apache.commons.io.IOUtils; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Map; - -import static com.conveyal.datatools.manager.models.Deployment.getOsmExtract; - -/** - * Created by landon on 4/30/16. - */ -public class BuildTransportNetworkJob extends MonitorableJob { - - public FeedVersion feedVersion; - private TransportNetwork result; - public Status status; - - public BuildTransportNetworkJob (FeedVersion feedVersion, String owner) { - super(owner, "Building Transport Network for " + feedVersion.getFeedSource().name, JobType.BUILD_TRANSPORT_NETWORK); - this.feedVersion = feedVersion; - this.result = null; - this.status = new Status(); - status.message = "Waiting to begin job..."; - } - - @Override - public void run() { - System.out.println("Building network"); - try { - if (feedVersion.validationResult != null) { - feedVersion.buildTransportNetwork(eventBus); - } - else { - synchronized (status) { - status.message = "Transport network skipped because of bad validation."; - status.percentComplete = 100; - status.error = true; - status.completed = true; - } - } - } catch (Exception e) { - e.printStackTrace(); - synchronized (status) { - status.message = "Transport network failed!"; - status.percentComplete = 100; - status.error = true; - status.completed = true; - } - } - if (!status.error) { - synchronized (status) { - status.message = "Transport network built successfully!"; - status.percentComplete = 100; - status.completed = true; - } - } - jobFinished(); - } - - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } - } - - @Override - public void handleStatusEvent(Map statusMap) { - try { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - -// @Override -// public void handleStatusEvent(StatusEvent statusEvent) { -// synchronized (status) { -// status.message = statusEvent.message; -// status.percentComplete = statusEvent.percentComplete -// status.error = statusEvent.error; -// } -// } - -} diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java index 5519e4ebc..2b6d9d294 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java @@ -1,91 +1,46 @@ package com.conveyal.datatools.manager.jobs; import com.conveyal.datatools.common.status.MonitorableJob; -import com.conveyal.datatools.editor.controllers.api.SnapshotController; -import com.conveyal.datatools.editor.models.Snapshot; +import com.conveyal.datatools.editor.jobs.ExportSnapshotToGTFSJob; import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.models.Snapshot; +import com.fasterxml.jackson.annotation.JsonProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.FileInputStream; -import java.util.Map; - -import static spark.Spark.halt; - /** * Created by demory on 7/27/16. */ -public class CreateFeedVersionFromSnapshotJob extends MonitorableJob { +public class CreateFeedVersionFromSnapshotJob extends MonitorableJob { public static final Logger LOG = LoggerFactory.getLogger(CreateFeedVersionFromSnapshotJob.class); public FeedVersion feedVersion; - private String snapshotId; - private Status status; + private final Snapshot snapshot; - public CreateFeedVersionFromSnapshotJob (FeedVersion feedVersion, String snapshotId, String owner) { - super(owner, "Creating Feed Version from Snapshot for " + feedVersion.getFeedSource().name, JobType.CREATE_FEEDVERSION_FROM_SNAPSHOT); + public CreateFeedVersionFromSnapshotJob(FeedVersion feedVersion, Snapshot snapshot, String owner) { + super(owner, "Creating Feed Version from Snapshot for " + feedVersion.parentFeedSource().name, JobType.CREATE_FEEDVERSION_FROM_SNAPSHOT); this.feedVersion = feedVersion; - this.snapshotId = snapshotId; - this.status = new Status(); + this.snapshot = snapshot; status.message = "Initializing..."; } @Override - public void run() { - File file = null; - - try { - file = File.createTempFile("snapshot", ".zip"); - SnapshotController.writeSnapshotAsGtfs(snapshotId, file); - } catch (Exception e) { - e.printStackTrace(); - String message = "Unable to create temp file for snapshot"; - LOG.error(message); - synchronized (status) { - status.error = true; - status.message = message; - status.completed = true; - } - } - - try { - feedVersion.newGtfsFile(new FileInputStream(file)); - } catch (Exception e) { - LOG.error("Unable to open input stream from upload"); - String message = "Unable to read uploaded feed"; - synchronized (status) { - status.error = true; - status.message = message; - status.completed = true; - } - } - - feedVersion.name = Snapshot.get(snapshotId).name + " Snapshot Export"; - feedVersion.hash(); - feedVersion.save(); - synchronized (status) { - status.message = "Version created successfully."; - status.completed = true; - status.percentComplete = 100.0; - } - jobFinished(); + public void jobLogic() { + // Set feed version properties. + feedVersion.retrievalMethod = FeedSource.FeedRetrievalMethod.PRODUCED_IN_HOUSE; + feedVersion.name = snapshot.name + " Snapshot Export"; + // FIXME: This should probably just create a new snapshot, and then validate those tables. + // First export the snapshot to GTFS. + ExportSnapshotToGTFSJob exportSnapshotToGTFSJob = new ExportSnapshotToGTFSJob(owner, snapshot, feedVersion.id); + // Process feed version once GTFS file written. + ProcessSingleFeedJob processSingleFeedJob = new ProcessSingleFeedJob(feedVersion, owner, true); + addNextJob(exportSnapshotToGTFSJob, processSingleFeedJob); + status.update("Beginning export...", 10); } - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } - } - - @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } + @JsonProperty + public String getFeedSourceId () { + return snapshot.feedSourceId; } -} +} \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java index 990e562ef..2f421e857 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java @@ -1,35 +1,43 @@ package com.conveyal.datatools.manager.jobs; import com.amazonaws.AmazonClientException; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.auth.profile.ProfileCredentialsProvider; -import com.amazonaws.event.ProgressEvent; import com.amazonaws.event.ProgressListener; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.TransferManagerBuilder; import com.amazonaws.services.s3.transfer.Upload; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; +import java.io.Serializable; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.channels.WritableByteChannel; +import java.util.Date; import java.util.List; -import java.util.Map; +import java.util.Scanner; import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.manager.models.Deployment; +import com.conveyal.datatools.manager.models.OtpServer; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.fasterxml.jackson.annotation.JsonProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.not; +import static com.mongodb.client.model.Updates.pull; +import static com.mongodb.client.model.Updates.set; + /** * Deploy the given deployment to the OTP servers specified by targets. * @author mattwigway @@ -40,195 +48,135 @@ public class DeployJob extends MonitorableJob { private static final Logger LOG = LoggerFactory.getLogger(DeployJob.class); private static final String bundlePrefix = "bundles/"; - /** The URLs to deploy to */ - private List targets; - - /** The base URL to otp.js on these targets */ - private String publicUrl; + /** The deployment to deploy */ + private Deployment deployment; - /** An optional AWS S3 bucket to copy the bundle to */ - private String s3Bucket; + /** The OTP server to deploy to (also contains S3 information). */ + private final OtpServer otpServer; - /** An AWS credentials file to use when uploading to S3 */ - private String s3CredentialsFilename; + /** Temporary file that contains the deployment data */ + private File deploymentTempFile; - /** The number of servers that have successfully been deployed to */ - private DeployStatus status; + /** This hides the status field on the parent class, providing additional fields. */ + public DeployStatus status; - /** The deployment to deploy */ - private Deployment deployment; + @JsonProperty + public String getDeploymentId () { + return deployment.id; + } - public DeployJob(Deployment deployment, String owner, List targets, String publicUrl, String s3Bucket, String s3CredentialsFilename) { - super(owner); + public DeployJob(Deployment deployment, String owner, OtpServer otpServer) { + // TODO add new job type or get rid of enum in favor of just using class names + super(owner, "Deploying " + deployment.name, JobType.DEPLOY_TO_OTP); this.deployment = deployment; - this.targets = targets; - this.publicUrl = publicUrl; - this.s3Bucket = s3Bucket; - this.s3CredentialsFilename = s3CredentialsFilename; + this.otpServer = otpServer; + // Use a special subclass of status here that has additional fields this.status = new DeployStatus(); - this.name = "Deploying " + deployment.name; status.message = "Initializing..."; status.built = false; status.numServersCompleted = 0; - status.totalServers = targets == null ? 0 : targets.size(); - } - - public DeployStatus getStatus () { - synchronized (status) { - return status.clone(); - } + status.totalServers = otpServer.internalUrl == null ? 0 : otpServer.internalUrl.size(); } - @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } - } - - public void run() { - int targetCount = targets != null ? targets.size() : 0; + public void jobLogic () { + int targetCount = otpServer.internalUrl != null ? otpServer.internalUrl.size() : 0; int totalTasks = 1 + targetCount; int tasksCompleted = 0; + String statusMessage; - // create a temporary file in which to save the deployment - File temp; try { - temp = File.createTempFile("deployment", ".zip"); + deploymentTempFile = File.createTempFile("deployment", ".zip"); } catch (IOException e) { - LOG.error("Could not create temp file"); + statusMessage = "Could not create temp file for deployment"; + LOG.error(statusMessage); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.completed = true; - status.message = "app.deployment.error.dump"; - } - - jobFinished(); + status.fail(statusMessage); return; } - LOG.info("Created deployment bundle file: " + temp.getAbsolutePath()); + LOG.info("Created deployment bundle file: " + deploymentTempFile.getAbsolutePath()); - // dump the deployment bundle + // Dump the deployment bundle to the temp file. try { - synchronized (status) { - status.message = "Creating OTP Bundle"; - } - this.deployment.dump(temp, true, true, true); + status.message = "Creating OTP Bundle"; + this.deployment.dump(deploymentTempFile, true, true, true); tasksCompleted++; } catch (Exception e) { - LOG.error("Error dumping deployment"); + statusMessage = "Error dumping deployment"; + LOG.error(statusMessage); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.completed = true; - status.message = "app.deployment.error.dump"; - } - - jobFinished(); + status.fail(statusMessage); return; } - synchronized (status) { - status.percentComplete = 100.0 * (double) tasksCompleted / totalTasks; - System.out.println("pctComplete = " + status.percentComplete); - status.built = true; - } + status.percentComplete = 100.0 * (double) tasksCompleted / totalTasks; + LOG.info("Deployment pctComplete = {}", status.percentComplete); + status.built = true; - // upload to S3, if applicable - if(this.s3Bucket != null) { - synchronized (status) { - status.message = "Uploading to S3"; - status.uploadingS3 = true; - } + // Upload to S3, if applicable + if(otpServer.s3Bucket != null) { + status.message = "Uploading to S3"; + status.uploadingS3 = true; LOG.info("Uploading deployment {} to s3", deployment.name); - + String key = null; try { - AWSCredentials creds; - if (this.s3CredentialsFilename != null) { - creds = new ProfileCredentialsProvider(this.s3CredentialsFilename, "default").getCredentials(); - } - else { - // default credentials providers, e.g. IAM role - creds = new DefaultAWSCredentialsProviderChain().getCredentials(); - } - - TransferManager tx = new TransferManager(creds); - String key = bundlePrefix + deployment.getProject().id + "/" + deployment.name + ".zip"; - final Upload upload = tx.upload(this.s3Bucket, key, temp); + TransferManager tx = TransferManagerBuilder.standard().withS3Client(FeedStore.s3Client).build(); + key = bundlePrefix + deployment.parentProject().id + "/" + deployment.name + ".zip"; + final Upload upload = tx.upload(otpServer.s3Bucket, key, deploymentTempFile); - upload.addProgressListener(new ProgressListener() { - public void progressChanged(ProgressEvent progressEvent) { - synchronized (status) { - status.percentUploaded = upload.getProgress().getPercentTransferred(); - } - } + upload.addProgressListener((ProgressListener) progressEvent -> { + status.percentUploaded = upload.getProgress().getPercentTransferred(); }); upload.waitForCompletion(); - tx.shutdownNow(); + + // Shutdown the Transfer Manager, but don't shut down the underlying S3 client. + // The default behavior for shutdownNow shut's down the underlying s3 client + // which will cause any following s3 operations to fail. + tx.shutdownNow(false); // copy to [name]-latest.zip - String copyKey = bundlePrefix + deployment.getProject().id + "/" + deployment.getProject().name.toLowerCase() + "-latest.zip"; - AmazonS3 s3client = new AmazonS3Client(creds); + String copyKey = bundlePrefix + deployment.parentProject().id + "/" + deployment.parentProject().name.toLowerCase() + "-latest.zip"; CopyObjectRequest copyObjRequest = new CopyObjectRequest( - this.s3Bucket, key, this.s3Bucket, copyKey); - s3client.copyObject(copyObjRequest); - + otpServer.s3Bucket, key, otpServer.s3Bucket, copyKey); + FeedStore.s3Client.copyObject(copyObjRequest); } catch (AmazonClientException|InterruptedException e) { - LOG.error("Error uploading deployment bundle to S3"); + statusMessage = String.format("Error uploading (or copying) deployment bundle to s3://%s/%s", otpServer.s3Bucket, key); + LOG.error(statusMessage); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.completed = true; - status.message = "app.deployment.error.dump"; - } - + status.fail(statusMessage); return; } - synchronized (status) { - status.uploadingS3 = false; - } + status.uploadingS3 = false; } - // if no OTP targets (i.e. we're only deploying to S3), we're done - if(this.targets == null) { - synchronized (status) { - status.completed = true; - } - - jobFinished(); + // If there are no OTP targets (i.e. we're only deploying to S3), we're done. + if(otpServer.internalUrl == null) { + status.completed = true; return; } // figure out what router we're using String router = deployment.routerId != null ? deployment.routerId : "default"; - // load it to OTP - for (String rawUrl : this.targets) { - synchronized (status) { - status.message = "Deploying to " + rawUrl; - status.uploading = true; - } + // Send the deployment file over the wire to each OTP server. + for (String rawUrl : otpServer.internalUrl) { + status.message = "Deploying to " + rawUrl; + status.uploading = true; + LOG.info(status.message); URL url; try { url = new URL(rawUrl + "/routers/" + router); } catch (MalformedURLException e) { - LOG.error("Malformed deployment URL {}", rawUrl); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.config"; - } + statusMessage = String.format("Malformed deployment URL %s", rawUrl); + LOG.error(statusMessage); + // do not set percentComplete to 100 because we continue to the next server + // TODO: should this return instead so that the job is cancelled? + status.error = true; + status.message = statusMessage; continue; } @@ -237,13 +185,13 @@ public void progressChanged(ProgressEvent progressEvent) { try { conn = (HttpURLConnection) url.openConnection(); } catch (IOException e) { - LOG.error("Unable to open URL of OTP server {}", url); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.net"; - } + statusMessage = String.format("Unable to open URL of OTP server %s", url); + LOG.error(statusMessage); + // do not set percentComplete to 100 because we continue to the next server + // TODO: should this return instead so that the job is cancelled? + status.error = true; + status.message = statusMessage; continue; } @@ -251,55 +199,36 @@ public void progressChanged(ProgressEvent progressEvent) { conn.setDoOutput(true); // graph build can take a long time but not more than an hour, I should think conn.setConnectTimeout(60 * 60 * 1000); - conn.setFixedLengthStreamingMode(temp.length()); + conn.setFixedLengthStreamingMode(deploymentTempFile.length()); // this makes it a post request so that we can upload our file WritableByteChannel post; try { post = Channels.newChannel(conn.getOutputStream()); } catch (IOException e) { - LOG.error("Could not open channel to OTP server {}", url); + statusMessage = String.format("Could not open channel to OTP server %s", url); + LOG.error(statusMessage); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.net"; - status.completed = true; - } - - jobFinished(); + status.fail(statusMessage); return; } - // get the input file + // retrieveById the input file FileChannel input; try { - input = new FileInputStream(temp).getChannel(); + input = new FileInputStream(deploymentTempFile).getChannel(); } catch (FileNotFoundException e) { LOG.error("Internal error: could not read dumped deployment!"); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.dump"; - status.completed = true; - } - - jobFinished(); + status.fail("Internal error: could not read dumped deployment!"); return; } try { conn.connect(); } catch (IOException e) { - LOG.error("Unable to open connection to OTP server {}", url); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.net"; - status.completed = true; - } - - jobFinished(); + statusMessage = String.format("Unable to open connection to OTP server %s", url); + LOG.error(statusMessage); + status.fail(statusMessage); return; } @@ -307,31 +236,20 @@ public void progressChanged(ProgressEvent progressEvent) { try { input.transferTo(0, Long.MAX_VALUE, post); } catch (IOException e) { - LOG.error("Unable to transfer deployment to server {}" , url); + statusMessage = String.format("Unable to transfer deployment to server %s", url); + LOG.error(statusMessage); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.net"; - status.completed = true; - } - - jobFinished(); + status.fail(statusMessage); return; } try { post.close(); } catch (IOException e) { - LOG.error("Error finishing connection to server {}", url); + String message = String.format("Error finishing connection to server %s", url); + LOG.error(message); e.printStackTrace(); - - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.net"; - status.completed = true; - } - jobFinished(); + status.fail(message); return; } @@ -339,73 +257,80 @@ public void progressChanged(ProgressEvent progressEvent) { input.close(); } catch (IOException e) { // do nothing + LOG.warn("Could not close input stream for deployment file."); } - synchronized (status) { - status.uploading = false; - } + status.uploading = false; // wait for the server to build the graph // TODO: timeouts? try { - if (conn.getResponseCode() != HttpURLConnection.HTTP_CREATED) { - LOG.error("Got response code {} from server", conn.getResponseCode()); - synchronized (status) { - status.error = true; - status.message = "app.deployment.error.graph_build_failed"; - status.completed = true; + int code = conn.getResponseCode(); + if (code != HttpURLConnection.HTTP_CREATED) { + // Get input/error stream from connection response. + InputStream stream = code < HttpURLConnection.HTTP_BAD_REQUEST + ? conn.getInputStream() + : conn.getErrorStream(); + String response; + try (Scanner scanner = new Scanner(stream)) { + scanner.useDelimiter("\\Z"); + response = scanner.next(); } - - // no reason to take out more servers, it's going to have the same result - jobFinished(); + statusMessage = String.format("Got response code %d from server due to %s", code, response); + LOG.error(statusMessage); + status.fail(statusMessage); + // Skip deploying to any other servers. + // There is no reason to take out the rest of the servers, it's going to have the same result. return; } } catch (IOException e) { - LOG.error("Could not finish request to server {}", url); - - synchronized (status) { - status.completed = true; - status.error = true; - status.message = "app.deployment.error.net"; - } + statusMessage = String.format("Could not finish request to server %s", url); + LOG.error(statusMessage); + status.fail(statusMessage); } - synchronized (status) { - status.numServersCompleted++; - tasksCompleted++; - status.percentComplete = 100.0 * (double) tasksCompleted / totalTasks; - } - } - - synchronized (status) { - status.completed = true; - status.baseUrl = this.publicUrl; + status.numServersCompleted++; + tasksCompleted++; + status.percentComplete = 100.0 * (double) tasksCompleted / totalTasks; } - jobFinished(); + status.completed = true; + status.baseUrl = otpServer.publicUrl; + } - temp.deleteOnExit(); + @Override + public void jobFinished () { + // Delete temp file containing OTP deployment (OSM extract and GTFS files) so that the server's disk storage + // does not fill up. + boolean deleted = deploymentTempFile.delete(); + if (!deleted) { + LOG.error("Deployment {} not deleted! Disk space in danger of filling up.", deployment.id); + } + String message; + if (!status.error) { + // Update status with successful completion state only if no error was encountered. + status.update(false, "Deployment complete!", 100, true); + // Store the target server in the deployedTo field. + LOG.info("Updating deployment target to {} id={}", otpServer.target(), deployment.id); + Persistence.deployments.updateField(deployment.id, "deployedTo", otpServer.target()); + // Update last deployed field. + Persistence.deployments.updateField(deployment.id, "lastDeployed", new Date()); + message = String.format("Deployment %s successfully deployed to %s", deployment.name, otpServer.publicUrl); + } else { + message = String.format("WARNING: Deployment %s failed to deploy to %s", deployment.name, otpServer.publicUrl); + } + // Send notification to those subscribed to updates for the deployment. + NotifyUsersForSubscriptionJob.createNotification("deployment-updated", deployment.id, message); } /** * Represents the current status of this job. */ public static class DeployStatus extends Status { -// /** What error message (defined in messages.) should be displayed to the user? */ -// public String message; -// -// /** Is this deployment completed (successfully or unsuccessfully) */ -// public boolean completed; - -// /** Was there an error? */ -// public boolean error; - + private static final long serialVersionUID = 1L; /** Did the manager build the bundle successfully */ public boolean built; -// /** Is the bundle currently being uploaded to the server? */ -// public boolean uploading; - /** Is the bundle currently being uploaded to an S3 bucket? */ public boolean uploadingS3; @@ -421,20 +346,5 @@ public static class DeployStatus extends Status { /** Where can the user see the result? */ public String baseUrl; - public DeployStatus clone () { - DeployStatus ret = new DeployStatus(); - ret.message = message; - ret.completed = completed; - ret.error = error; - ret.built = built; - ret.uploading = uploading; - ret.uploadingS3 = uploadingS3; - ret.percentComplete = percentComplete; - ret.percentUploaded = percentUploaded; - ret.numServersCompleted = numServersCompleted; - ret.totalServers = totalServers; - ret.baseUrl = baseUrl; - return ret; - } } } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/FeedUpdater.java b/src/main/java/com/conveyal/datatools/manager/jobs/FeedUpdater.java index d98b68ce1..9cd6a52d7 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/FeedUpdater.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/FeedUpdater.java @@ -1,64 +1,171 @@ package com.conveyal.datatools.manager.jobs; -import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectSummary; import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.controllers.api.GtfsApiController; +import java.io.File; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import com.conveyal.datatools.manager.models.ExternalFeedSourceProperty; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.conveyal.datatools.manager.utils.HashUtils; +import com.google.common.io.ByteStreams; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static com.conveyal.datatools.manager.extensions.mtc.MtcFeedResource.AGENCY_ID; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; + /** - * Created by landon on 3/24/16. + * This class is used to schedule an {@link UpdateFeedsTask}, which will check the specified S3 bucket (and prefix) for + * new files. If a new feed is found, the feed will be downloaded and its MD5 hash will be checked against the feed + * versions for the related feed source. When it finds a match, it will ensure that the {@link FeedSource#publishedVersionId} + * matches the {@link FeedVersion#namespace} for the version/file found on S3 and will update it if not. + * + * This is all done to ensure that the "published" version in MTC's RTD database matches the published version in Data + * Tools, which is primarily used to ensure that any alerts are built using GTFS stop or route IDs from the active GTFS + * feed. */ public class FeedUpdater { - public Map eTags; - private static Timer timer; - private static AmazonS3Client s3; - - public static final Logger LOG = LoggerFactory.getLogger(FeedUpdater.class); - - public FeedUpdater(Map eTagMap, int delay, int seconds) { - this.eTags = eTagMap; - DataManager.scheduler.scheduleAtFixedRate(new UpdateFeedsTask(), delay, seconds, TimeUnit.SECONDS); - } + private Map eTagForFeed; + private final String feedBucket; + private final String bucketFolder; + private static final Logger LOG = LoggerFactory.getLogger(FeedUpdater.class); - public void addFeedETag(String id, String eTag){ - this.eTags.put(id, eTag); + private FeedUpdater(int updateFrequencySeconds, String feedBucket, String bucketFolder) { + LOG.info("Setting feed update to check every {} seconds", updateFrequencySeconds); + DataManager.scheduler.scheduleAtFixedRate(new UpdateFeedsTask(), 0, updateFrequencySeconds, TimeUnit.SECONDS); + this.feedBucket = feedBucket; + this.bucketFolder = bucketFolder; } - public void addFeedETags(Map eTagList){ - this.eTags.putAll(eTagList); + /** + * Create a {@link FeedUpdater} to poll the provided S3 bucket/prefix at the specified interval (in seconds) for + * updated files. The updater's task is run using the {@link DataManager#scheduler}. + * @param updateFrequencySeconds + * @param s3Bucket + * @param s3Prefix + * @return + */ + public static FeedUpdater schedule(int updateFrequencySeconds, String s3Bucket, String s3Prefix) { + return new FeedUpdater(updateFrequencySeconds, s3Bucket, s3Prefix); } - public void cancel(){ - this.timer.cancel(); //Terminate the timer thread - } - - class UpdateFeedsTask implements Runnable { public void run() { - LOG.info("Fetching feeds..."); - LOG.info("Current eTag list " + eTags.toString()); - Map updatedTags = GtfsApiController.registerS3Feeds(eTags, GtfsApiController.feedBucket, GtfsApiController.bucketFolder); - Boolean feedsUpdated = updatedTags.isEmpty() ? false : true; - addFeedETags(updatedTags); - if (!feedsUpdated) { - LOG.info("No feeds updated..."); + Map updatedTags; + try { + LOG.debug("Checking MTC feeds for newly processed versions"); + updatedTags = checkForUpdatedFeeds(); + eTagForFeed.putAll(updatedTags); + if (!updatedTags.isEmpty()) LOG.info("New eTag list: {}", eTagForFeed); + else LOG.debug("No feeds updated (eTags on S3 match current list)."); + } catch (Exception e) { + LOG.error("Error updating feeds {}", e); } - else { - LOG.info("New eTag list " + eTags); - } - // TODO: compare current list of eTags against list in completed folder + } + } - // TODO: load feeds for any feeds with new eTags -// ApiMain.loadFeedFromBucket() + /** + * Check for any updated feeds that have been published to the S3 bucket. This tracks eTagForFeed (AWS file hash) of s3 + * objects in order to keep data-tools application in sync with external processes (for example, MTC RTD). + * @return map of feedIDs to eTag values + */ + private Map checkForUpdatedFeeds() { + if (eTagForFeed == null) { + // If running the check for the first time, instantiate the eTag map. + LOG.info("Running initial check for feeds on S3."); + eTagForFeed = new HashMap<>(); + } + Map newTags = new HashMap<>(); + // iterate over feeds in download_prefix folder and register to gtfsApi (MTC project) + ObjectListing gtfsList = FeedStore.s3Client.listObjects(feedBucket, bucketFolder); + for (S3ObjectSummary objSummary : gtfsList.getObjectSummaries()) { + + String eTag = objSummary.getETag(); + if (!eTagForFeed.containsValue(eTag)) { + String keyName = objSummary.getKey(); + // Don't add object if it is a dir + if (keyName.equals(bucketFolder)) { + continue; + } + String filename = keyName.split("/")[1]; + String feedId = filename.replace(".zip", ""); + // Skip object if the filename is null + if ("null".equals(feedId)) continue; + try { + LOG.warn("New version found for at {}/{}. ETag = {}. Downloading from s3", feedBucket, keyName, eTag); + S3Object object = FeedStore.s3Client.getObject(feedBucket, keyName); + InputStream in = object.getObjectContent(); + File file = new File(FeedStore.basePath, filename); + OutputStream out = new FileOutputStream(file); + ByteStreams.copy(in, out); + String md5 = HashUtils.hashFile(file); + FeedSource feedSource = null; + List properties = Persistence.externalFeedSourceProperties.getFiltered(and(eq("value", feedId), eq("name", AGENCY_ID))); + if (properties.size() > 1) { + StringBuilder b = new StringBuilder(); + properties.forEach(b::append); + LOG.warn("Found multiple feed sources for feedId {}: {}", + feedId, + properties.stream().map(p -> p.feedSourceId).collect(Collectors.joining(","))); + } + for (ExternalFeedSourceProperty prop : properties) { + // FIXME: What if there are multiple props found for different feed sources. This could happen if + // multiple projects have been synced with MTC or if the ExternalFeedSourceProperty for a feed + // source is not deleted properly when the feed source is deleted. + feedSource = Persistence.feedSources.getById(prop.feedSourceId); + } + if (feedSource == null) { + LOG.error("No feed source found for feed ID {}", feedId); + continue; + } + Collection versions = feedSource.retrieveFeedVersions(); + LOG.info("Searching for md5 {} across {} versions for {} ({})", md5, versions.size(), feedSource.name, feedSource.id); + boolean foundMatchingVersion = false; + int count = 0; + for (FeedVersion feedVersion : versions) { + LOG.info("version {} md5: {}", count++, feedVersion.hash); + if (feedVersion.hash.equals(md5)) { + foundMatchingVersion = true; + LOG.info("Found local version that matches latest file on S3 (SQL namespace={})", feedVersion.namespace); + if (!feedVersion.namespace.equals(feedSource.publishedVersionId)) { + LOG.info("Updating published version for feed {} to latest s3 published feed.", feedId); + Persistence.feedSources.updateField(feedSource.id, "publishedVersionId", feedVersion.namespace); + Persistence.feedVersions.updateField(feedVersion.id, "processing", false); + } else { + LOG.info("No need to update published version (published s3 feed already matches feed source's published namespace)."); + } + } + } + if (!foundMatchingVersion) { + LOG.error("Did not find version for feed {} that matched eTag found in s3!!!", feedId); + } + } catch (Exception e) { + LOG.warn("Could not load feed " + keyName, e); + } finally { + // Add new tag to map used for tracking updates. NOTE: this is in a finally block because we still + // need to track the eTags even for feed sources that were not found. Otherwise, the feeds will be + // re-downloaded each time the update task is run, which could cause many unnecessary S3 operations. + newTags.put(feedId, eTag); + } + } } + return newTags; } } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/FetchProjectFeedsJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/FetchProjectFeedsJob.java index 6512ae335..394532d26 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/FetchProjectFeedsJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/FetchProjectFeedsJob.java @@ -1,15 +1,17 @@ package com.conveyal.datatools.manager.jobs; import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZonedDateTime; +import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -18,46 +20,36 @@ */ public class FetchProjectFeedsJob extends MonitorableJob { public static final Logger LOG = LoggerFactory.getLogger(FetchProjectFeedsJob.class); - private Project proj; - public Map result; - private Status status; + public String projectId; - public FetchProjectFeedsJob (Project proj, String owner) { - super(owner, "Fetching feeds for " + proj.name + " project.", JobType.FETCH_PROJECT_FEEDS); - this.proj = proj; - this.status = new Status(); + public FetchProjectFeedsJob (Project project, String owner) { + super(owner, "Fetching feeds for " + project.name + " project.", JobType.FETCH_PROJECT_FEEDS); + this.projectId = project.id; } @Override - public void run() { - LOG.info("Fetch job running for {} project at {}", proj.name, ZonedDateTime.now(ZoneId.of("America/New_York"))); - result = new HashMap<>(); - - for(FeedSource feedSource : proj.getProjectFeedSources()) { - - if (!FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY.equals(feedSource.retrievalMethod)) - continue; -// LOG.info(); - FetchSingleFeedJob fetchSingleFeedJob = new FetchSingleFeedJob(feedSource, owner); - - new Thread(fetchSingleFeedJob).start(); + public void jobLogic() { + Project project = Persistence.projects.getById(projectId); + if (project == null) { + LOG.error("Fetch feeds job failed because project {} does not exist in database. Clearing the project's scheduled fetch jobs."); + DataManager.autoFetchMap.remove(projectId); + return; } - jobFinished(); - } - - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); + LOG.info("Fetch job running for {} project at {}", project.name, ZonedDateTime.now(ZoneId.of("America/New_York"))); + Collection projectFeeds = project.retrieveProjectFeedSources(); + for(FeedSource feedSource : projectFeeds) { + // skip feed if not fetched automatically + if (!FeedSource.FeedRetrievalMethod.FETCHED_AUTOMATICALLY.equals(feedSource.retrievalMethod)) { + continue; + } + // No need to track overall status on this FetchProjectFeedsJob. All "child" jobs execute in threadpool, + // so we don't know their status. + FetchSingleFeedJob fetchSingleFeedJob = new FetchSingleFeedJob(feedSource, owner, true); + // Run this in a heavy executor with continueThread = true, so that fetch/process jobs for each + // feed source execute in order (i.e., fetch feed source A, then process; next, fetch feed source b, then + // process). + DataManager.heavyExecutor.execute(fetchSingleFeedJob); } } - @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } - } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/FetchSingleFeedJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/FetchSingleFeedJob.java index ffad71200..a27427edc 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/FetchSingleFeedJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/FetchSingleFeedJob.java @@ -4,54 +4,69 @@ import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; - -import java.util.Map; +import com.fasterxml.jackson.annotation.JsonProperty; public class FetchSingleFeedJob extends MonitorableJob { private FeedSource feedSource; - public FeedVersion result; - private Status status; + private FeedVersion result; + private boolean continueThread; - public FetchSingleFeedJob (FeedSource feedSource, String owner) { + /** + * Fetch a single feed source by URL + * @param feedSource feed source to be fetched + * @param owner user who owns job + */ + public FetchSingleFeedJob (FeedSource feedSource, String owner, boolean continueThread) { super(owner, "Fetching feed for " + feedSource.name, JobType.FETCH_SINGLE_FEED); this.feedSource = feedSource; this.result = null; - this.status = new Status(); + this.continueThread = continueThread; status.message = "Fetching..."; status.percentComplete = 0.0; status.uploading = true; } - @Override - public void run() { - // TODO: fetch automatically vs. manually vs. in-house - try { - result = feedSource.fetch(eventBus, owner); - jobFinished(); - } catch (Exception e) { - jobFinished(); - // throw any halts that may have prevented this job from finishing - throw e; - } - if (result != null) { - new ProcessSingleFeedJob(result, this.owner).run(); - } + /** + * Getter that allows a client to know the ID of the feed version that will be created as soon as the upload is + * initiated; however, we will not store the FeedVersion in the mongo application database until the upload and + * processing is completed. This prevents clients from manipulating GTFS data before it is entirely imported. + */ + @JsonProperty + public String getFeedVersionId () { + // Feed version result is null unless (and until) fetch is successful. + return result != null ? result.id : null; } - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } + @JsonProperty + public String getFeedSourceId () { + // Feed version result is null unless (and until) fetch is successful. + return result != null ? result.parentFeedSource().id : null; } @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); + public void jobLogic () { + // TODO: fetch automatically vs. manually vs. in-house + result = feedSource.fetch(status); + + // Null result indicates that a fetch was not needed (GTFS has not been modified) + // True failures will throw exceptions. + if (result != null) { + // FetchSingleFeedJob should typically be run in a lightExecutor because it is a fairly lightweight task. + // ProcessSingleFeedJob often follows a fetch and requires significant time to complete, + // so FetchSingleFeedJob ought to be run in the heavyExecutor. Technically, the "fetch" completes + // quickly and the "processing" happens over time. So, we run the processing in a separate thread in order + // to match this user and system expectation. + // + // The exception (continueThread = true) is provided for FetchProjectFeedsJob, when we want the feeds to + // fetch and then process in sequence. + ProcessSingleFeedJob processSingleFeedJob = new ProcessSingleFeedJob(result, this.owner, true); + if (continueThread) { + addNextJob(processSingleFeedJob); + } else { + DataManager.heavyExecutor.execute(processSingleFeedJob); + } } } + } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/LoadFeedJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/LoadFeedJob.java new file mode 100644 index 000000000..8ae3e7e03 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/jobs/LoadFeedJob.java @@ -0,0 +1,52 @@ +package com.conveyal.datatools.manager.jobs; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; + +/** + * Performs the load GTFS into SQL task for a given feed version. If feed version is not new (and using S3 for storage), + * the load step will skip uploading the feed to S3 to avoid overwriting the existing files there (this shouldn't be + * harmful, but it is a waste of time/bandwidth and will overwrite the timestamp on the file which could cause confusion). + */ +public class LoadFeedJob extends MonitorableJob { + public static final Logger LOG = LoggerFactory.getLogger(LoadFeedJob.class); + + private FeedVersion feedVersion; + private final boolean isNewVersion; + + public LoadFeedJob(FeedVersion version, String owner, boolean isNewVersion) { + super(owner, "Loading GTFS", JobType.LOAD_FEED); + feedVersion = version; + this.isNewVersion = isNewVersion; + status.update(false, "Waiting to load feed...", 0); + } + + /** + * Getter that allows a client to know the ID of the feed version that will be created as soon as the upload is + * initiated; however, we will not store the FeedVersion in the mongo application database until the upload and + * processing is completed. This prevents clients from manipulating GTFS data before it is entirely imported. + */ + @JsonProperty + public String getFeedVersionId () { + return feedVersion.id; + } + + @Override + public void jobLogic () { + LOG.info("Running LoadFeedJob for {}", feedVersion.id); + feedVersion.load(status, isNewVersion); + } + + @Override + public void jobFinished () { + if (!status.error) { + status.update(false, "Load stage complete!", 100, true); + } + } + +} diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/LoadGtfsApiFeedJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/LoadGtfsApiFeedJob.java deleted file mode 100644 index da5718e7c..000000000 --- a/src/main/java/com/conveyal/datatools/manager/jobs/LoadGtfsApiFeedJob.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.conveyal.datatools.manager.jobs; - -import com.conveyal.datatools.manager.controllers.api.GtfsApiController; -import com.conveyal.datatools.manager.models.FeedSource; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.logging.Logger; - -/** - * Created by landon on 4/30/16. - */ -public class LoadGtfsApiFeedJob implements Runnable { - public static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LoadGtfsApiFeedJob.class); - - public static FeedSource feedSource; - - public LoadGtfsApiFeedJob(FeedSource feedSource) { - this.feedSource = feedSource; - } - - @Override - public void run() { - File latest = feedSource.getLatest() != null ? feedSource.getLatest().getGtfsFile() : null; - if (latest != null) - try { - LOG.info("Loading feed into GTFS api: " + feedSource.id); - GtfsApiController.gtfsApi.registerFeedSource(feedSource.id, latest); - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/MakePublicJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/MakePublicJob.java index b11742973..b7bbfbe01 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/MakePublicJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/MakePublicJob.java @@ -16,32 +16,21 @@ import java.util.Map; /** - * Created by landon on 1/31/17. + * TODO: JAVADOC and RENAME: this seems to be a single purpose run() method but it's just called "public job". + * */ public class MakePublicJob extends MonitorableJob { public Project project; - public Status status; private static final Logger LOG = LoggerFactory.getLogger(MakePublicJob.class); public MakePublicJob(Project project, String owner) { super(owner, "Generating public html for " + project.name, JobType.MAKE_PROJECT_PUBLIC); this.project = project; - status = new Status(); - status.message = "Waiting to begin validation..."; - status.percentComplete = 0; - } - @Override - public Status getStatus() { - return null; - } - - @Override - public void handleStatusEvent(Map statusMap) { - + status.update(false, "Waiting to begin validation...", 0); } @Override - public void run() { + public void jobLogic () { LOG.info("Generating new html for public feeds"); String output; String title = "Public Feeds"; @@ -60,8 +49,8 @@ public void run() { r.append("

" + title + "

\n"); r.append("The following feeds, in GTFS format, are available for download and use.\n"); r.append("
    \n"); - project.getProjectFeedSources().stream() - .filter(fs -> fs.isPublic && fs.getLatest() != null) + project.retrieveProjectFeedSources().stream() + .filter(fs -> fs.isPublic && fs.retrieveLatest() != null) .forEach(fs -> { // generate list item for feed source String url; @@ -71,9 +60,9 @@ public void run() { else { // ensure latest feed is written to the s3 public folder fs.makePublic(); - url = String.join("/", "https://s3.amazonaws.com", DataManager.feedBucket, fs.getPublicKey()); + url = String.join("/", "https://s3.amazonaws.com", DataManager.feedBucket, fs.toPublicKey()); } - FeedVersion latest = fs.getLatest(); + FeedVersion latest = fs.retrieveLatest(); r.append("
  • "); r.append(""); r.append(fs.name); @@ -82,8 +71,8 @@ public void run() { if (fs.url != null && fs.lastFetched != null) { r.append("last checked: " + new SimpleDateFormat("dd MMM yyyy").format(fs.lastFetched) + ", "); } - if (fs.getLastUpdated() != null) { - r.append("last updated: " + new SimpleDateFormat("dd MMM yyyy").format(fs.getLastUpdated()) + ")"); + if (fs.lastUpdated() != null) { + r.append("last updated: " + new SimpleDateFormat("dd MMM yyyy").format(fs.lastUpdated()) + ")"); } r.append("
  • "); }); @@ -93,7 +82,7 @@ public void run() { output = r.toString(); String fileName = "index.html"; String folder = "public/"; - File file = new File(FileUtils.getTempDirectory() + fileName); + File file = new File(String.join("/", FileUtils.getTempDirectory().getAbsolutePath(), fileName)); file.deleteOnExit(); try { FileUtils.writeStringToFile(file, output); @@ -104,7 +93,6 @@ public void run() { FeedStore.s3Client.putObject(DataManager.feedBucket, folder + fileName, file); FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, folder + fileName, CannedAccessControlList.PublicRead); - jobFinished(); LOG.info("Public page updated on s3"); } } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/MergeProjectFeedsJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/MergeProjectFeedsJob.java new file mode 100644 index 000000000..4710f8567 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/jobs/MergeProjectFeedsJob.java @@ -0,0 +1,259 @@ +package com.conveyal.datatools.manager.jobs; + +import com.conveyal.datatools.common.status.MonitorableJob; +import com.conveyal.datatools.common.utils.Consts; +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.FeedStore; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import java.util.zip.ZipOutputStream; + +/** + * Created by landon on 9/19/17. + */ +public class MergeProjectFeedsJob extends MonitorableJob { + + private static final Logger LOG = LoggerFactory.getLogger(MergeProjectFeedsJob.class); + public final Project project; + + public MergeProjectFeedsJob(Project project, String owner) { + super(owner, "Merging project feeds for " + project.name, JobType.MERGE_PROJECT_FEEDS); + this.project = project; + status.message = "Merging feeds..."; + } + + @Override + public void jobLogic () throws IOException { + // get feed sources in project + Collection feeds = project.retrieveProjectFeedSources(); + + // create temp merged zip file to add feed content to + File mergedFile = null; + try { + mergedFile = File.createTempFile(project.id + "-merged", ".zip"); + mergedFile.deleteOnExit(); + } catch (IOException e) { + LOG.error("Could not create temp file"); + e.printStackTrace(); + throw e; + } + + // create the zipfile + ZipOutputStream out = new ZipOutputStream(new FileOutputStream(mergedFile)); + + LOG.info("Created project merge file: " + mergedFile.getAbsolutePath()); + + // map of feed versions to table entries contained within version's GTFS + Map feedSourceMap = new HashMap<>(); + + // collect zipFiles for each feedSource before merging tables + for (FeedSource fs : feeds) { + // check if feed source has version (use latest) + FeedVersion version = fs.retrieveLatest(); + if (version == null) { + LOG.info("Skipping {} because it has no feed versions", fs.name); + continue; + } + // modify feed version to use prepended feed id + LOG.info("Adding {} feed to merged zip", fs.name); + try { + File file = version.retrieveGtfsFile(); + if (file == null) { + LOG.error("No file exists for {}", version.id); + continue; + } + ZipFile zipFile = new ZipFile(file); + feedSourceMap.put(fs, zipFile); + } catch(Exception e) { + e.printStackTrace(); + LOG.error("Zipfile for version {} not found", version.id); + } + } + + // loop through GTFS tables + int numberOfTables = DataManager.gtfsConfig.size(); + for(int i = 0; i < numberOfTables; i++) { + JsonNode tableNode = DataManager.gtfsConfig.get(i); + byte[] tableOut = mergeTables(tableNode, feedSourceMap); + + // if at least one feed has the table, include it + if (tableOut != null) { + + String tableName = tableNode.get("name").asText(); + synchronized (status) { + status.message = "Merging " + tableName; + status.percentComplete = Math.round((double) i / numberOfTables * 10000d) / 100d; + } + // create entry for zip file + ZipEntry tableEntry = new ZipEntry(tableName); + try { + out.putNextEntry(tableEntry); + LOG.info("Writing {} to merged feed", tableName); + out.write(tableOut); + out.closeEntry(); + } catch (IOException e) { + LOG.error("Error writing to table {}", tableName); + e.printStackTrace(); + } + } + } + try { + out.close(); + } catch (IOException e) { + LOG.error("Error closing zip file"); + e.printStackTrace(); + } + synchronized (status) { + status.message = "Saving merged feed."; + status.percentComplete = 95.0; + } + // Store the project merged zip locally or on s3 + if (DataManager.useS3) { + String s3Key = "project/" + project.id + ".zip"; + FeedStore.s3Client.putObject(DataManager.feedBucket, s3Key, mergedFile); + LOG.info("Storing merged project feed at s3://{}/{}", DataManager.feedBucket, s3Key); + } else { + try { + FeedVersion.feedStore.newFeed(project.id + ".zip", new FileInputStream(mergedFile), null); + } catch (FileNotFoundException e) { + e.printStackTrace(); + LOG.error("Could not store feed for project {}", project.id); + } + } + // delete temp file + mergedFile.delete(); + + synchronized (status) { + status.message = "Merged feed created successfully."; + status.completed = true; + status.percentComplete = 100.0; + } + } + + /** + * Merge the specified table for multiple GTFS feeds. + * @param tableNode tableNode to merge + * @param feedSourceMap map of feedSources to zipFiles from which to extract the .txt tables + * @return single merged table for feeds + */ + private static byte[] mergeTables(JsonNode tableNode, Map feedSourceMap) throws IOException { + + String tableName = tableNode.get("name").asText(); + ByteArrayOutputStream tableOut = new ByteArrayOutputStream(); + + ArrayNode fieldsNode = (ArrayNode) tableNode.get("fields"); + List headers = new ArrayList<>(); + for (int i = 0; i < fieldsNode.size(); i++) { + JsonNode fieldNode = fieldsNode.get(i); + String fieldName = fieldNode.get("name").asText(); + Boolean notInSpec = fieldNode.has("datatools") && fieldNode.get("datatools").asBoolean(); + if (notInSpec) { + fieldsNode.remove(i); + } + headers.add(fieldName); + } + + try { + // write headers to table + tableOut.write(String.join(",", headers).getBytes()); + tableOut.write("\n".getBytes()); + + // iterate over feed source to zipfile map + for ( Map.Entry mapEntry : feedSourceMap.entrySet()) { + FeedSource fs = mapEntry.getKey(); + ZipFile zipFile = mapEntry.getValue(); + final Enumeration entries = zipFile.entries(); + while (entries.hasMoreElements()) { + final ZipEntry entry = entries.nextElement(); + if(tableName.equals(entry.getName())) { + LOG.info("Adding {} table for {}", entry.getName(), fs.name); + + InputStream inputStream = zipFile.getInputStream(entry); + + BufferedReader in = new BufferedReader(new InputStreamReader(inputStream)); + String line = in.readLine(); + String[] fields = line.split(","); + + List fieldList = Arrays.asList(fields); + + + // iterate over rows in table + while((line = in.readLine()) != null) { + String[] newValues = new String[fieldsNode.size()]; + String[] values = line.split(Consts.COLUMN_SPLIT, -1); + if (values.length == 1) { + LOG.warn("Found blank line. Skipping..."); + continue; + } + for(int v = 0; v < fieldsNode.size(); v++) { + JsonNode fieldNode = fieldsNode.get(v); + String fieldName = fieldNode.get("name").asText(); + + // get index of field from GTFS spec as it appears in feed + int index = fieldList.indexOf(fieldName); + String val = ""; + try { + index = fieldList.indexOf(fieldName); + if(index != -1) { + val = values[index]; + } + } catch (ArrayIndexOutOfBoundsException e) { + LOG.warn("Index {} out of bounds for file {} and feed {}", index, entry.getName(), fs.name); + continue; + } + + String fieldType = fieldNode.get("inputType").asText(); + + // if field is a gtfs identifier, prepend with feed id/name + if (fieldType.contains("GTFS") && !val.isEmpty()) { + newValues[v] = fs.name + ":" + val; + } + else { + newValues[v] = val; + } + } + String newLine = String.join(",", newValues); + + // write line to table (plus new line char) + tableOut.write(newLine.getBytes()); + tableOut.write("\n".getBytes()); + } + } + } + } + } catch (IOException e) { + e.printStackTrace(); + LOG.error( + "Error merging feed sources: {}", + feedSourceMap.keySet().stream().map(fs -> fs.name).collect(Collectors.toList()).toString() + ); + throw e; + } + return tableOut.toByteArray(); + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/NotifyUsersForSubscriptionJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/NotifyUsersForSubscriptionJob.java index 553c36725..35bc01eca 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/NotifyUsersForSubscriptionJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/NotifyUsersForSubscriptionJob.java @@ -1,8 +1,10 @@ package com.conveyal.datatools.manager.jobs; import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.models.Deployment; import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; @@ -17,66 +19,118 @@ * Created by landon on 6/6/16. */ public class NotifyUsersForSubscriptionJob implements Runnable { - private ObjectMapper mapper = new ObjectMapper(); + private static final ObjectMapper mapper = new ObjectMapper(); public static final Logger LOG = LoggerFactory.getLogger(NotifyUsersForSubscriptionJob.class); private String subscriptionType; private String target; private String message; + private static final String DEFAULT_NAME = "Data Tools"; + private static final String APPLICATION_NAME = DataManager.getConfigPropertyAsText("application.title"); + private static final String APPLICATION_URL = DataManager.getConfigPropertyAsText("application.public_url"); - public NotifyUsersForSubscriptionJob(String subscriptionType, String target, String message) { + private NotifyUsersForSubscriptionJob(String subscriptionType, String target, String message) { this.subscriptionType = subscriptionType; this.target = target; this.message = message; } + /** + * Convenience method to create and schedule a notification job to notify subscribed users. + */ + public static void createNotification(String subscriptionType, String target, String message) { + if (APPLICATION_URL == null || !(APPLICATION_URL.startsWith("https://") || APPLICATION_URL.startsWith("http://"))) { + LOG.error("application.public_url (value={}) property must be set to a valid URL in order to send notifications to users.", APPLICATION_URL); + return; + } + NotifyUsersForSubscriptionJob notifyJob = new NotifyUsersForSubscriptionJob(subscriptionType, target, message); + DataManager.lightExecutor.execute(notifyJob); + LOG.info("Notification job scheduled in light executor"); + } + @Override public void run() { notifyUsersForSubscription(); } // TODO: modify method so that it receives both a feed param and a updateFor param? - public void notifyUsersForSubscription() { - if (!DataManager.getConfigProperty("application.notifications_enabled").asBoolean()) { + private void notifyUsersForSubscription() { + if (DataManager.hasConfigProperty("application.notifications_enabled") && !DataManager.getConfigProperty("application.notifications_enabled").asBoolean()) { return; } - String userString = getUsersBySubscription(this.subscriptionType, this.target); + LOG.info("Checking for subscribed users to notify type={} target={}", subscriptionType, target); + String userString = getUsersBySubscription(subscriptionType, target); JsonNode subscribedUsers = null; try { - subscribedUsers = this.mapper.readTree(userString); + subscribedUsers = mapper.readTree(userString); } catch (IOException e) { e.printStackTrace(); } + if (subscribedUsers == null) { + LOG.error("Subscribed users list for type={}, target={} is null. Skipping notification delivery.", subscriptionType, target); + return; + } for (JsonNode user : subscribedUsers) { if (!user.has("email")) { continue; } String email = user.get("email").asText(); Boolean emailVerified = user.get("email_verified").asBoolean(); - LOG.info("sending notification to {}", email); - // only send email if address has been verified - if (emailVerified) { + if (!emailVerified) { + LOG.warn("Skipping notification for user {}. User's email address has not been verified.", email); + } else { + LOG.info("Sending notification to {}", email); try { String subject; - String url; - String bodyAction; + String html = String.format("

    %s

    ", this.message); + String applicationName; + String subscriptionToString = this.subscriptionType.replace("-", " "); + if (APPLICATION_NAME == null) { + LOG.warn("Configuration property \"application.title\" must be set to customize notifications."); + applicationName = DEFAULT_NAME; + } else { + applicationName = APPLICATION_NAME; + } String[] subType = this.subscriptionType.split("-"); switch (subType[0]) { case "feed": - FeedSource fs = FeedSource.get(this.target); - subject = DataManager.getConfigPropertyAsText("application.title")+ " Notification: " + this.subscriptionType.replace("-", " ") + " (" + fs.name + ")"; - url = DataManager.getConfigPropertyAsText("application.public_url"); - bodyAction = "

    View this feed.

    "; - sendNotification(email, subject, "Body", "

    " + this.message + bodyAction); + FeedSource fs = Persistence.feedSources.getById(this.target); + // Format subject header + subject = String.format("%s Notification: %s (%s)", applicationName, subscriptionToString, fs.name); + // Add action text. + html += String.format("

    View this feed.

    ", APPLICATION_URL, fs.id); break; case "project": - Project p = Project.get(this.target); - subject = "Datatools Notification: " + this.subscriptionType.replace("-", " ") + " (" + p.name + ")"; - url = DataManager.getConfigPropertyAsText("application.public_url"); - bodyAction = "

    View this project.

    "; - sendNotification(email, subject, "Body", "

    " + this.message + bodyAction); + Project p = Persistence.projects.getById(this.target); + // Format subject header + subject = String.format("%s Notification: %s (%s)", applicationName, subscriptionToString, p.name); + // Add action text. + html += String.format("

    View this project.

    ", APPLICATION_URL, p.id); + break; + case "deployment": + Deployment deployment = Persistence.deployments.getById(this.target); + // Format subject header + subject = String.format( + "%s Notification: %s (%s)", + applicationName, + subscriptionToString, + deployment.name); + // Add action text. + html += String.format( + "

    View this deployment.

    ", + APPLICATION_URL, + deployment.projectId, + deployment.id); break; + default: + LOG.warn("Notifications not supported for subscription type {}", subType[0]); + return; } + // Add manage subscriptions blurb. + html += String.format( + "

    Manage subscriptions here.

    ", + APPLICATION_URL); + sendNotification(email, subject, "Body", html); } catch (Exception e) { e.printStackTrace(); } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/ProcessSingleFeedJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/ProcessSingleFeedJob.java index 549abc377..4c1a11182 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/ProcessSingleFeedJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/ProcessSingleFeedJob.java @@ -1,52 +1,75 @@ package com.conveyal.datatools.manager.jobs; -import com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotMerge; -import com.conveyal.datatools.editor.models.Snapshot; -import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.manager.models.FeedVersion; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Process/validate a single GTFS feed + * Process/validate a single GTFS feed. This chains together multiple server jobs. Loading the feed and validating the + * feed are chained regardless. However, depending on which modules are enabled, other jobs may be + * included here if desired. * @author mattwigway * */ -public class ProcessSingleFeedJob implements Runnable { - FeedVersion feedVersion; +public class ProcessSingleFeedJob extends MonitorableJob { + private FeedVersion feedVersion; private String owner; + private final boolean isNewVersion; + private static final Logger LOG = LoggerFactory.getLogger(ProcessSingleFeedJob.class); /** * Create a job for the given feed version. - * @param feedVersion */ - public ProcessSingleFeedJob (FeedVersion feedVersion, String owner) { + public ProcessSingleFeedJob (FeedVersion feedVersion, String owner, boolean isNewVersion) { + super(owner, "Processing GTFS for " + (feedVersion.parentFeedSource() != null ? feedVersion.parentFeedSource().name : "unknown feed source"), JobType.PROCESS_FEED); this.feedVersion = feedVersion; this.owner = owner; -// this.status = new MonitorableJob.Status(); -// status.message = "Fetching..."; -// status.percentComplete = 0.0; -// status.uploading = true; + this.isNewVersion = isNewVersion; + status.update(false, "Processing...", 0); + status.uploading = true; } - public void run() { + /** + * Getter that allows a client to know the ID of the feed version that will be created as soon as the upload is + * initiated; however, we will not store the FeedVersion in the mongo application database until the upload and + * processing is completed. This prevents clients from manipulating GTFS data before it is entirely imported. + */ + @JsonProperty + public String getFeedVersionId () { + return feedVersion.id; + } - // set up the validation job to run first - ValidateFeedJob validateJob = new ValidateFeedJob(feedVersion, owner); + @JsonProperty + public String getFeedSourceId () { + return feedVersion.parentFeedSource().id; + } - // use this FeedVersion to seed Editor DB provided no snapshots for feed already exist - if(DataManager.isModuleEnabled("editor")) { - // chain snapshot-creation job if no snapshots currently exist for feed - if (Snapshot.getSnapshots(feedVersion.feedSourceId).size() == 0) { - ProcessGtfsSnapshotMerge processGtfsSnapshotMergeJob = new ProcessGtfsSnapshotMerge(feedVersion, owner); - validateJob.addNextJob(processGtfsSnapshotMergeJob); - } - } + @Override + public void jobLogic () { + LOG.info("Processing feed for {}", feedVersion.id); - // chain on a network builder job, if applicable - if(DataManager.isModuleEnabled("validator")) { - validateJob.addNextJob(new BuildTransportNetworkJob(feedVersion, owner)); - } + // First, load the feed into database. + addNextJob(new LoadFeedJob(feedVersion, owner, isNewVersion)); + + // Next, validate the feed. + addNextJob(new ValidateFeedJob(feedVersion, owner, isNewVersion)); - new Thread(validateJob).start(); + // TODO: Any other activities that need to be run (e.g., module-specific activities). + } + + @Override + public void jobFinished () { + if (!status.error) { + // Note: storing a new feed version in database is handled at completion of the ValidateFeedJob subtask. + status.update(false, "New version saved.", 100, true); + } else { + // Processing did not complete. Depending on which sub-task this occurred in, + // there may or may not have been a successful load/validation of the feed. + String errorReason = status.exceptionType != null ? String.format("error due to %s", status.exceptionType) : "unknown error"; + LOG.warn("Error processing version {} because of {}.", feedVersion.id, errorReason); + } } } diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/ReadTransportNetworkJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/ReadTransportNetworkJob.java deleted file mode 100644 index 03d186a61..000000000 --- a/src/main/java/com/conveyal/datatools/manager/jobs/ReadTransportNetworkJob.java +++ /dev/null @@ -1,85 +0,0 @@ -package com.conveyal.datatools.manager.jobs; - -import com.conveyal.datatools.common.status.MonitorableJob; -import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.models.FeedVersion; -import com.conveyal.r5.transit.TransportNetwork; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.Map; - -/** - * Created by landon on 10/11/16. - */ -public class ReadTransportNetworkJob extends MonitorableJob { - private static final Logger LOG = LoggerFactory.getLogger(ReadTransportNetworkJob.class); - public FeedVersion feedVersion; - public TransportNetwork result; - public Status status; - - public ReadTransportNetworkJob (FeedVersion feedVersion, String owner) { - super(owner, "Reading in Transport Network for " + feedVersion.getFeedSource().name, JobType.BUILD_TRANSPORT_NETWORK); - this.feedVersion = feedVersion; - this.result = null; - this.status = new Status(); - status.message = "Waiting to begin job..."; - } - - @Override - public void run() { - LOG.info("Reading network"); - File is; - is = feedVersion.getTransportNetworkPath(); - try { - feedVersion.transportNetwork = TransportNetwork.read(is); - // check to see if distance tables are built yet... should be removed once better caching strategy is implemeneted. - if (feedVersion.transportNetwork.transitLayer.stopToVertexDistanceTables == null) { - feedVersion.transportNetwork.transitLayer.buildDistanceTables(null); - } - } catch (Exception e) { - e.printStackTrace(); - } - synchronized (status) { - status.message = "Transport network read successfully!"; - status.percentComplete = 100; - status.completed = true; - } - jobFinished(); - } - - @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } - } - - @Override - public void handleStatusEvent(Map statusMap) { - try { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - -// @Override -// public void handleStatusEvent(StatusEvent statusEvent) { -// synchronized (status) { -// status.message = statusEvent.message; -// status.percentComplete = statusEvent.percentComplete -// status.error = statusEvent.error; -// } -// } - -} diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/ValidateFeedJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/ValidateFeedJob.java index b5921f4bc..6b3534e79 100644 --- a/src/main/java/com/conveyal/datatools/manager/jobs/ValidateFeedJob.java +++ b/src/main/java/com/conveyal/datatools/manager/jobs/ValidateFeedJob.java @@ -1,70 +1,74 @@ package com.conveyal.datatools.manager.jobs; import com.conveyal.datatools.common.status.MonitorableJob; -import com.conveyal.datatools.common.status.StatusEvent; -import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; -import com.google.common.eventbus.Subscribe; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.fasterxml.jackson.annotation.JsonProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; /** - * Created by demory on 6/16/16. + * This job handles the validation of a given feed version. If the version is not new, it will simply replace the + * existing version with the version object that has updated validation info. */ public class ValidateFeedJob extends MonitorableJob { public static final Logger LOG = LoggerFactory.getLogger(ValidateFeedJob.class); - public FeedVersion feedVersion; - public Status status; + private FeedVersion feedVersion; + private final boolean isNewVersion; - public ValidateFeedJob(FeedVersion version, String owner) { - super(owner, "Validating Feed for " + version.getFeedSource().name, JobType.VALIDATE_FEED); + public ValidateFeedJob(FeedVersion version, String owner, boolean isNewVersion) { + super(owner, "Validating Feed", JobType.VALIDATE_FEED); feedVersion = version; - status = new Status(); - status.message = "Waiting to begin validation..."; - status.percentComplete = 0; + this.isNewVersion = isNewVersion; + status.update(false, "Waiting to begin validation...", 0); } @Override - public Status getStatus() { - synchronized (status) { - return status.clone(); - } + public void jobLogic () { + LOG.info("Running ValidateFeedJob for {}", feedVersion.id); + feedVersion.validate(status); } @Override - public void run() { - LOG.info("Running ValidateFeedJob for {}", feedVersion.id); - synchronized (status) { - status.message = "Running validation..."; - status.percentComplete = 30; - } - feedVersion.setUserById(owner); - feedVersion.validate(eventBus); - feedVersion.save(); - if (!status.error) - synchronized (status) { - if (!status.error) { - status.message = "Validation complete!"; - status.percentComplete = 100; - status.completed = true; + public void jobFinished () { + if (!status.error) { + if (parentJobId != null && JobType.PROCESS_FEED.equals(parentJobType)) { + // Validate stage is happening as part of an overall process feed job. + // At this point all GTFS data has been loaded and validated, so we record + // the FeedVersion into mongo. + // This happens here because otherwise we would have to wait for other jobs, + // such as BuildTransportNetwork, to finish. If those subsequent jobs fail, + // the version won't get loaded into the database (even though it exists in postgres). + feedVersion.storeUser(owner); + if (isNewVersion) { + Persistence.feedVersions.create(feedVersion); + } else { + Persistence.feedVersions.replace(feedVersion.id, feedVersion); + } } + // TODO: If ValidateFeedJob is called without a parent job (e.g., to "re-validate" a feed), we should handle + // storing the updated ValidationResult in Mongo. + + status.update(false, "Validation finished!", 100, true); } - jobFinished(); } - @Override - public void handleStatusEvent(Map statusMap) { - synchronized (status) { - status.message = (String) statusMap.get("message"); - status.percentComplete = (double) statusMap.get("percentComplete"); - status.error = (boolean) statusMap.get("error"); - } + /** + * Getter that allows a client to know the ID of the feed version that will be created as soon as the upload is + * initiated; however, we will not store the FeedVersion in the mongo application database until the upload and + * processing is completed. This prevents clients from manipulating GTFS data before it is entirely imported. + */ + @JsonProperty + public String getFeedVersionId () { + return feedVersion.id; + } + + @JsonProperty + public String getFeedSourceId () { + return feedVersion.parentFeedSource().id; } -// public void handleGTFSValidationEvent(GTFSValidationEvent gtfsValidationEvent) { -// -// } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/Bounds.java b/src/main/java/com/conveyal/datatools/manager/models/Bounds.java new file mode 100644 index 000000000..d7726c703 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/models/Bounds.java @@ -0,0 +1,52 @@ +package com.conveyal.datatools.manager.models; + +import java.awt.geom.Rectangle2D; +import java.io.Serializable; + +/** + * Created by landon on 9/22/17. + */ +public class Bounds implements Serializable { + private static final long serialVersionUID = 1L; + public double north, south, east, west; + + /** + * No-arg constructor for serialization. + */ + public Bounds () {} + + /** + * Create Bounds from java.awt.geom.Rectangle2D + */ + public Bounds (Rectangle2D rectangle2D) { + this.north = rectangle2D.getMaxY(); + this.south = rectangle2D.getMinY(); + this.east = rectangle2D.getMaxX(); + this.west = rectangle2D.getMinX(); + } + + public Rectangle2D.Double toRectangle2D () { + return new Rectangle2D.Double(west, south, + east - west, north - south); + } + + public boolean areValid () { + // Ensure that all of the values are actually numbers. + return !Double.isNaN(south) && + !Double.isNaN(north) && + !Double.isNaN(east) && + !Double.isNaN(west) + // Also ensure that they are set in the correct order. + && south < north && west < east + // And that they are within the lat/lng limits + && south >= -90 && north <= 90 && west >= -180 && east <= 180; + } + + public String toTransitLandString() { + return String.format("%.6f,%.6f,%.6f,%.6f", west, south, east, north); + } + + public String toVexString () { + return String.format("%.6f,%.6f,%.6f,%.6f", south, west, north, east); + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/models/Deployment.java b/src/main/java/com/conveyal/datatools/manager/models/Deployment.java index 8c7e7d784..42ed33bc4 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/Deployment.java +++ b/src/main/java/com/conveyal/datatools/manager/models/Deployment.java @@ -1,7 +1,7 @@ package com.conveyal.datatools.manager.models; import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.persistence.DataStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.StringUtils; import com.conveyal.datatools.manager.utils.json.JsonManager; import com.fasterxml.jackson.annotation.JsonIgnore; @@ -25,18 +25,25 @@ import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; +import java.nio.charset.StandardCharsets; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.List; +import java.util.Locale; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; +import com.mongodb.client.FindIterable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.not; + /** * A deployment of (a given version of) OTP on a given set of feeds. * @author mattwigway @@ -47,37 +54,29 @@ public class Deployment extends Model implements Serializable { private static final long serialVersionUID = 1L; private static final Logger LOG = LoggerFactory.getLogger(Deployment.class); - private static DataStore deploymentStore = new DataStore("deployments"); public String name; /** What server is this currently deployed to? */ public String deployedTo; - public Date dateCreated; - @JsonView(JsonViews.DataDump.class) public String projectId; - @JsonView(JsonViews.UserInterface.class) - public Project getProject () { - return Project.get(projectId); - } - - public void setProject (Project project) { - this.projectId = project.id; + @JsonProperty("project") + public Project parentProject() { + return Persistence.projects.getById(projectId); } @JsonView(JsonViews.DataDump.class) public Collection feedVersionIds; /** All of the feed versions used in this deployment */ - @JsonIgnore - public List getFullFeedVersions () { - ArrayList ret = new ArrayList(feedVersionIds.size()); + public List retrieveFullFeedVersions() { + ArrayList ret = new ArrayList<>(feedVersionIds.size()); for (String id : feedVersionIds) { - FeedVersion v = FeedVersion.get(id); + FeedVersion v = Persistence.feedVersions.getById(id); if (v != null) ret.add(v); else @@ -88,16 +87,19 @@ public List getFullFeedVersions () { } /** All of the feed versions used in this deployment, summarized so that the Internet won't break */ - @JsonView(JsonViews.UserInterface.class) - public List getFeedVersions () { - ArrayList ret = new ArrayList(feedVersionIds.size()); + @JsonProperty("feedVersions") + public List retrieveFeedVersions() { + // return empty array if feedVersionIds is null + if (feedVersionIds == null) return new ArrayList<>(); + + ArrayList ret = new ArrayList<>(feedVersionIds.size()); for (String id : feedVersionIds) { - FeedVersion v = FeedVersion.get(id); + FeedVersion v = Persistence.feedVersions.getById(id); // should never happen but can if someone monkeyed around with dump/restore if (v != null) - ret.add(new SummarizedFeedVersion(FeedVersion.get(id))); + ret.add(new SummarizedFeedVersion(Persistence.feedVersions.getById(id))); else LOG.error("Reference integrity error for deployment {} ({}), feed version {} does not exist", this.name, this.id, id); } @@ -105,8 +107,8 @@ public List getFeedVersions () { return ret; } - public void setFeedVersions (Collection versions) { - feedVersionIds = new ArrayList(versions.size()); + public void storeFeedVersions(Collection versions) { + feedVersionIds = new ArrayList<>(versions.size()); for (FeedVersion version : versions) { feedVersionIds.add(version.id); @@ -119,11 +121,17 @@ public void setFeedVersions (Collection versions) { /** The commit of OTP being used on this deployment */ public String otpCommit; + /** Date when the deployment was last deployed to a server */ + public Date lastDeployed; + /** * The routerId of this deployment */ public String routerId; + public String customBuildConfig; + public String customRouterConfig; + /** * If this deployment is for a single feed source, the feed source this deployment is for. */ @@ -141,14 +149,15 @@ public void setFeedVersions (Collection versions) { */ @JsonView(JsonViews.UserInterface.class) @JsonInclude(Include.ALWAYS) - public List getInvalidFeedSources () { + @JsonProperty("invalidFeedSources") + public List invalidFeedSources () { if (invalidFeedSourceIds == null) return null; ArrayList ret = new ArrayList(invalidFeedSourceIds.size()); for (String id : invalidFeedSourceIds) { - ret.add(FeedSource.get(id)); + ret.add(Persistence.feedSources.getById(id)); } return ret; @@ -159,16 +168,15 @@ public Deployment(FeedSource feedSource) { super(); this.feedSourceId = feedSource.id; - this.setProject(feedSource.getProject()); - this.dateCreated = new Date(); - this.feedVersionIds = new ArrayList(); + this.projectId = feedSource.projectId; + this.feedVersionIds = new ArrayList<>(); DateFormat df = new SimpleDateFormat("yyyyMMdd"); this.name = StringUtils.getCleanName(feedSource.name) + "_" + df.format(dateCreated); // always use the latest, no matter how broken it is, so we can at least see how broken it is - this.feedVersionIds.add(feedSource.getLatestVersionId()); + this.feedVersionIds.add(feedSource.latestVersionId()); this.routerId = StringUtils.getCleanName(feedSource.name) + "_" + feedSourceId; @@ -181,17 +189,15 @@ public Deployment(Project project) { this.feedSourceId = null; - this.setProject(project); - - this.dateCreated = new Date(); + this.projectId = project.id; - this.feedVersionIds = new ArrayList(); - this.invalidFeedSourceIds = new ArrayList(); + this.feedVersionIds = new ArrayList<>(); + this.invalidFeedSourceIds = new ArrayList<>(); - FEEDSOURCE: for (FeedSource s : project.getProjectFeedSources()) { + FEEDSOURCE: for (FeedSource s : project.retrieveProjectFeedSources()) { // only include deployable feeds if (s.deployable) { - FeedVersion latest = s.getLatest(); + FeedVersion latest = s.retrieveLatest(); // find the newest version that can be deployed while (true) { @@ -204,7 +210,7 @@ public Deployment(Project project) { break; } - latest = latest.getPreviousVersion(); + latest = latest.previousVersion(); } // this version is the latest good version @@ -222,38 +228,13 @@ public Deployment() { // do nothing. } - /** Get a deployment by ID */ - public static Deployment get (String id) { - return deploymentStore.getById(id); - } - - /** Save this deployment and commit it */ - public void save () { - this.save(true); - } - - /** Save this deployment */ - public void save (boolean commit) { - if (commit) - deploymentStore.save(id, this); - else - deploymentStore.saveWithoutCommit(id, this); - } - - /** - * Delete this deployment and everything that it contains. - */ - public void delete() { - deploymentStore.delete(this.id); - } - /** Dump this deployment to the given file * @param output the output file * @param includeOsm should an osm.pbf file be included in the dump? * @param includeOtpConfig should OTP build-config.json and router-config.json be included? */ public void dump (File output, boolean includeManifest, boolean includeOsm, boolean includeOtpConfig) throws IOException { - // create the zipfile + // Create the zipfile. ZipOutputStream out; try { out = new ZipOutputStream(new FileOutputStream(output)); @@ -265,169 +246,123 @@ public void dump (File output, boolean includeManifest, boolean includeOsm, bool // save the manifest at the beginning of the file, for read/seek efficiency ZipEntry manifestEntry = new ZipEntry("manifest.json"); out.putNextEntry(manifestEntry); - // create the json manifest JsonManager jsonManifest = new JsonManager(Deployment.class, JsonViews.UserInterface.class); // this mixin gives us full feed validation results, not summarized jsonManifest.addMixin(Deployment.class, DeploymentFullFeedVersionMixin.class); - byte[] manifest = jsonManifest.write(this).getBytes(); - + // Write manifest and close entry. out.write(manifest); - out.closeEntry(); } - // write each of the GTFS feeds - for (FeedVersion v : this.getFullFeedVersions()) { - File feed = v.getGtfsFile(); - + // Write each of the feed version GTFS files into the zip. + for (FeedVersion v : this.retrieveFullFeedVersions()) { + File gtfsFile = v.retrieveGtfsFile(); FileInputStream in; - try { - in = new FileInputStream(feed); + in = new FileInputStream(gtfsFile); } catch (FileNotFoundException e1) { + LOG.error("Could not retrieve file for {}", v.name); throw new RuntimeException(e1); } - - ZipEntry e = new ZipEntry(feed.getName()); + ZipEntry e = new ZipEntry(gtfsFile.getName()); out.putNextEntry(e); - - // copy the zipfile 100k at a time - int bufSize = 100 * 1024; - byte[] buff = new byte[bufSize]; - int readBytes; - - while (true) { - try { - readBytes = in.read(buff); - } catch (IOException e1) { - try { - in.close(); - } catch (IOException e2) { - throw new RuntimeException(e2); - } - throw new RuntimeException(e1); - } - - if (readBytes == -1) - // we've copied the whole file - break; - - out.write(buff, 0, readBytes); - } - + ByteStreams.copy(in, out); try { in.close(); } catch (IOException e1) { - throw new RuntimeException(e1); + LOG.warn("Could not close GTFS file input stream {}", gtfsFile.getName()); + e1.printStackTrace(); } - out.closeEntry(); } if (includeOsm) { - // extract OSM and insert it into the deployment bundle + // Extract OSM and insert it into the deployment bundle ZipEntry e = new ZipEntry("osm.pbf"); out.putNextEntry(e); - InputStream is = getOsmExtract(getProjectBounds()); + InputStream is = downloadOsmExtract(retrieveProjectBounds()); ByteStreams.copy(is, out); try { is.close(); } catch (IOException e1) { + LOG.warn("Could not close OSM input stream"); e1.printStackTrace(); } - out.closeEntry(); } if (includeOtpConfig) { - // write build-config.json and router-config.json - Project proj = this.getProject(); - - if (proj.buildConfig != null) { + // Write build-config.json and router-config.json + Project project = this.parentProject(); + ObjectMapper mapper = new ObjectMapper(); + // Use custom build config if it is not null, otherwise default to project build config. + byte[] buildConfigAsBytes = customBuildConfig != null + ? customBuildConfig.getBytes(StandardCharsets.UTF_8) + : project.buildConfig != null + ? mapper.writer().writeValueAsBytes(project.buildConfig) + : null; + if (buildConfigAsBytes != null) { + // Include build config if not null. ZipEntry buildConfigEntry = new ZipEntry("build-config.json"); out.putNextEntry(buildConfigEntry); - - ObjectMapper mapper = new ObjectMapper(); mapper.setSerializationInclusion(Include.NON_NULL); - byte[] buildConfig = mapper.writer().writeValueAsBytes(proj.buildConfig); - out.write(buildConfig); - + out.write(buildConfigAsBytes); out.closeEntry(); } - // TODO: remove branding url root here and from config.yml - String brandingUrlRoot = DataManager.getConfigPropertyAsText("application.data.branding_public"); - OtpRouterConfig routerConfig = proj.routerConfig; - if (routerConfig == null && brandingUrlRoot != null) { - routerConfig = new OtpRouterConfig(); - } - if (routerConfig != null) { - routerConfig.brandingUrlRoot = brandingUrlRoot; + // Use custom router config if it is not null, otherwise default to project router config. + byte[] routerConfigAsBytes = customRouterConfig != null + ? customRouterConfig.getBytes(StandardCharsets.UTF_8) + : project.routerConfig != null + ? mapper.writer().writeValueAsBytes(project.routerConfig) + : null; + if (routerConfigAsBytes != null) { + // Include router config if not null. ZipEntry routerConfigEntry = new ZipEntry("router-config.json"); out.putNextEntry(routerConfigEntry); - - ObjectMapper mapper = new ObjectMapper(); mapper.setSerializationInclusion(Include.NON_NULL); - out.write(mapper.writer().writeValueAsBytes(routerConfig)); - + out.write(routerConfigAsBytes); out.closeEntry(); } } - + // Finally close the zip output stream. The dump file is now complete. out.close(); } - // Get OSM extract - public static InputStream getOsmExtract(Rectangle2D bounds) { - // call vex server - URL vexUrl = null; - try { - vexUrl = new URL(String.format("%s/?n=%.6f&e=%.6f&s=%.6f&w=%.6f", - DataManager.getConfigPropertyAsText("OSM_VEX"), - bounds.getMaxY(), bounds.getMaxX(), bounds.getMinY(), bounds.getMinX())); - } catch (MalformedURLException e1) { - e1.printStackTrace(); - } - LOG.info("Getting OSM extract at " + vexUrl.toString()); - HttpURLConnection conn = null; - try { - conn = (HttpURLConnection) vexUrl.openConnection(); - } catch (IOException e1) { - e1.printStackTrace(); - } - try { - conn.connect(); - } catch (IOException e1) { - e1.printStackTrace(); - } - - InputStream is = null; - try { - is = conn.getInputStream(); - } catch (IOException e1) { - e1.printStackTrace(); + /** + * Get OSM extract from OSM vex server as input stream. + */ + public static InputStream downloadOsmExtract(Rectangle2D rectangle2D) throws IOException { + Bounds bounds = new Bounds(rectangle2D); + if (!bounds.areValid()) { + throw new IllegalArgumentException(String.format("Provided bounds %s are not valid", bounds.toVexString())); } - return is; - } - - public static Rectangle2D getFeedVersionBounds(FeedVersion version) { - return null; + URL vexUrl = new URL(String.format(Locale.ROOT, "%s/%s.pbf", + DataManager.getConfigPropertyAsText("OSM_VEX"), + bounds.toVexString())); + LOG.info("Getting OSM extract at {}", vexUrl.toString()); + HttpURLConnection conn = (HttpURLConnection) vexUrl.openConnection(); + conn.connect(); + return conn.getInputStream(); } - // Get the union of the bounds of all the feeds in this deployment + /** + * Get the union of the bounds of all the feed versions in this deployment or if using custom bounds, return the + * project's custom bounds. + */ @JsonView(JsonViews.UserInterface.class) - public Rectangle2D getProjectBounds() { + @JsonProperty("projectBounds") + public Rectangle2D retrieveProjectBounds() { - Project proj = this.getProject(); - if(proj.useCustomOsmBounds != null && proj.useCustomOsmBounds) { - Rectangle2D bounds = new Rectangle2D.Double(proj.osmWest, proj.osmSouth, - proj.osmEast - proj.osmWest, proj.osmNorth - proj.osmSouth); - return bounds; + Project project = this.parentProject(); + if(project.useCustomOsmBounds && project.bounds != null) { + // Simply return the project's custom bounds if not determining bounds via feed version bounds. + return project.bounds.toRectangle2D(); } - List versions = getFeedVersions(); + List versions = retrieveFeedVersions(); if (versions.size() == 0) return null; @@ -438,18 +373,19 @@ public Rectangle2D getProjectBounds() { // i = 1 because we've already included bounds 0 for (int i = 0; i < versions.size(); i++) { SummarizedFeedVersion version = versions.get(i); -// return getFeedVersionBounds(version); - if (version.validationResult != null && version.validationResult.bounds != null) { + + // set version bounds from validation result + if (version.boundsAreValid()) { if (!boundsSet) { // set the bounds, don't expand the null bounds - bounds.setRect(versions.get(0).validationResult.bounds); + bounds.setRect(versions.get(0).validationResult.bounds.toRectangle2D()); boundsSet = true; } else { - bounds.add(version.validationResult.bounds); + bounds.add(version.validationResult.bounds.toRectangle2D()); } + } else { + LOG.warn("Feed version {} has no bounds", version.id); } - else - LOG.warn("Feed version %s has no bounds", version); } // expand the bounds by (about) 10 km in every direction @@ -483,37 +419,23 @@ public Rectangle2D getProjectBounds() { } /** - * Commit changes to the datastore + * Get the deployments currently deployed to a particular server and router combination. */ - public static void commit () { - deploymentStore.commit(); + public static FindIterable retrieveDeploymentForServerAndRouterId(String server, String routerId) { + return Persistence.deployments.getMongoCollection().find(and( + eq("deployedTo", server), + eq("routerId", routerId) + )); } - /** - * Get all of the deployments. - */ - public static Collection getAll () { - return deploymentStore.getAll(); - } - - /** - * Get the deployment currently deployed to a particular server. - */ - public static Deployment getDeploymentForServerAndRouterId (String server, String routerId) { - for (Deployment d : getAll()) { - if (d.deployedTo != null && d.deployedTo.equals(server)) { - if ((routerId != null && routerId.equals(d.routerId)) || d.routerId == routerId) { - return d; - } - } - } - - return null; + @JsonProperty("organizationId") + public String organizationId() { + Project project = parentProject(); + return project == null ? null : project.organizationId; } - public String getOrganizationId () { - Project project = getProject(); - return project == null ? null : project.organizationId; + public boolean delete() { + return Persistence.deployments.removeById(this.id); } /** @@ -529,14 +451,21 @@ public static class SummarizedFeedVersion { public int version; public SummarizedFeedVersion (FeedVersion version) { - this.validationResult = new FeedValidationResultSummary(version.validationResult); - this.feedSource = version.getFeedSource(); + this.validationResult = new FeedValidationResultSummary(version.validationResult, version.feedLoadResult); + this.feedSource = version.parentFeedSource(); this.updated = version.updated; this.id = version.id; - this.nextVersionId = version.getNextVersionId(); - this.previousVersionId = version.getPreviousVersionId(); + this.nextVersionId = version.nextVersionId(); + this.previousVersionId = version.previousVersionId(); this.version = version.version; } + + /** + * Determine if the bounds for the summary version exist and are valid. + */ + public boolean boundsAreValid () { + return validationResult != null && validationResult.bounds != null && validationResult.bounds.areValid(); + } } /** @@ -548,10 +477,10 @@ public SummarizedFeedVersion (FeedVersion version) { */ public abstract static class DeploymentFullFeedVersionMixin { @JsonIgnore - public abstract Collection getFeedVersions(); + public abstract Collection retrievefeedVersions(); - @JsonProperty("feedVersions") +// @JsonProperty("feedVersions") @JsonIgnore(false) - public abstract Collection getFullFeedVersions (); + public abstract Collection retrieveFullFeedVersions (); } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/ExternalFeedSourceProperty.java b/src/main/java/com/conveyal/datatools/manager/models/ExternalFeedSourceProperty.java index a19310873..2604ccc11 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/ExternalFeedSourceProperty.java +++ b/src/main/java/com/conveyal/datatools/manager/models/ExternalFeedSourceProperty.java @@ -1,11 +1,7 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.datatools.manager.persistence.DataStore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonView; -import java.util.Collection; /** * Created by demory on 3/30/16. @@ -14,76 +10,26 @@ public class ExternalFeedSourceProperty extends Model { private static final long serialVersionUID = 1L; - private static DataStore propertyStore = new DataStore<>("externalFeedSourceProperties"); - - private FeedSource feedSource; - // constructor for data dump load public ExternalFeedSourceProperty() {} public ExternalFeedSourceProperty(FeedSource feedSource, String resourceType, String name, String value) { - this.id = feedSource.id + "_" + resourceType + "_" + name; + this.id = constructId(feedSource, resourceType, name); this.feedSourceId = feedSource.id; this.resourceType = resourceType; this.name = name; this.value = value; } - @JsonProperty - public String getFeedSourceId() { - return feedSource != null ? feedSource.id : feedSourceId; + public static String constructId(FeedSource feedSource, String resourceType, String name) { + return feedSource.id + "_" + resourceType + "_" + name; } public String resourceType; - private String feedSourceId; + public String feedSourceId; public String name; public String value; - - public void save () { - save(true); - } - - public void save (boolean commit) { - if (commit) - propertyStore.save(id, this); - else - propertyStore.saveWithoutCommit(id, this); - } - - /** - * Commit changes to the datastore - */ - public static void commit () { - propertyStore.commit(); - } - - public static ExternalFeedSourceProperty find(FeedSource source, String resourceType, String name) { - return propertyStore.getById(source.id + "_" +resourceType + "_" + name); - } - - public static ExternalFeedSourceProperty updateOrCreate(FeedSource source, String resourceType, String name, String value) { - ExternalFeedSourceProperty prop = - ExternalFeedSourceProperty.find(source, resourceType, name); - - if(prop == null) { - prop = new ExternalFeedSourceProperty(source, resourceType, name, value); - } - else prop.value = value; - - prop.save(); - - return prop; - } - - public static Collection getAll () { - return propertyStore.getAll(); - } - - public void delete() { - propertyStore.delete(this.id); - } - } diff --git a/src/main/java/com/conveyal/datatools/manager/models/FeedDownloadToken.java b/src/main/java/com/conveyal/datatools/manager/models/FeedDownloadToken.java index ed96312bf..23c9c12f1 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/FeedDownloadToken.java +++ b/src/main/java/com/conveyal/datatools/manager/models/FeedDownloadToken.java @@ -1,8 +1,8 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.datatools.editor.models.Snapshot; -import com.conveyal.datatools.manager.persistence.DataStore; -import com.fasterxml.jackson.annotation.JsonIgnore; +import com.conveyal.datatools.manager.models.Snapshot; +import com.conveyal.datatools.manager.persistence.Persistence; +import com.fasterxml.jackson.annotation.JsonProperty; import org.mapdb.Fun; import java.util.Date; @@ -15,61 +15,48 @@ public class FeedDownloadToken extends Model { private static final long serialVersionUID = 1L; - private static DataStore tokenStore = new DataStore("feeddownloadtokens"); - private String feedVersionId; - private Fun.Tuple2 snapshotId; + public String feedVersionId; + public String snapshotId; - private Date timestamp; + public Date timestamp; + + public FeedDownloadToken () { } public FeedDownloadToken (FeedVersion feedVersion) { - super(); feedVersionId = feedVersion.id; timestamp = new Date(); } public FeedDownloadToken (Snapshot snapshot) { - super(); snapshotId = snapshot.id; timestamp = new Date(); } public FeedDownloadToken (Project project) { - super(); feedVersionId = project.id; timestamp = new Date(); } - public static FeedDownloadToken get (String id) { - return tokenStore.getById(id); - } - - @JsonIgnore - public FeedVersion getFeedVersion () { - if (feedVersionId != null) return FeedVersion.get(feedVersionId); + @JsonProperty("feedVersion") + public FeedVersion retrieveFeedVersion() { + if (feedVersionId != null) return Persistence.feedVersions.getById(feedVersionId); else return null; } - @JsonIgnore - public Snapshot getSnapshot () { - if (snapshotId != null) return Snapshot.get(snapshotId); + @JsonProperty("snapshot") + public Snapshot retrieveSnapshot() { + if (snapshotId != null) return Persistence.snapshots.getById(snapshotId); else return null; } - @JsonIgnore - public Project getProject () { - return Project.get(feedVersionId); + // TODO: Need to update feedVersionId field name to be more generic (downloadTargetId) + public Project retrieveProject() { + return Persistence.projects.getById(feedVersionId); } public boolean isValid () { return true; } - public void save () { - tokenStore.save(id, this); - } - - public void delete () { - tokenStore.delete(id); - } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/FeedSource.java b/src/main/java/com/conveyal/datatools/manager/models/FeedSource.java index 41f2b3d3c..9e0188d7a 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/FeedSource.java +++ b/src/main/java/com/conveyal/datatools/manager/models/FeedSource.java @@ -1,80 +1,75 @@ package com.conveyal.datatools.manager.models; import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.DeleteObjectRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.conveyal.datatools.editor.datastore.FeedTx; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.editor.datastore.GlobalTx; import com.conveyal.datatools.editor.datastore.VersionedDataStore; import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.auth.Auth0UserProfile; import com.conveyal.datatools.manager.jobs.NotifyUsersForSubscriptionJob; -import com.conveyal.datatools.manager.persistence.DataStore; import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.HashUtils; +import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonView; -import com.google.common.eventbus.EventBus; -import org.mapdb.Fun; +import com.mongodb.client.FindIterable; +import com.mongodb.client.model.Sorts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import spark.HaltException; import java.io.File; import java.io.IOException; -import java.io.InvalidClassException; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; -import java.util.*; -import java.util.stream.Collectors; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static com.conveyal.datatools.manager.utils.StringUtils.getCleanName; -import static spark.Spark.halt; +import static com.mongodb.client.model.Filters.eq; /** * Created by demory on 3/22/16. */ @JsonIgnoreProperties(ignoreUnknown = true) public class FeedSource extends Model implements Cloneable { + private static final long serialVersionUID = 1L; public static final Logger LOG = LoggerFactory.getLogger(FeedSource.class); - private static DataStore sourceStore = new DataStore("feedsources"); - /** * The collection of which this feed is a part */ //@JsonView(JsonViews.DataDump.class) public String projectId; - public String[] regions = {"1"}; +// public String[] regions = {"1"}; /** * Get the Project of which this feed is a part */ - @JsonIgnore - public Project getProject () { - return projectId != null ? Project.get(projectId) : null; + public Project retrieveProject() { + return projectId != null ? Persistence.projects.getById(projectId) : null; } - public String getOrganizationId () { - Project project = getProject(); + @JsonProperty("organizationId") + public String organizationId () { + Project project = retrieveProject(); return project == null ? null : project.organizationId; } - @JsonIgnore - public List getRegionList () { - return Region.getAll().stream().filter(r -> Arrays.asList(regions).contains(r.id)).collect(Collectors.toList()); - } - - public void setProject(Project proj) { - this.projectId = proj.id; - this.save(); - proj.save(); - } + // TODO: Add back in regions once they have been refactored +// public List retrieveRegionList () { +// return Region.retrieveAll().stream().filter(r -> Arrays.asList(regions).contains(r.id)).collect(Collectors.toList()); +// } /** The name of this feed source, e.g. MTA New York City Subway */ public String name; @@ -99,8 +94,9 @@ public void setProject(Project proj) { /** * When was this feed last updated? + * FIXME: this is currently dynamically determined by lastUpdated() with calls retrieveLatest(). */ - public transient Date lastUpdated; +// public transient Date lastUpdated; /** * From whence is this feed fetched? @@ -121,6 +117,8 @@ public void setProject(Project proj) { public String publishedVersionId; + public String editorNamespace; + /** * Create a new feed. */ @@ -138,62 +136,59 @@ public FeedSource () { this(null); } + + public FeedVersion fetch (MonitorableJob.Status status) { + return fetch(status, null); + } /** - * Fetch the latest version of the feed. + * Fetch the latest version of the feed. Optionally provide an override URL from which to fetch the feed. This + * optional URL is used for a one-level deep recursive call of fetch when a redirect is encountered. + * + * FIXME: Should the FeedSource fetch URL field be updated if a recursive call with new URL is successful? + * + * @return the fetched FeedVersion if a new version is available or null if nothing needs to be updated. */ - public FeedVersion fetch (EventBus eventBus, String fetchUser) { - Map statusMap = new HashMap<>(); - statusMap.put("message", "Downloading file"); - statusMap.put("percentComplete", 20.0); - statusMap.put("error", false); - eventBus.post(statusMap); - - FeedVersion latest = getLatest(); + public FeedVersion fetch (MonitorableJob.Status status, String optionalUrlOverride) { + status.message = "Downloading file"; // We create a new FeedVersion now, so that the fetched date is (milliseconds) before // fetch occurs. That way, in the highly unlikely event that a feed is updated while we're // fetching it, we will not miss a new feed. FeedVersion version = new FeedVersion(this); + version.retrievalMethod = FeedRetrievalMethod.FETCHED_AUTOMATICALLY; // build the URL from which to fetch - URL url = this.url; + URL url = null; + try { + // If an optional URL is provided (in the case of a recursive fetch) use that. Otherwise, use the fetch URL + url = optionalUrlOverride != null ? new URL(optionalUrlOverride) : this.url; + } catch (MalformedURLException e) { + e.printStackTrace(); + status.fail(String.format("Could not connect to bad redirect URL %s", optionalUrlOverride)); + } LOG.info("Fetching from {}", url.toString()); // make the request, using the proper HTTP caching headers to prevent refetch, if applicable HttpURLConnection conn; try { conn = (HttpURLConnection) url.openConnection(); - } catch (IOException e) { + // Set user agent request header in order to avoid 403 Forbidden response from some servers. + // https://stackoverflow.com/questions/13670692/403-forbidden-with-java-but-not-web-browser + conn.setRequestProperty( + "User-Agent", + "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11" + ); + } catch (Exception e) { String message = String.format("Unable to open connection to %s; not fetching feed %s", url, this.name); LOG.error(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(400, message); - return null; - } catch (ClassCastException e) { - String message = String.format("Unable to open connection to %s; not fetching %s feed", url, this.name); - LOG.error(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(400, message); - return null; - } catch (NullPointerException e) { - String message = String.format("Unable to open connection to %s; not fetching %s feed", url, this.name); - LOG.error(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(400, message); + // TODO use this update function throughout this class + status.update(true, message, 0); return null; } conn.setDefaultUseCaches(true); - + // Get latest version to check that the fetched version does not duplicate a feed already loaded. + FeedVersion latest = retrieveLatest(); // lastFetched is set to null when the URL changes and when latest feed version is deleted if (latest != null && this.lastFetched != null) conn.setIfModifiedSince(Math.min(latest.updated.getTime(), this.lastFetched.getTime())); @@ -202,88 +197,90 @@ public FeedVersion fetch (EventBus eventBus, String fetchUser) { try { conn.connect(); - - if (conn.getResponseCode() == HttpURLConnection.HTTP_NOT_MODIFIED) { - String message = String.format("Feed %s has not been modified", this.name); - LOG.warn(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(304, message); - return null; - } - - // TODO: redirects - else if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) { - String message = String.format("Saving %s feed.", this.name); - LOG.info(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 75.0); - statusMap.put("error", false); - eventBus.post(statusMap); - newGtfsFile = version.newGtfsFile(conn.getInputStream()); - } - - else { - String message = String.format("HTTP status %s retrieving %s feed", conn.getResponseMessage(), this.name); - LOG.error(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(400, message); - return null; + String message; + int responseCode = conn.getResponseCode(); + LOG.info("Fetch feed response code={}", responseCode); + switch (responseCode) { + case HttpURLConnection.HTTP_NOT_MODIFIED: + message = String.format("Feed %s has not been modified", this.name); + LOG.warn(message); + status.update(false, message, 100.0); + return null; + case HttpURLConnection.HTTP_OK: + // Response is OK. Continue on to save the GTFS file. + message = String.format("Saving %s feed.", this.name); + LOG.info(message); + status.update(false, message, 75.0); + newGtfsFile = version.newGtfsFile(conn.getInputStream()); + break; + case HttpURLConnection.HTTP_MOVED_TEMP: + case HttpURLConnection.HTTP_MOVED_PERM: + case HttpURLConnection.HTTP_SEE_OTHER: + // Get redirect url from "location" header field + String newUrl = conn.getHeaderField("Location"); + if (optionalUrlOverride != null) { + // Only permit recursion one level deep. If more than one redirect is detected, fail the job and + // suggest that user try again with new URL. + message = String.format("More than one redirects for fetch URL detected. Please try fetch again with latest URL: %s", newUrl); + LOG.error(message); + status.fail(message); + return null; + } else { + // If override URL is null, this is the zeroth fetch. Recursively call fetch, but only one time + // to prevent multiple (possibly infinite?) redirects. Any more redirects than one should + // probably be met with user action to update the fetch URL. + LOG.info("Recursively calling fetch feed with new URL: {}", newUrl); + return fetch(status, newUrl); + } + default: + // Any other HTTP codes result in failure. + // FIXME Are there "success" codes we're not accounting for? + message = String.format("HTTP status (%d: %s) retrieving %s feed", responseCode, conn.getResponseMessage(), this.name); + LOG.error(message); + status.fail(message); + return null; } } catch (IOException e) { String message = String.format("Unable to connect to %s; not fetching %s feed", url, this.name); LOG.error(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", true); - eventBus.post(statusMap); + status.fail(message); e.printStackTrace(); - halt(400, message); return null; - } catch (HaltException e) { - LOG.warn("Halt thrown", e); - throw e; } // note that anything other than a new feed fetched successfully will have already returned from the function -// version.hash(); version.hash = HashUtils.hashFile(newGtfsFile); if (latest != null && version.hash.equals(latest.hash)) { + // If new version hash equals the hash for the latest version, do not error. Simply indicate that server + // operators should add If-Modified-Since support to avoid wasting bandwidth. String message = String.format("Feed %s was fetched but has not changed; server operators should add If-Modified-Since support to avoid wasting bandwidth", this.name); LOG.warn(message); - newGtfsFile.delete(); - version.delete(); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", true); - eventBus.post(statusMap); - halt(304); + String filePath = newGtfsFile.getAbsolutePath(); + if (newGtfsFile.delete()) { + LOG.info("Deleting redundant GTFS file: {}", filePath); + } else { + LOG.warn("Failed to delete unneeded GTFS file at: {}", filePath); + } + status.update(false, message, 100.0, true); return null; } else { version.userId = this.userId; - this.lastFetched = version.updated; - this.save(); + // Update last fetched value for feed source. + Persistence.feedSources.updateField(this.id, "lastFetched", version.updated); - NotifyUsersForSubscriptionJob notifyFeedJob = new NotifyUsersForSubscriptionJob("feed-updated", this.id, "New feed version created for " + this.name); - Thread notifyThread = new Thread(notifyFeedJob); - notifyThread.start(); + // Set file timestamp according to last modified header from connection + version.fileTimestamp = conn.getLastModified(); + NotifyUsersForSubscriptionJob.createNotification( + "feed-updated", + this.id, + String.format("New feed version created for %s.", this.name)); String message = String.format("Fetch complete for %s", this.name); LOG.info(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", false); - eventBus.post(statusMap); - version.setUserById(fetchUser); - version.fileTimestamp = conn.getLastModified(); + status.update(false, message, 100.0); return version; } } @@ -296,39 +293,26 @@ public String toString () { return ""; } - public void save () { - save(true); - } - public void setName(String name){ - this.name = name; - this.save(); - } - public void save (boolean commit) { - if (commit) - sourceStore.save(this.id, this); - else - sourceStore.saveWithoutCommit(this.id, this); - } - /** * Get the latest version of this feed * @return the latest version of this feed */ @JsonIgnore - public FeedVersion getLatest () { - FeedVersion v = FeedVersion.versionStore.findFloor("version", new Fun.Tuple2(this.id, Fun.HI)); - - // the ID doesn't necessarily match, because it will fall back to the previous source in the store if there are no versions for this source - if (v == null || !v.feedSourceId.equals(this.id)) + public FeedVersion retrieveLatest() { + FeedVersion newestVersion = Persistence.feedVersions + .getOneFiltered(eq("feedSourceId", this.id), Sorts.descending("version")); + if (newestVersion == null) { + // Is this what happens if there are none? return null; - - return v; + } + return newestVersion; } @JsonInclude(JsonInclude.Include.NON_NULL) @JsonView(JsonViews.UserInterface.class) - public String getLatestVersionId () { - FeedVersion latest = getLatest(); + @JsonProperty("latestVersionId") + public String latestVersionId() { + FeedVersion latest = retrieveLatest(); return latest != null ? latest.id : null; } @@ -340,44 +324,49 @@ public String getLatestVersionId () { // TODO: use summarized feed source here. requires serious refactoring on client side. @JsonInclude(JsonInclude.Include.NON_NULL) @JsonView(JsonViews.UserInterface.class) - public Date getLastUpdated() { - FeedVersion latest = getLatest(); + @JsonProperty("lastUpdated") + public Date lastUpdated() { + FeedVersion latest = retrieveLatest(); return latest != null ? latest.updated : null; } @JsonInclude(JsonInclude.Include.NON_NULL) @JsonView(JsonViews.UserInterface.class) - public FeedValidationResultSummary getLatestValidation () { - FeedVersion latest = getLatest(); - FeedValidationResult result = latest != null ? latest.validationResult : null; - return result != null ?new FeedValidationResultSummary(result) : null; + @JsonProperty("latestValidation") + public FeedValidationResultSummary latestValidation() { + FeedVersion latest = retrieveLatest(); + ValidationResult result = latest != null ? latest.validationResult : null; + return result != null ?new FeedValidationResultSummary(result, latest.feedLoadResult) : null; } - @JsonInclude(JsonInclude.Include.NON_NULL) - @JsonView(JsonViews.UserInterface.class) - public boolean getEditedSinceSnapshot() { -// FeedTx tx; -// try { -// tx = VersionedDataStore.getFeedTx(id); -// } catch (Exception e) { -// -// } -// return tx.editedSinceSnapshot.get(); - return false; - } + // TODO: figure out some way to indicate whether feed has been edited since last snapshot (i.e, there exist changes) +// @JsonInclude(JsonInclude.Include.NON_NULL) +// @JsonView(JsonViews.UserInterface.class) +// public boolean getEditedSinceSnapshot() { +//// FeedTx tx; +//// try { +//// tx = VersionedDataStore.getFeedTx(id); +//// } catch (Exception e) { +//// +//// } +//// return tx.editedSinceSnapshot.retrieveById(); +// return false; +// } @JsonInclude(JsonInclude.Include.NON_NULL) @JsonView(JsonViews.UserInterface.class) - public Map> getExternalProperties() { + @JsonProperty("externalProperties") + public Map> externalProperties() { Map> resourceTable = new HashMap<>(); for(String resourceType : DataManager.feedResources.keySet()) { Map propTable = new HashMap<>(); - ExternalFeedSourceProperty.getAll().stream() - .filter(prop -> prop.getFeedSourceId().equals(this.id)) + // FIXME: use mongo filters instead + Persistence.externalFeedSourceProperties.getAll().stream() + .filter(prop -> prop.feedSourceId.equals(this.id)) .forEach(prop -> propTable.put(prop.name, prop.value)); resourceTable.put(resourceType, propTable); @@ -385,67 +374,92 @@ public Map> getExternalProperties() { return resourceTable; } - public static FeedSource get(String id) { - return sourceStore.getById(id); + /** + * Get all of the feed versions for this source + * @return collection of feed versions + */ + @JsonIgnore + public Collection retrieveFeedVersions() { + return Persistence.feedVersions.getFiltered(eq("feedSourceId", this.id)); } - public static Collection getAll() { - return sourceStore.getAll(); + /** + * Get all of the snapshots for this source + * @return collection of snapshots + */ + @JsonIgnore + public Collection retrieveSnapshots() { + return Persistence.snapshots.getFiltered(eq(Snapshot.FEED_SOURCE_REF, this.id)); } /** - * Get all of the feed versions for this source - * @return collection of feed versions + * Get all of the test deployments for this feed source. + * @return collection of deloyments */ @JsonIgnore - public Collection getFeedVersions() { - // TODO Indices - return FeedVersion.getAll().stream() - .filter(v -> this.id.equals(v.feedSourceId)) - .collect(Collectors.toCollection(ArrayList::new)); + public Collection retrieveDeployments () { + return Persistence.deployments.getFiltered(eq(Snapshot.FEED_SOURCE_REF, this.id)); } - @JsonView(JsonViews.UserInterface.class) - public int getFeedVersionCount() { - return getFeedVersions().size(); +// @JsonView(JsonViews.UserInterface.class) +// @JsonProperty("feedVersionCount") + public int feedVersionCount() { + return retrieveFeedVersions().size(); } @JsonView(JsonViews.UserInterface.class) - public int getNoteCount() { + @JsonProperty("noteCount") + public int noteCount() { return this.noteIds != null ? this.noteIds.size() : 0; } - public String getPublicKey () { + public String toPublicKey() { return "public/" + getCleanName(this.name) + ".zip"; } public void makePublic() { String sourceKey = FeedStore.s3Prefix + this.id + ".zip"; - String publicKey = getPublicKey(); - if (FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, sourceKey)) { - LOG.info("copying feed {} to s3 public folder", this); - FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, sourceKey, CannedAccessControlList.PublicRead); - FeedStore.s3Client.copyObject(DataManager.feedBucket, sourceKey, DataManager.feedBucket, publicKey); - FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, publicKey, CannedAccessControlList.PublicRead); - } else { - LOG.warn("Could not locate latest feed source {} on s3 at {}. Using latest version instead.", this, sourceKey); - String versionId = this.getLatestVersionId(); - String latestVersionKey = FeedStore.s3Prefix + versionId; - if (FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, latestVersionKey)) { - LOG.info("copying feed version {} to s3 public folder", versionId); - FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, latestVersionKey, CannedAccessControlList.PublicRead); - FeedStore.s3Client.copyObject(DataManager.feedBucket, latestVersionKey, DataManager.feedBucket, publicKey); - FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, publicKey, CannedAccessControlList.PublicRead); + String publicKey = toPublicKey(); + String versionId = this.latestVersionId(); + String latestVersionKey = FeedStore.s3Prefix + versionId; - // also copy latest version to feedStore latest - FeedStore.s3Client.copyObject(DataManager.feedBucket, latestVersionKey, DataManager.feedBucket, sourceKey); + // only deploy to public if storing feeds on s3 (no mechanism for downloading/publishing + // them otherwise) + if (DataManager.useS3) { + boolean sourceExists = FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, sourceKey); + ObjectMetadata sourceMetadata = sourceExists + ? FeedStore.s3Client.getObjectMetadata(DataManager.feedBucket, sourceKey) + : null; + boolean latestExists = FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, latestVersionKey); + ObjectMetadata latestVersionMetadata = latestExists + ? FeedStore.s3Client.getObjectMetadata(DataManager.feedBucket, latestVersionKey) + : null; + boolean latestVersionMatchesSource = sourceMetadata != null && + latestVersionMetadata != null && + sourceMetadata.getETag().equals(latestVersionMetadata.getETag()); + if (sourceExists && latestVersionMatchesSource) { + LOG.info("copying feed {} to s3 public folder", this); + FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, sourceKey, CannedAccessControlList.PublicRead); + FeedStore.s3Client.copyObject(DataManager.feedBucket, sourceKey, DataManager.feedBucket, publicKey); + FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, publicKey, CannedAccessControlList.PublicRead); + } else { + LOG.warn("Latest feed source {} on s3 at {} does not exist or does not match latest version. Using latest version instead.", this, sourceKey); + if (FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, latestVersionKey)) { + LOG.info("copying feed version {} to s3 public folder", versionId); + FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, latestVersionKey, CannedAccessControlList.PublicRead); + FeedStore.s3Client.copyObject(DataManager.feedBucket, latestVersionKey, DataManager.feedBucket, publicKey); + FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, publicKey, CannedAccessControlList.PublicRead); + + // also copy latest version to feedStore latest + FeedStore.s3Client.copyObject(DataManager.feedBucket, latestVersionKey, DataManager.feedBucket, sourceKey); + } } } } public void makePrivate() { String sourceKey = FeedStore.s3Prefix + this.id + ".zip"; - String publicKey = getPublicKey(); + String publicKey = toPublicKey(); if (FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, sourceKey)) { LOG.info("removing feed {} from s3 public folder", this); FeedStore.s3Client.setObjectAcl(DataManager.feedBucket, sourceKey, CannedAccessControlList.AuthenticatedRead); @@ -453,6 +467,34 @@ public void makePrivate() { } } + // TODO don't number the versions just timestamp them + // FIXME for a brief moment feed version numbers are incoherent. Do this in a single operation or eliminate feed version numbers. + public void renumberFeedVersions() { + int i = 1; + FindIterable orderedFeedVersions = Persistence.feedVersions.getMongoCollection() + .find(eq("feedSourceId", this.id)) + .sort(Sorts.ascending("updated")); + for (FeedVersion feedVersion : orderedFeedVersions) { + // Yes it's ugly to pass in a string, but we need to change the parameter type of update to take a Document. + Persistence.feedVersions.update(feedVersion.id, String.format("{version:%d}", i)); + i += 1; + } + } + + // TODO don't number the snapshots just timestamp them + // FIXME for a brief moment snapshot numbers are incoherent. Do this in a single operation or eliminate snapshot version numbers. + public void renumberSnapshots() { + int i = 1; + FindIterable orderedSnapshots = Persistence.snapshots.getMongoCollection() + .find(eq(Snapshot.FEED_SOURCE_REF, this.id)) + .sort(Sorts.ascending("snapshotTime")); + for (Snapshot snapshot : orderedSnapshots) { + // Yes it's ugly to pass in a string, but we need to change the parameter type of update to take a Document. + Persistence.snapshots.updateField(snapshot.id, "version", i); + i += 1; + } + } + /** * Represents ways feeds can be retrieved */ @@ -462,61 +504,35 @@ public enum FeedRetrievalMethod { PRODUCED_IN_HOUSE // produced in-house in a GTFS Editor instance } - public static void commit() { - sourceStore.commit(); - } - /** * Delete this feed source and everything that it contains. + * + * FIXME: Use a Mongo transaction to handle the deletion of these related objects. */ - public void delete() { - getFeedVersions().forEach(FeedVersion::delete); - - // delete latest copy of feed source - if (DataManager.useS3) { - DeleteObjectsRequest delete = new DeleteObjectsRequest(DataManager.feedBucket); - delete.withKeys("public/" + this.name + ".zip", FeedStore.s3Prefix + this.id + ".zip"); - FeedStore.s3Client.deleteObjects(delete); - } - - // Delete editor feed mapdb - // TODO: does the mapdb folder need to be deleted separately? - GlobalTx gtx = VersionedDataStore.getGlobalTx(); - if (!gtx.feeds.containsKey(id)) { - gtx.rollback(); - } - else { - gtx.feeds.remove(id); - gtx.commit(); - } - - ExternalFeedSourceProperty.getAll().stream() - .filter(prop -> prop.getFeedSourceId().equals(this.id)) - .forEach(ExternalFeedSourceProperty::delete); + public boolean delete() { + try { + retrieveFeedVersions().forEach(FeedVersion::delete); - // TODO: add delete for osm extract and r5 network (maybe that goes with version) + // Delete latest copy of feed source on S3. + if (DataManager.useS3) { + DeleteObjectsRequest delete = new DeleteObjectsRequest(DataManager.feedBucket); + delete.withKeys("public/" + this.name + ".zip", FeedStore.s3Prefix + this.id + ".zip"); + FeedStore.s3Client.deleteObjects(delete); + } + // Remove all external properties for this feed source. + Persistence.externalFeedSourceProperties.removeFiltered(eq("feedSourceId", this.id)); - sourceStore.delete(this.id); - } + // FIXME: Should this delete related feed versions from the SQL database (for both published versions and + // editor snapshots)? - /*@JsonIgnore - public AgencyBranding getAgencyBranding(String agencyId) { - if(branding != null) { - for (AgencyBranding agencyBranding : branding) { - if (agencyBranding.agencyId.equals(agencyId)) return agencyBranding; - } + // Finally, delete the feed source mongo document. + return Persistence.feedSources.removeById(this.id); + } catch (Exception e) { + LOG.error("Could not delete feed source", e); + return false; } - return null; } - @JsonIgnore - public void addAgencyBranding(AgencyBranding agencyBranding) { - if(branding == null) { - branding = new ArrayList<>(); - } - branding.add(agencyBranding); - }*/ - public FeedSource clone () throws CloneNotSupportedException { return (FeedSource) super.clone(); } diff --git a/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResult.java b/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResult.java index 6a10fafc4..3a1b92bc0 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResult.java +++ b/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResult.java @@ -1,16 +1,20 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.gtfs.GTFSFeed; -import com.conveyal.gtfs.model.ValidationResult; -import com.conveyal.gtfs.stats.FeedStats; -import com.conveyal.gtfs.validator.json.LoadStatus; +import com.conveyal.gtfs.loader.Feed; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.loader.TableReader; +import com.conveyal.gtfs.model.Agency; +import com.conveyal.gtfs.model.Stop; +import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.Iterators; +import java.awt.geom.Point2D; import java.awt.geom.Rectangle2D; import java.io.Serializable; import java.time.LocalDate; +import java.util.ArrayList; import java.util.Collection; -import java.util.stream.Collectors; /** * Created by landon on 5/10/16. @@ -20,7 +24,6 @@ public class FeedValidationResult implements Serializable { @JsonProperty public LoadStatus loadStatus; public String loadFailureReason; - public Collection agencies; public int agencyCount; public int routeCount; public int tripCount; @@ -28,57 +31,88 @@ public class FeedValidationResult implements Serializable { public int errorCount; public LocalDate startDate; public LocalDate endDate; - public Rectangle2D bounds; + public Bounds bounds; public long avgDailyRevenueTime; // legacy fields included for backwards compatibility (not currently used) public String feedFileName; - public ValidationResult routes; - public ValidationResult stops; - public ValidationResult trips; - public ValidationResult shapes; +// public ValidationResult routes; +// public ValidationResult stops; +// public ValidationResult trips; +// public ValidationResult shapes; // constructor for data dump load public FeedValidationResult() {} - public FeedValidationResult(GTFSFeed feed, FeedStats stats) { - this.agencies = stats.getAllAgencies().stream().map(agency -> agency.agency_id).collect(Collectors.toList()); - this.agencyCount = stats.getAgencyCount(); - this.routeCount = stats.getRouteCount(); - this.bounds = stats.getBounds(); - LocalDate calDateStart = stats.getCalendarDateStart(); - LocalDate calSvcStart = stats.getCalendarServiceRangeStart(); - - LocalDate calDateEnd = stats.getCalendarDateEnd(); - LocalDate calSvcEnd = stats.getCalendarServiceRangeEnd(); + // constructor for bad feed load + public FeedValidationResult(LoadStatus loadStatus, String loadFailureReason) { + this.loadStatus = loadStatus; + this.loadFailureReason = loadFailureReason; + } - if (calDateStart == null && calSvcStart == null) - // no service . . . this is bad - this.startDate = null; - else if (calDateStart == null) - this.startDate = calSvcStart; - else if (calSvcStart == null) - this.startDate = calDateStart; - else - this.startDate = calDateStart.isBefore(calSvcStart) ? calDateStart : calSvcStart; + // TODO: construct FeedValidationResult from sql-loader Feed (or FeedInfo) - if (calDateEnd == null && calSvcEnd == null) - // no service . . . this is bad - this.endDate = null; - else if (calDateEnd == null) - this.endDate = calSvcEnd; - else if (calSvcEnd == null) - this.endDate = calDateEnd; - else - this.endDate = calDateEnd.isAfter(calSvcEnd) ? calDateEnd : calSvcEnd; + public FeedValidationResult(ValidationResult validationResult, FeedLoadResult feedLoadResult) { + this.agencyCount = feedLoadResult.agency.rowCount; + this.routeCount = feedLoadResult.routes.rowCount; + this.tripCount = feedLoadResult.trips.rowCount; + this.errorCount = feedLoadResult.errorCount; + // FIXME: add back in. +// this.bounds = new Bounds(calculateBounds(feed.stops)); +// LocalDate calDateStart = stats.getCalendarDateStart(); +// LocalDate calSvcStart = stats.getCalendarServiceRangeStart(); +// +// LocalDate calDateEnd = stats.getCalendarDateEnd(); +// LocalDate calSvcEnd = stats.getCalendarServiceRangeEnd(); +// +// if (calDateStart == null && calSvcStart == null) +// // no service . . . this is bad +// this.startDate = null; +// else if (calDateStart == null) +// this.startDate = calSvcStart; +// else if (calSvcStart == null) +// this.startDate = calDateStart; +// else +// this.startDate = calDateStart.isBefore(calSvcStart) ? calDateStart : calSvcStart; +// +// if (calDateEnd == null && calSvcEnd == null) +// // no service . . . this is bad +// this.endDate = null; +// else if (calDateEnd == null) +// this.endDate = calSvcEnd; +// else if (calSvcEnd == null) +// this.endDate = calDateEnd; +// else +// this.endDate = calDateEnd.isAfter(calSvcEnd) ? calDateEnd : calSvcEnd; - // get revenue time in seconds for Tuesdays in feed - this.avgDailyRevenueTime = stats.getAverageDailyRevenueTime(2); + // FIXME add back in. +// try { +// // retrieve revenue time in seconds for Tuesdays in feed +// this.avgDailyRevenueTime = stats.getAverageDailyRevenueTime(2); +// } catch (Exception e) { +// // temporarily catch errors in calculating this stat +// this.avgDailyRevenueTime = -1L; +// } this.loadStatus = LoadStatus.SUCCESS; - this.tripCount = stats.getTripCount(); - this.stopTimesCount = stats.getStopTimesCount(); - this.errorCount = feed.errors.size(); + } + + private Rectangle2D calculateBounds (TableReader stops) { + Rectangle2D bounds = null; + for (Stop stop : stops) { + // FIXME add back in +// // skip over stops that don't have any stop times +// if (!feed.stopCountByStopTime.containsKey(stop.stop_id)) { +// continue; +// } + if (bounds == null) { + bounds = new Rectangle2D.Double(stop.stop_lon, stop.stop_lat, 0, 0); + } + else { + bounds.add(new Point2D.Double(stop.stop_lon, stop.stop_lat)); + } + } + return bounds; } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResultSummary.java b/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResultSummary.java index e328bb46a..535161871 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResultSummary.java +++ b/src/main/java/com/conveyal/datatools/manager/models/FeedValidationResultSummary.java @@ -1,12 +1,11 @@ package com.conveyal.datatools.manager.models; -import java.awt.geom.Rectangle2D; import java.io.Serializable; -import java.time.ZoneId; +import java.time.LocalDate; import java.util.Collection; -import java.util.Date; -import com.conveyal.gtfs.validator.json.LoadStatus; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; @@ -34,37 +33,50 @@ public class FeedValidationResultSummary implements Serializable { /** The first date the feed has service, either in calendar.txt or calendar_dates.txt */ @JsonInclude(Include.ALWAYS) - public Date startDate; + public LocalDate startDate; /** The last date the feed has service, either in calendar.txt or calendar_dates.txt */ @JsonInclude(Include.ALWAYS) - public Date endDate; + public LocalDate endDate; @JsonInclude(Include.ALWAYS) - public Rectangle2D bounds; + public Bounds bounds; /** * Construct a summarized version of the given FeedValidationResult. - * @param result + * @param validationResult */ - public FeedValidationResultSummary (FeedValidationResult result) { - if (result != null) { - this.loadStatus = result.loadStatus; - this.loadFailureReason = result.loadFailureReason; - this.agencies = result.agencies; - + public FeedValidationResultSummary (ValidationResult validationResult, FeedLoadResult feedLoadResult) { + if (validationResult != null) { + this.loadStatus = validationResult.fatalException == null + ? LoadStatus.SUCCESS + : LoadStatus.OTHER_FAILURE; + this.loadFailureReason = validationResult.fatalException; if (loadStatus == LoadStatus.SUCCESS) { - this.errorCount = result.errorCount; - this.agencyCount = result.agencyCount; - this.routeCount = result.routeCount; - this.tripCount = result.tripCount; - this.stopTimesCount = result.stopTimesCount; - this.startDate = result.startDate != null ? Date.from(result.startDate.atStartOfDay(ZoneId.systemDefault()).toInstant()) : null; - this.endDate = result.endDate != null ? Date.from(result.endDate.atStartOfDay(ZoneId.systemDefault()).toInstant()) : null; - this.bounds = result.bounds; - this.avgDailyRevenueTime = result.avgDailyRevenueTime; + if (feedLoadResult == null) { + feedLoadResult = new FeedLoadResult(true); + } + this.errorCount = validationResult.errorCount; + this.agencyCount = feedLoadResult.agency.rowCount; + this.routeCount = feedLoadResult.routes.rowCount; + this.tripCount = feedLoadResult.trips.rowCount; + this.stopTimesCount = feedLoadResult.stopTimes.rowCount; + this.startDate = validationResult.firstCalendarDate; + this.endDate = validationResult.lastCalendarDate; + this.bounds = boundsFromValidationResult(validationResult); + // FIXME: compute avg revenue time +// this.avgDailyRevenueTime = validationResult.avgDailyRevenueTime; } } } + + private static Bounds boundsFromValidationResult (ValidationResult result) { + Bounds bounds = new Bounds(); + bounds.north = result.fullBounds.maxLat; + bounds.south = result.fullBounds.minLat; + bounds.east = result.fullBounds.maxLon; + bounds.west = result.fullBounds.minLon; + return bounds; + } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/models/FeedVersion.java b/src/main/java/com/conveyal/datatools/manager/models/FeedVersion.java index 6e90355f4..c337559eb 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/FeedVersion.java +++ b/src/main/java/com/conveyal/datatools/manager/models/FeedVersion.java @@ -3,58 +3,38 @@ import java.awt.geom.Rectangle2D; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; -import java.io.OutputStream; import java.io.Serializable; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.time.LocalDate; -import java.util.ArrayList; -import java.util.Collection; import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.S3Object; + +import com.conveyal.datatools.common.status.MonitorableJob; import com.conveyal.datatools.manager.DataManager; -import com.conveyal.datatools.manager.controllers.api.GtfsApiController; -import com.conveyal.datatools.manager.persistence.DataStore; import com.conveyal.datatools.manager.persistence.FeedStore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.conveyal.datatools.manager.utils.HashUtils; -import com.conveyal.gtfs.GTFSFeed; -import com.conveyal.gtfs.validator.json.LoadStatus; -import com.conveyal.gtfs.stats.FeedStats; -import com.conveyal.r5.common.R5Version; -import com.conveyal.r5.point_to_point.builder.TNBuilderConfig; -import com.conveyal.r5.transit.TransportNetwork; +import com.conveyal.gtfs.BaseGTFSCache; +import com.conveyal.gtfs.GTFS; +import com.conveyal.gtfs.loader.Feed; +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.conveyal.gtfs.validator.ValidationResult; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.eventbus.EventBus; -import com.vividsolutions.jts.geom.Geometry; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.geotools.geojson.geom.GeometryJSON; -import org.mapdb.Fun.Tuple2; - -import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.bson.codecs.pojo.annotations.BsonProperty; + import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.annotation.JsonView; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static com.conveyal.datatools.manager.models.Deployment.getOsmExtract; import static com.conveyal.datatools.manager.utils.StringUtils.getCleanName; -import static spark.Spark.halt; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Updates.pull; /** * Represents a version of a feed. @@ -65,16 +45,11 @@ @JsonIgnoreProperties(ignoreUnknown = true) public class FeedVersion extends Model implements Serializable { private static final long serialVersionUID = 1L; - private static ObjectMapper mapper = new ObjectMapper(); - public static final Logger LOG = LoggerFactory.getLogger(FeedVersion.class); - public static final String validationSubdir = "validation/"; - static DataStore versionStore = new DataStore<>("feedversions"); - private static FeedStore feedStore = new FeedStore(); - - static { - // set up indexing on feed versions by feed source, indexed by - versionStore.secondaryKey("version", (key, fv) -> new Tuple2(fv.feedSourceId, fv.version)); - } + private static final String VERSION_ID_DATE_FORMAT = "yyyyMMdd'T'HHmmssX"; + private static final String HUMAN_READABLE_TIMESTAMP_FORMAT = "MM/dd/yyyy H:mm"; + private static final Logger LOG = LoggerFactory.getLogger(FeedVersion.class); + // FIXME: move this out of FeedVersion (also, it should probably not be public)? + public static FeedStore feedStore = new FeedStore(); /** * We generate IDs manually, but we need a bit of information to do so @@ -82,23 +57,19 @@ public class FeedVersion extends Model implements Serializable { public FeedVersion(FeedSource source) { this.updated = new Date(); this.feedSourceId = source.id; + this.name = formattedTimestamp() + " Version"; + this.id = generateFeedVersionId(source); + int count = source.feedVersionCount(); + this.version = count + 1; + } + private String generateFeedVersionId(FeedSource source) { // ISO time - DateFormat df = new SimpleDateFormat("yyyyMMdd'T'HHmmssX"); + DateFormat df = new SimpleDateFormat(VERSION_ID_DATE_FORMAT); // since we store directly on the file system, this lets users look at the DB directly - this.id = getCleanName(source.name) + "-" + df.format(this.updated) + "-" + source.id + ".zip"; - - // infer the version -// FeedVersion prev = source.getLatest(); -// if (prev != null) { -// this.version = prev.version + 1; -// } -// else { -// this.version = 1; -// } - int count = source.getFeedVersionCount(); - this.version = count + 1; + // TODO: no need to BaseGTFSCache.cleanId once we rely on GTFSCache to store the feed. + return BaseGTFSCache.cleanId(getCleanName(source.name) + "-" + df.format(this.updated) + "-" + source.id) + ".zip"; } /** @@ -114,33 +85,49 @@ public FeedVersion() { @JsonView(JsonViews.DataDump.class) public String feedSourceId; - @JsonIgnore - public transient TransportNetwork transportNetwork; + public FeedSource.FeedRetrievalMethod retrievalMethod; @JsonView(JsonViews.UserInterface.class) - public FeedSource getFeedSource() { - return FeedSource.get(feedSourceId); + @JsonProperty("feedSource") + public FeedSource parentFeedSource() { + return Persistence.feedSources.getById(feedSourceId); } - @JsonIgnore - public FeedVersion getPreviousVersion() { - return versionStore.find("version", new Tuple2(this.feedSourceId, this.version - 1)); + /** + * Finds the previous version (i.e., the version loaded directly before the current version in time order). + * @return the previous feed version or null if this is the first version + */ + public FeedVersion previousVersion() { + return Persistence.feedVersions.getOneFiltered(and( + eq("version", this.version - 1), eq("feedSourceId", this.feedSourceId)), null); } + /** + * JSON view to show the previous version ID. + */ @JsonView(JsonViews.UserInterface.class) - public String getPreviousVersionId() { - FeedVersion p = getPreviousVersion(); + @JsonProperty("previousVersionId") + public String previousVersionId() { + FeedVersion p = previousVersion(); return p != null ? p.id : null; } - @JsonIgnore - public FeedVersion getNextVersion() { - return versionStore.find("version", new Tuple2(this.feedSourceId, this.version + 1)); + /** + * Finds the next version (i.e., the version loaded directly after the current version in time order). + * @return the next feed version or null if this is the latest version + */ + public FeedVersion nextVersion() { + return Persistence.feedVersions.getOneFiltered(and( + eq("version", this.version + 1), eq("feedSourceId", this.feedSourceId)), null); } + /** + * JSON view to show the next version ID. + */ @JsonView(JsonViews.UserInterface.class) - public String getNextVersionId() { - FeedVersion p = getNextVersion(); + @JsonProperty("nextVersionId") + public String nextVersionId() { + FeedVersion p = nextVersion(); return p != null ? p.id : null; } @@ -150,44 +137,39 @@ public String getNextVersionId() { @JsonView(JsonViews.DataDump.class) public String hash; - @JsonIgnore - public File getGtfsFile() { + public File retrieveGtfsFile() { return feedStore.getFeed(id); } public File newGtfsFile(InputStream inputStream) { - File file = feedStore.newFeed(id, inputStream, getFeedSource()); + File file = feedStore.newFeed(id, inputStream, parentFeedSource()); + // fileSize field will not be stored until new FeedVersion is stored in MongoDB (usually in + // the final steps of ValidateFeedJob). this.fileSize = file.length(); - this.save(); - LOG.info("New GTFS file saved: {}", id); + LOG.info("New GTFS file saved: {} ({} bytes)", id, this.fileSize); return file; } - public File newGtfsFile(InputStream inputStream, Long lastModified) { - File file = newGtfsFile(inputStream); - if (lastModified != null) { - this.fileTimestamp = lastModified; - file.setLastModified(lastModified); - } - else { - this.fileTimestamp = file.lastModified(); + + /** + * Construct a connection to the SQL tables for this feed version's namespace to access its stored GTFS data. + */ + public Feed retrieveFeed() { + if (feedLoadResult != null) { + return new Feed(DataManager.GTFS_DATA_SOURCE, feedLoadResult.uniqueIdentifier); + } else { + return null; } - this.save(); - return file; - } - @JsonIgnore - public GTFSFeed getGtfsFeed() { - String apiId = id.replace(".zip", ""); -// return DataManager.gtfsCache.get(apiId); - return GtfsApiController.gtfsApi.getFeedSource(apiId).feed; } /** The results of validating this feed */ - @JsonView(JsonViews.DataDump.class) - public FeedValidationResult validationResult; + public ValidationResult validationResult; + + public FeedLoadResult feedLoadResult; @JsonView(JsonViews.UserInterface.class) - public FeedValidationResultSummary getValidationSummary() { - return new FeedValidationResultSummary(validationResult); + @BsonProperty("validationSummary") + public FeedValidationResultSummary validationSummary() { + return new FeedValidationResultSummary(validationResult, feedLoadResult); } @@ -200,225 +182,125 @@ public FeedValidationResultSummary getValidationSummary() { /** A name for this version. Defaults to creation date if not specified by user */ public String name; + /** The size of the original GTFS file uploaded/fetched */ public Long fileSize; + /** The last modified timestamp of the original GTFS file uploaded/fetched */ public Long fileTimestamp; - public String getName() { - return name != null ? name : (getFormattedTimestamp() + " Version"); - } + /** SQL namespace for GTFS data */ + public String namespace; - @JsonIgnore - public String getFormattedTimestamp() { - SimpleDateFormat format = new SimpleDateFormat("MM/dd/yyyy H:mm"); - return format.format(this.updated); - } - - public static FeedVersion get(String id) { - return versionStore.getById(id); - } + /** + * Indicates whether a feed version is pending published status, a check that is currently performed in + * {@link com.conveyal.datatools.manager.jobs.FeedUpdater}. This field is currently in use only for the MTC extension. + * */ + public boolean processing; - public static Collection getAll() { - return versionStore.getAll(); + public String formattedTimestamp() { + SimpleDateFormat format = new SimpleDateFormat(HUMAN_READABLE_TIMESTAMP_FORMAT); + return format.format(this.updated); } - public void validate(EventBus eventBus) { - if (eventBus == null) { - eventBus = new EventBus(); - } - Map statusMap = new HashMap<>(); - GTFSFeed gtfsFeed; + public void load(MonitorableJob.Status status, boolean isNewVersion) { + File gtfsFile; + // STEP 1. LOAD GTFS feed into relational database try { - statusMap.put("message", "Unpacking feed..."); - statusMap.put("percentComplete", 15.0); - statusMap.put("error", false); - eventBus.post(statusMap); - - /* First getGtfsFeed() call triggers feed load from zip file into gtfsCache - This may take a while for large feeds */ - gtfsFeed = getGtfsFeed(); + status.update(false,"Unpacking feed...", 15.0); + // Get SQL schema namespace for the feed version. This is needed for reconnecting with feeds + // in the database. + gtfsFile = retrieveGtfsFile(); + if (gtfsFile.length() == 0) { + throw new IOException("Empty GTFS file supplied"); + } + // If feed version has not been hashed, hash it here. + if (hash == null) hash = HashUtils.hashFile(gtfsFile); + String gtfsFilePath = gtfsFile.getPath(); + this.feedLoadResult = GTFS.load(gtfsFilePath, DataManager.GTFS_DATA_SOURCE); + // FIXME? duplication of namespace (also stored as feedLoadResult.uniqueIdentifier) + this.namespace = feedLoadResult.uniqueIdentifier; + LOG.info("Loaded GTFS into SQL {}", feedLoadResult.uniqueIdentifier); } catch (Exception e) { - String errorString = String.format("No GTFS feed exists for version: %s", this.id); - LOG.warn(errorString); - statusMap.put("message", errorString); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); + String errorString = String.format("Error loading GTFS feed for version: %s", this.id); + LOG.warn(errorString, e); + status.update(true, errorString, 0); + // FIXME: Delete local copy of feed version after failed load? return; } - if(gtfsFeed == null) { - String errorString = String.format("Could not get GTFSFeed object for FeedVersion %s", id); - LOG.warn(errorString); -// eventBus.post(new StatusEvent(errorString, 0, true)); - statusMap.put("message", errorString); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); + // FIXME: is this the right approach? + // if load was unsuccessful, update status and return + if(this.feedLoadResult == null) { + String errorString = String.format("Could not load GTFS for FeedVersion %s", id); + LOG.error(errorString); + status.update(true, errorString, 0); + // FIXME: Delete local copy of feed version after failed load? return; } - Map tripsPerDate; - - try { - // make feed public... this shouldn't take very long - FeedSource fs = this.getFeedSource(); - if (fs.isPublic) { - fs.makePublic(); - } -// eventBus.post(new StatusEvent("Validating feed...", 30, false)); - statusMap.put("message", "Validating feed..."); - statusMap.put("percentComplete", 30.0); - statusMap.put("error", false); - eventBus.post(statusMap); - LOG.info("Beginning validation..."); - gtfsFeed.validate(); - LOG.info("Calculating stats..."); - FeedStats stats = gtfsFeed.calculateStats(); - validationResult = new FeedValidationResult(gtfsFeed, stats); - LOG.info("Total errors after validation: {}", validationResult.errorCount); + // STEP 2. Upload GTFS to S3 (storage on local machine is done when feed is fetched/uploaded) + if (DataManager.useS3) { try { - // This may take a while for very large feeds. - LOG.info("Calculating # of trips per date of service"); - tripsPerDate = stats.getTripCountPerDateOfService(); - - // get revenue time in seconds for Tuesdays in feed - stats.getAverageDailyRevenueTime(2); - }catch (Exception e) { - e.printStackTrace(); - statusMap.put("message", "Unable to validate feed."); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); + boolean fileUploaded = false; + if (isNewVersion) { + // Only upload file to S3 if it is a new version (otherwise, it would have been downloaded from here. + fileUploaded = FeedVersion.feedStore.uploadToS3(gtfsFile, this.id, this.parentFeedSource()); + } + if (fileUploaded || !isNewVersion) { + // Note: If feed is not a new version, it is presumed to already exist on S3, so uploading is not required. + // Delete local copy of feed version after successful s3 upload + boolean fileDeleted = gtfsFile.delete(); + if (fileDeleted) { + LOG.info("Local GTFS file deleted after s3 upload"); + } else { + LOG.error("Local GTFS file failed to delete. Server may encounter storage capacity issues!"); + } + } else { + LOG.error("Local GTFS file not uploaded not successfully to s3!"); + } + // FIXME: should this happen here? + FeedSource fs = parentFeedSource(); + if (fs.isPublic) { + // make feed version public... this shouldn't take very long + fs.makePublic(); + } + } catch (Exception e) { + LOG.error("Could not upload version {} to s3 bucket", this.id); e.printStackTrace(); -// this.validationResult = null; - validationResult.loadStatus = LoadStatus.OTHER_FAILURE; - return; } - } catch (Exception e) { - LOG.error("Unable to validate feed {}", this.id); -// eventBus.post(new StatusEvent("Unable to validate feed.", 0, true)); - statusMap.put("message", "Unable to validate feed."); - statusMap.put("percentComplete", 0.0); - statusMap.put("error", true); - eventBus.post(statusMap); - e.printStackTrace(); -// this.validationResult = null; - validationResult.loadStatus = LoadStatus.OTHER_FAILURE; -// halt(400, "Error validating feed..."); - return; - } - - File tempFile = null; - try { -// eventBus.post(new StatusEvent("Saving validation results...", 80, false)); - statusMap.put("message", "Saving validation results..."); - statusMap.put("percentComplete", 80.0); - statusMap.put("error", false); - eventBus.post(statusMap); - // Use tempfile - tempFile = File.createTempFile(this.id, ".json"); - tempFile.deleteOnExit(); - Map validation = new HashMap<>(); - validation.put("errors", gtfsFeed.errors); - validation.put("tripsPerDate", tripsPerDate); - GeometryJSON g = new GeometryJSON(); - Geometry buffers = gtfsFeed.getMergedBuffers(); - validation.put("mergedBuffers", buffers != null ? g.toString(buffers) : null); - mapper.writeValue(tempFile, validation); - } catch (IOException e) { - e.printStackTrace(); } - saveValidationResult(tempFile); } - @JsonIgnore - public JsonNode getValidationResult(boolean revalidate) { - if (revalidate) { - LOG.warn("Revalidation requested. Validating feed."); - this.validate(); - this.save(); - halt(503, "Try again later. Validating feed"); - } - String keyName = validationSubdir + this.id + ".json"; - InputStream objectData = null; - if (DataManager.feedBucket != null && DataManager.useS3) { - try { - LOG.info("Getting validation results from s3"); - S3Object object = FeedStore.s3Client.getObject(new GetObjectRequest(DataManager.feedBucket, keyName)); - objectData = object.getObjectContent(); - } catch (AmazonS3Exception e) { - // if json file does not exist, validate feed. - this.validate(); - this.save(); - halt(503, "Try again later. Validating feed"); - } catch (AmazonServiceException ase) { - LOG.error("Error downloading from s3"); - ase.printStackTrace(); - } + /** + * Validate a version of GTFS. This method actually does a little more processing than just validation. + * Because validate() is run on all GTFS feeds whether they're fetched, created from an editor snapshot, + * uploaded manually, or god knows however else, we need a single function to handle loading a feed into + * the relational database, validating the data, and storing that validation result. Since those + * processes more or less happen in a tight sequence, we handle all of that here. + * + * This function is called in the job logic of a MonitorableJob. When the job is complete, the validated + * FeedVersion will be stored in MongoDB along with the ValidationResult and other fields populated during + * validation. + */ + public void validate(MonitorableJob.Status status) { - } - // if s3 upload set to false - else { - File file = new File(FeedStore.basePath + "/" + keyName); - try { - objectData = new FileInputStream(file); - } catch (Exception e) { - LOG.warn("Validation does not exist. Validating feed."); - this.validate(); - this.save(); - halt(503, "Try again later. Validating feed"); - } - } - return ensureValidationIsCurrent(objectData); - } + // Sometimes this method is called when no status object is available. + if (status == null) status = new MonitorableJob.Status(); - private JsonNode ensureValidationIsCurrent(InputStream objectData) { - JsonNode n; - // Process the objectData stream. + // VALIDATE GTFS feed try { - n = mapper.readTree(objectData); - if (!n.has("errors") || !n.has("tripsPerDate")) { - throw new Exception("Validation for feed version not up to date"); - } - return n; - } catch (IOException e) { - // if json file does not exist, validate feed. - this.validate(); - this.save(); - halt(503, "Try again later. Validating feed"); + LOG.info("Beginning validation..."); + // run validation on feed version + // FIXME: pass status to validate? Or somehow listen to events? + status.update("Validating feed...", 33); + validationResult = GTFS.validate(feedLoadResult.uniqueIdentifier, DataManager.GTFS_DATA_SOURCE); } catch (Exception e) { - e.printStackTrace(); - this.validate(); - this.save(); - halt(503, "Try again later. Validating feed"); - } - return null; - } - - private void saveValidationResult(File file) { - String keyName = validationSubdir + this.id + ".json"; - - // upload to S3, if we have bucket name and use s3 storage - if(DataManager.feedBucket != null && DataManager.useS3) { - try { - LOG.info("Uploading validation json to S3"); - FeedStore.s3Client.putObject(new PutObjectRequest( - DataManager.feedBucket, keyName, file)); - } catch (AmazonServiceException ase) { - LOG.error("Error uploading validation json to S3", ase); - } - } - // save to validation directory in gtfs folder - else { - File validationDir = new File(FeedStore.basePath + "/" + validationSubdir); - // ensure directory exists - validationDir.mkdir(); - try { - FileUtils.copyFile(file, new File(FeedStore.basePath + "/" + keyName)); - } catch (IOException e) { - LOG.error("Error saving validation json to local disk", e); - } + String message = String.format("Unable to validate feed %s", this.id); + LOG.error(message, e); + status.update(true, message, 100, true); + // FIXME create validation result with new constructor? + validationResult = new ValidationResult(); + validationResult.fatalException = "failure!"; } } @@ -426,220 +308,92 @@ public void validate() { validate(null); } - public void save () { - save(true); - } - - public void save(boolean commit) { - if (commit) - versionStore.save(this.id, this); - else - versionStore.saveWithoutCommit(this.id, this); - } - public void hash () { - this.hash = HashUtils.hashFile(getGtfsFile()); - } - - public static void commit() { - versionStore.commit(); - } - public TransportNetwork buildTransportNetwork(EventBus eventBus) { - // return null if validation result is null (probably means something went wrong with validation, plus we won't have feed bounds). - if (this.validationResult == null) { - return null; - } - - if (eventBus == null) { - eventBus = new EventBus(); - } - - // Fetch OSM extract - Map statusMap = new HashMap<>(); - statusMap.put("message", "Fetching OSM extract..."); - statusMap.put("percentComplete", 10.0); - statusMap.put("error", false); - eventBus.post(statusMap); - - Rectangle2D bounds = this.validationResult.bounds; - - if (bounds == null) { - String message = String.format("Could not build network for %s because feed bounds are unknown.", this.id); - LOG.warn(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 10.0); - statusMap.put("error", true); - eventBus.post(statusMap); - return null; - } - - File osmExtract = getOSMFile(bounds); - if (!osmExtract.exists()) { - InputStream is = getOsmExtract(this.validationResult.bounds); - OutputStream out; - try { - out = new FileOutputStream(osmExtract); - IOUtils.copy(is, out); - is.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - - // Create/save r5 network - statusMap.put("message", "Creating transport network..."); - statusMap.put("percentComplete", 50.0); - statusMap.put("error", false); - eventBus.post(statusMap); - - List feedList = new ArrayList<>(); - feedList.add(getGtfsFeed()); - TransportNetwork tn; - try { - tn = TransportNetwork.fromFeeds(osmExtract.getAbsolutePath(), feedList, TNBuilderConfig.defaultConfig()); - } catch (Exception e) { - String message = String.format("Unknown error encountered while building network for %s.", this.id); - LOG.warn(message); - statusMap.put("message", message); - statusMap.put("percentComplete", 100.0); - statusMap.put("error", true); - eventBus.post(statusMap); - e.printStackTrace(); - return null; - } - this.transportNetwork = tn; - this.transportNetwork.transitLayer.buildDistanceTables(null); - File tnFile = getTransportNetworkPath(); - try { - tn.write(tnFile); - return transportNetwork; - } catch (IOException e) { - e.printStackTrace(); - } - return null; + this.hash = HashUtils.hashFile(retrieveGtfsFile()); } - @JsonIgnore - public static File getOSMFile(Rectangle2D bounds) { + /** + * Get the OSM file for the given bounds if it exists on disk. + * + * FIXME: Use osm-lib to handle caching OSM data. + */ + private static File retrieveCachedOSMFile(Rectangle2D bounds) { if (bounds != null) { String baseDir = FeedStore.basePath.getAbsolutePath() + File.separator + "osm"; File osmPath = new File(String.format("%s/%.6f_%.6f_%.6f_%.6f", baseDir, bounds.getMaxX(), bounds.getMaxY(), bounds.getMinX(), bounds.getMinY())); if (!osmPath.exists()) { osmPath.mkdirs(); } - File osmFile = new File(osmPath.getAbsolutePath() + "/data.osm.pbf"); - return osmFile; - } - else { + return new File(osmPath.getAbsolutePath() + "/data.osm.pbf"); + } else { return null; } } - public TransportNetwork buildTransportNetwork() { - return buildTransportNetwork(null); - } - - /** * Does this feed version have any critical errors that would prevent it being loaded to OTP? - * @return + * @return whether feed version has critical errors */ public boolean hasCriticalErrors() { - if (hasCriticalErrorsExceptingDate() || (LocalDate.now()).isAfter(validationResult.endDate)) - return true; - - else - return false; + return hasCriticalErrorsExceptingDate() || + validationResult.lastCalendarDate == null || + (LocalDate.now()).isAfter(validationResult.lastCalendarDate); } /** * Does this feed have any critical errors other than possibly being expired? + * @return whether feed version has critical errors (outside of expiration) */ - public boolean hasCriticalErrorsExceptingDate () { + private boolean hasCriticalErrorsExceptingDate() { if (validationResult == null) return true; - if (validationResult.loadStatus != LoadStatus.SUCCESS) - return true; - - if (validationResult.stopTimesCount == 0 || validationResult.tripCount == 0 || validationResult.agencyCount == 0) - return true; + return validationResult.fatalException != null || + !validationSummary().bounds.areValid() || + feedLoadResult.stopTimes.rowCount == 0 || + feedLoadResult.trips.rowCount == 0 || + feedLoadResult.agency.rowCount == 0; - return false; } @JsonView(JsonViews.UserInterface.class) - public int getNoteCount() { + @JsonProperty("noteCount") + public int noteCount() { return this.noteIds != null ? this.noteIds.size() : 0; } - @JsonInclude(Include.NON_NULL) - @JsonView(JsonViews.UserInterface.class) - public Long getFileTimestamp() { - if (fileTimestamp != null) { - return fileTimestamp; - } - - this.fileTimestamp = feedStore.getFeedLastModified(id); - this.save(); - - return this.fileTimestamp; - } - - @JsonInclude(Include.NON_NULL) - @JsonView(JsonViews.UserInterface.class) - public Long getFileSize() { - if (fileSize != null) { - return fileSize; - } - - this.fileSize = feedStore.getFeedSize(id); - this.save(); - - return fileSize; - } - /** - * Delete this feed version. + * Delete this feed version and clean up, removing references to it and derived objects and state. + * Steps: + * 1. If we are deleting the latest version, change the memoized "last fetched" value in the FeedSource. + * 2. Delete the GTFS Zip file locally or on S3 + * 3. Remove this feed version from all Deployments [shouldn't we be updating the version rather than deleting it?] + * 4. Remove the transport network file from the local disk + * 5. Finally delete the version object from the database. */ public void delete() { try { // reset lastModified if feed is latest version - System.out.println("deleting version"); + LOG.info("Deleting feed version {}", this.id); String id = this.id; - FeedSource fs = getFeedSource(); - FeedVersion latest = fs.getLatest(); + FeedSource fs = parentFeedSource(); + FeedVersion latest = fs.retrieveLatest(); if (latest != null && latest.id.equals(this.id)) { - fs.lastFetched = null; - fs.save(); + // Even if there are previous feed versions, we set to null to allow re-fetching the version that was just deleted + // TODO instead, set it to the fetch time of the previous feed version + Persistence.feedSources.update(fs.id, "{lastFetched:null}"); } feedStore.deleteFeed(id); - - for (Deployment d : Deployment.getAll()) { - d.feedVersionIds.remove(this.id); - } - - getTransportNetworkPath().delete(); - - - versionStore.delete(this.id); + // Remove this FeedVersion from all Deployments associated with this FeedVersion's FeedSource's Project + // TODO TEST THOROUGHLY THAT THIS UPDATE EXPRESSION IS CORRECT + // Although outright deleting the feedVersion from deployments could be surprising and shouldn't be done anyway. + Persistence.deployments.getMongoCollection().updateMany(eq("projectId", this.parentFeedSource().projectId), + pull("feedVersionIds", this.id)); + Persistence.feedVersions.removeById(this.id); + this.parentFeedSource().renumberFeedVersions(); LOG.info("Version {} deleted", id); } catch (Exception e) { LOG.warn("Error deleting version", e); } } - @JsonIgnore - public String getR5Path () { - // r5 networks MUST be stored in separate directories (in this case under feed source ID - // because of shared osm.mapdb used by r5 networks placed in same dir - File r5 = new File(String.join(File.separator, FeedStore.basePath.getAbsolutePath(), this.feedSourceId)); - if (!r5.exists()) { - r5.mkdirs(); - } - return r5.getAbsolutePath(); - } - @JsonIgnore - public File getTransportNetworkPath () { - return new File(String.join(File.separator, getR5Path(), id + "_" + R5Version.describe + "_network.dat")); - } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/LoadStatus.java b/src/main/java/com/conveyal/datatools/manager/models/LoadStatus.java new file mode 100644 index 000000000..5034b74c3 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/models/LoadStatus.java @@ -0,0 +1,6 @@ +package com.conveyal.datatools.manager.models; + +/** Why a GTFS feed failed to load */ +public enum LoadStatus { + SUCCESS, INVALID_ZIP_FILE, OTHER_FAILURE, MISSING_REQUIRED_FIELD, INCORRECT_FIELD_COUNT_IMPROPER_QUOTING; +} diff --git a/src/main/java/com/conveyal/datatools/manager/models/Model.java b/src/main/java/com/conveyal/datatools/manager/models/Model.java index de918f9e5..d9e073c60 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/Model.java +++ b/src/main/java/com/conveyal/datatools/manager/models/Model.java @@ -1,11 +1,15 @@ package com.conveyal.datatools.manager.models; +import com.conveyal.datatools.manager.auth.Auth0Connection; import com.conveyal.datatools.manager.auth.Auth0Users; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonView; import java.io.Serializable; import java.util.ArrayList; +import java.util.Date; import java.util.List; import java.util.UUID; @@ -18,7 +22,7 @@ * @author mattwigway */ -@MappedSuperclass +@MappedSuperclass // applies mapping information to the subclassed entities FIXME remove? public abstract class Model implements Serializable { private static final long serialVersionUID = 1L; @@ -26,10 +30,17 @@ public Model () { // This autogenerates an ID // this is OK for dump/restore, because the ID will simply be overridden this.id = UUID.randomUUID().toString(); + this.lastUpdated = new Date(); + this.dateCreated = new Date(); } public String id; + // FIXME: should this be stored here? Should we use lastUpdated as a nonce to protect against race conditions in DB + // writes? + public Date lastUpdated; + public Date dateCreated; + /** * The ID of the user who owns this object. * For accountability, every object is owned by a user. @@ -37,7 +48,8 @@ public Model () { @JsonView(JsonViews.DataDump.class) public String userId; - private String userEmail; + @JsonView(JsonViews.DataDump.class) + public String userEmail; /** * Notes on this object @@ -50,12 +62,12 @@ public Model () { */ // notes are handled through a separate controller and in a separate DB @JsonIgnore - public List getNotes() { - ArrayList ret = new ArrayList(noteIds != null ? noteIds.size() : 0); + public List retrieveNotes() { + ArrayList ret = new ArrayList<>(noteIds != null ? noteIds.size() : 0); if (noteIds != null) { for (String id : noteIds) { - ret.add(Note.get(id)); + ret.add(Persistence.notes.getById(id)); } } @@ -66,35 +78,38 @@ public List getNotes() { * Get the user who owns this object. * @return the String user_id */ - @JsonView(JsonViews.UserInterface.class) - public String getUser () { + @JsonProperty("user") + public String user () { return this.userEmail; } /** * Set the owner of this object */ - public void setUser (Auth0UserProfile profile) { + public void storeUser(Auth0UserProfile profile) { userId = profile.getUser_id(); userEmail = profile.getEmail(); } /** - * Set the owner of this object by Id + * Set the owner of this object by ID. */ - public void setUserById (String id) { + public void storeUser(String id) { userId = id; - Auth0UserProfile profile = Auth0Users.getUserById(userId); - userEmail = profile != null ? profile.getEmail() : null; + if (!Auth0Connection.authDisabled()) { + Auth0UserProfile profile = Auth0Users.getUserById(userId); + userEmail = profile != null ? profile.getEmail() : null; + } else { + userEmail = "no_auth@conveyal.com"; + } + } public void addNote(Note n) { if (noteIds == null) { - noteIds = new ArrayList(); + noteIds = new ArrayList<>(); } noteIds.add(n.id); n.objectId = this.id; } - - public abstract void save(); } diff --git a/src/main/java/com/conveyal/datatools/manager/models/Note.java b/src/main/java/com/conveyal/datatools/manager/models/Note.java index 00a1c4f15..1d7e10a8c 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/Note.java +++ b/src/main/java/com/conveyal/datatools/manager/models/Note.java @@ -1,12 +1,10 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.datatools.manager.persistence.DataStore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; import java.io.Serializable; -import java.util.Collection; import java.util.Date; /** @@ -19,8 +17,6 @@ public class Note extends Model implements Serializable { private static final long serialVersionUID = 1L; - private static DataStore noteStore = new DataStore<>("notes"); - /** The content of the note */ public String body; @@ -35,33 +31,10 @@ public class Note extends Model implements Serializable { /** When was this comment made? */ public Date date; - public void save () { - save(true); - } - - public void save (boolean commit) { - if (commit) - noteStore.save(id, this); - else - noteStore.saveWithoutCommit(id, this); - } - - public static Note get (String id) { - return noteStore.getById(id); - } - /** * The types of object that can have notes recorded on them. */ public static enum NoteType { FEED_VERSION, FEED_SOURCE } - - public static Collection getAll() { - return noteStore.getAll(); - } - - public static void commit() { - noteStore.commit(); - } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/Organization.java b/src/main/java/com/conveyal/datatools/manager/models/Organization.java index a7321d0d8..381d7cf07 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/Organization.java +++ b/src/main/java/com/conveyal/datatools/manager/models/Organization.java @@ -1,25 +1,29 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.datatools.manager.persistence.DataStore; -import com.fasterxml.jackson.annotation.JsonIgnore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonView; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.Serializable; -import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import static com.mongodb.client.model.Filters.eq; + /** - * Created by landon on 1/30/17. + * An organization represents a group of users and projects (with contained feed sources). Currently, a user can only + * belong to one organization (although a future aim may be to extend this) and a project can only belong to one + * organization. + * + * Organizations are primarily intended to help organize multi-tenant instances where there needs to be separation + * between administrative control over users and projects. */ @JsonIgnoreProperties(ignoreUnknown = true) public class Organization extends Model implements Serializable { private static final long serialVersionUID = 1L; - private static DataStore organizationStore = new DataStore<>("organizations"); public String name; public String logoUrl; @@ -31,43 +35,18 @@ public class Organization extends Model implements Serializable { public Organization () {} - public void save () { - save(true); - } - - public void save(boolean commit) { - if (commit) - organizationStore.save(id, this); - else - organizationStore.saveWithoutCommit(id, this); - } - - public static Organization get (String id) { - return organizationStore.getById(id); - } - - public static Collection getAll() { - return organizationStore.getAll(); - } - - public static void commit() { - organizationStore.commit(); - } - - public void delete() { - organizationStore.delete(this.id); - } - - public Collection getProjects() { - return Project.getAll().stream().filter(p -> id.equals(p.organizationId)).collect(Collectors.toList()); + @JsonProperty("projects") + public Collection projects() { + return Persistence.projects.getFiltered(eq("organizationId", id)); } - public long getTotalServiceSeconds () { - return getProjects().stream() - .map(p -> p.getProjectFeedSources()) - .flatMap(p -> p.stream()) - .filter(fs -> fs.getLatestValidation() != null) - .map(fs -> fs.getLatestValidation().avgDailyRevenueTime) + @JsonProperty("totalServiceSeconds") + public long totalServiceSeconds() { + return projects().stream() + .map(Project::retrieveProjectFeedSources) + .flatMap(Collection::stream) + .filter(fs -> fs.latestValidation() != null) + .map(fs -> fs.latestValidation().avgDailyRevenueTime) .mapToLong(Long::longValue) .sum(); } @@ -75,7 +54,7 @@ public long getTotalServiceSeconds () { /** * Created by landon on 1/30/17. */ - public static enum Extension { + public enum Extension { GTFS_PLUS, DEPLOYMENT, VALIDATOR, @@ -86,7 +65,7 @@ public static enum Extension { /** * Created by landon on 1/30/17. */ - public static enum UsageTier { + public enum UsageTier { LOW, MEDIUM, HIGH diff --git a/src/main/java/com/conveyal/datatools/manager/models/OtpBuildConfig.java b/src/main/java/com/conveyal/datatools/manager/models/OtpBuildConfig.java index 3db73c992..760a0a11f 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/OtpBuildConfig.java +++ b/src/main/java/com/conveyal/datatools/manager/models/OtpBuildConfig.java @@ -1,15 +1,19 @@ package com.conveyal.datatools.manager.models; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; + import java.io.Serializable; /** * Created by demory on 3/8/15. */ - +@JsonIgnoreProperties(ignoreUnknown = true) public class OtpBuildConfig implements Serializable { private static final long serialVersionUID = 1L; public Boolean fetchElevationUS; + // FIXME: elevation bucket causing NPE issue if missing values when deploying to OTP + // public S3Bucket elevationBucket; public Boolean stationTransfers; @@ -17,4 +21,13 @@ public class OtpBuildConfig implements Serializable { /** Currently only supports no-configuration fares, e.g. New York or San Francisco */ public String fares; + + public OtpBuildConfig() {} + + public static class S3Bucket implements Serializable { + private static final long serialVersionUID = 1L; + public String accessKey; + public String secretKey; + public String bucketName; + } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/OtpRouterConfig.java b/src/main/java/com/conveyal/datatools/manager/models/OtpRouterConfig.java index f1fa3c770..1a5b342a5 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/OtpRouterConfig.java +++ b/src/main/java/com/conveyal/datatools/manager/models/OtpRouterConfig.java @@ -1,12 +1,14 @@ package com.conveyal.datatools.manager.models; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; + import java.io.Serializable; import java.util.Collection; /** * Created by demory on 3/8/15. */ - +@JsonIgnoreProperties(ignoreUnknown = true) public class OtpRouterConfig implements Serializable { private static final long serialVersionUID = 1L; public Integer numItineraries; @@ -20,7 +22,7 @@ public class OtpRouterConfig implements Serializable { public Collection updaters; public static class Updater implements Serializable { - + private static final long serialVersionUID = 1L; public String type; public Integer frequencySec; @@ -31,8 +33,6 @@ public static class Updater implements Serializable { public String defaultAgencyId; } - - public String brandingUrlRoot; - + public String requestLogFile; } diff --git a/src/main/java/com/conveyal/datatools/manager/models/OtpServer.java b/src/main/java/com/conveyal/datatools/manager/models/OtpServer.java index 490f76344..b9065ce4d 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/OtpServer.java +++ b/src/main/java/com/conveyal/datatools/manager/models/OtpServer.java @@ -14,4 +14,14 @@ public class OtpServer implements Serializable { public Boolean admin; public String s3Bucket; public String s3Credentials; + + /** + * Convert the name field into a string with no special characters. + * + * FIXME: This is currently used to keep track of which deployments have been deployed to which servers (it is used + * for the {@link Deployment#deployedTo} field), but we should likely. + */ + public String target() { + return name != null ? name.replaceAll("[^a-zA-Z0-9]", "_") : null; + } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/Project.java b/src/main/java/com/conveyal/datatools/manager/models/Project.java index 890b72936..dd65d9850 100644 --- a/src/main/java/com/conveyal/datatools/manager/models/Project.java +++ b/src/main/java/com/conveyal/datatools/manager/models/Project.java @@ -1,36 +1,34 @@ package com.conveyal.datatools.manager.models; -import com.conveyal.datatools.manager.persistence.DataStore; -import com.fasterxml.jackson.annotation.JsonIgnore; +import com.conveyal.datatools.manager.persistence.Persistence; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.*; +import java.util.Collection; +import java.util.List; import java.util.stream.Collectors; +import static com.mongodb.client.model.Filters.eq; + /** * Represents a collection of feed sources that can be made into a deployment. * Generally, this would represent one agency that is managing the data. - * For now, there is one FeedCollection per instance of GTFS data manager, but + * For now, there is one Project per instance of GTFS data manager, but * we're trying to write the code in such a way that this is not necessary. * * @author mattwigway * */ -@JsonInclude(Include.ALWAYS) @JsonIgnoreProperties(ignoreUnknown = true) public class Project extends Model { private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(Project.class); - private static DataStore projectStore = new DataStore<>("projects"); - - /** The name of this feed collection, e.g. NYSDOT. */ + /** The name of this project, e.g. NYSDOT. */ public String name; - public Boolean useCustomOsmBounds; - - public Double osmNorth, osmSouth, osmEast, osmWest; + public boolean useCustomOsmBounds; public OtpBuildConfig buildConfig; @@ -40,30 +38,30 @@ public class Project extends Model { public String organizationId; - @JsonIgnore - public OtpServer getServer (String name) { + /** + * Locate and return an OTP server contained within the project that matches the name argument. + */ + public OtpServer retrieveServer(String name) { + if (name == null) return null; for (OtpServer otpServer : otpServers) { - if (otpServer.name.equals(name)) { + if (otpServer.name == null) continue; + if (name.equals(otpServer.name) || name.equals(otpServer.target())) { return otpServer; } } + LOG.warn("Could not find OTP server with name {}", name); return null; } public String defaultTimeZone; - - public String defaultLanguage; - - //@JsonView - public Collection feedSources; - - public Double defaultLocationLat, defaultLocationLon; - public Boolean autoFetchFeeds; + public boolean autoFetchFeeds; public int autoFetchHour, autoFetchMinute; -// public Map boundingBox = new HashMap<>(); + public transient Collection feedSources; - public Double north, south, east, west; + // Bounds is used for either OSM custom deployment bounds (if useCustomOsmBounds is true) + // and/or for applying a geographic filter when syncing with external feed registries. + public Bounds bounds; public Project() { this.buildConfig = new OtpBuildConfig(); @@ -72,75 +70,46 @@ public Project() { } /** - * Get all of the FeedCollections that are defined + * Get all the feed sources for this project. */ - public static Collection getAll () { - return projectStore.getAll(); - } - - public static Project get(String id) { - return projectStore.getById(id); - } - - public void save() { - save(true); - } - - public void save(boolean commit) { - if (commit) - projectStore.save(this.id, this); - else - projectStore.saveWithoutCommit(this.id, this); - } - - public void delete() { - for (FeedSource s : getProjectFeedSources()) { - s.delete(); - } - for (Deployment d : getProjectDeployments()) { - d.delete(); - } - - projectStore.delete(this.id); - } - - public static void commit () { - projectStore.commit(); + public Collection retrieveProjectFeedSources() { + // TODO: use index, but not important for now because we generally only have one FeedCollection + return Persistence.feedSources.getAll().stream() + .filter(fs -> this.id.equals(fs.projectId)) + .collect(Collectors.toList()); } - /** - * Get all the feed sources for this feed collection - */ - @JsonIgnore - public Collection getProjectFeedSources() { -// ArrayList ret = new ArrayList<>(); - - // TODO: use index, but not important for now because we generally only have one FeedCollection - return FeedSource.getAll().stream().filter(fs -> this.id.equals(fs.projectId)).collect(Collectors.toList()); + // Note: Previously a numberOfFeeds() dynamic Jackson JsonProperty was in place here. But when the number of projects + // in the database grows large, the efficient calculation of this field does not scale. - } - public int getNumberOfFeeds () { - return FeedSource.getAll().stream().filter(fs -> this.id.equals(fs.projectId)).collect(Collectors.toList()).size(); - } /** - * Get all the deployments for this feed collection + * Get all the deployments for this project. */ - - @JsonIgnore - public Collection getProjectDeployments() { - ArrayList ret = Deployment.getAll().stream() - .filter(d -> this.id.equals(d.projectId)) - .collect(Collectors.toCollection(ArrayList::new)); - - return ret; + public Collection retrieveDeployments() { + List deployments = Persistence.deployments + .getFiltered(eq("projectId", this.id)); + return deployments; } - @JsonIgnore - public Organization getOrganization() { + // TODO: Does this need to be returned with JSON API response + public Organization retrieveOrganization() { if (organizationId != null) { - return Organization.get(organizationId); + return Persistence.organizations.getById(organizationId); } else { return null; } } + + public boolean delete() { + // FIXME: Handle this in a Mongo transaction. See https://docs.mongodb.com/master/core/transactions/#transactions-and-mongodb-drivers +// ClientSession clientSession = Persistence.startSession(); +// clientSession.startTransaction(); + + // Delete each feed source in the project (which in turn deletes each feed version). + retrieveProjectFeedSources().forEach(FeedSource::delete); + // Delete each deployment in the project. + retrieveDeployments().forEach(Deployment::delete); + // Finally, delete the project. + return Persistence.projects.removeById(this.id); + } } diff --git a/src/main/java/com/conveyal/datatools/manager/models/Region.java b/src/main/java/com/conveyal/datatools/manager/models/Region.java deleted file mode 100644 index f0edd7991..000000000 --- a/src/main/java/com/conveyal/datatools/manager/models/Region.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.conveyal.datatools.manager.models; - -import com.conveyal.datatools.manager.persistence.DataStore; -import com.fasterxml.jackson.annotation.JsonIgnore; - -import java.util.Arrays; -import java.util.Collection; -import java.util.stream.Collectors; - -/** - * Created by landon on 4/15/16. - */ -public class Region extends Model { - private static final long serialVersionUID = 1L; - - private static DataStore regionStore = new DataStore<>("region"); - - /** The name of this region, e.g. Atlanta. */ - public String name; - - // Polygon geometry of region as GeoJSON string - @JsonIgnore - public String geometry; - public Double lat, lon; - // hierarchical order of region: country, 1st order admin, or region - public String order; - - public Boolean isPublic; - public String defaultLanguage; - public String defaultTimeZone; - - //@JsonView - public Collection feedSources; - - public Double north, south, east, west; - - public Region() { - - } - - /** - * Get all of the FeedCollections that are defined - */ - public static Collection getAll () { - return regionStore.getAll(); - } - - public static void deleteAll () { - Region.getAll().forEach(region -> region.delete()); - } - - public static Region get(String id) { - return regionStore.getById(id); - } - - public void save() { - save(true); - } - - public void save(boolean commit) { - if (commit) - regionStore.save(this.id, this); - else - regionStore.saveWithoutCommit(this.id, this); - } - - public void delete() { -// for (FeedSource fs : getRegionFeedSources()) { -// Arrays.asList(fs.regions).remove(this.id); -// fs.save(); -// } - - regionStore.delete(this.id); - } - - public static void commit () { - regionStore.commit(); - } - - /** - * Get all the feed sources for this feed collection - */ - - @JsonIgnore - public Collection getRegionFeedSources() { - - // TODO: use index, but not important for now because we generally only have one FeedCollection -// if (this.id != null && fs.regions != null) - return FeedSource.getAll().stream().filter(fs -> Arrays.asList(fs.regions).contains(this.id)).collect(Collectors.toList()); - - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/models/Snapshot.java b/src/main/java/com/conveyal/datatools/manager/models/Snapshot.java new file mode 100644 index 000000000..e9401979c --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/models/Snapshot.java @@ -0,0 +1,76 @@ +package com.conveyal.datatools.manager.models; + +import com.conveyal.gtfs.loader.FeedLoadResult; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonAlias; + +import java.util.Date; + +/** + * Represents a snapshot of an agency database. + * @author mattwigway + * + */ +@JsonIgnoreProperties(ignoreUnknown = true) +public class Snapshot extends Model { + public static final long serialVersionUID = 1L; + public static final String FEED_SOURCE_REF = "feedSourceId"; + + /** Is this snapshot the current snapshot - the most recently created or restored (i.e. the most current view of what's in master */ + public boolean current; + + /** The version of this snapshot */ + public int version; + + /** The name of this snapshot */ + public String name; + + /** The comment of this snapshot */ + public String comment; + + /** The feed source associated with this. NOTE: this field is NOT named feedSourceId to match legacy editor snapshots. */ + @JsonAlias({"feedId", "feedSourceId"}) + public String feedSourceId; + + /** The feed version this snapshot was generated from or published to, if any */ + public String feedVersionId; + + /** The namespace this snapshot is a copy of */ + public String snapshotOf; + + /** The namespace the snapshot copied tables to */ + public String namespace; + + public FeedLoadResult feedLoadResult; + + /** the date/time this snapshot was taken (millis since epoch) */ + public long snapshotTime; + + /** Used for deserialization */ + public Snapshot() {} + + public Snapshot(String feedSourceId, int version, String snapshotOf, FeedLoadResult feedLoadResult) { + this.feedSourceId = feedSourceId; + this.version = version; + this.snapshotOf = snapshotOf; + this.namespace = feedLoadResult.uniqueIdentifier; + this.feedLoadResult = feedLoadResult; + snapshotTime = System.currentTimeMillis(); + } + + public Snapshot(String name, String feedSourceId, String snapshotOf) { + this.name = name; + this.feedSourceId = feedSourceId; + this.snapshotOf = snapshotOf; + snapshotTime = System.currentTimeMillis(); + } + + public Snapshot(String feedSourceId, String snapshotOf) { + this(null, feedSourceId, snapshotOf); + generateName(); + } + + public void generateName() { + this.name = "New snapshot " + new Date().toString(); + } +} diff --git a/src/main/java/com/conveyal/datatools/manager/persistence/DataStore.java b/src/main/java/com/conveyal/datatools/manager/persistence/DataStore.java deleted file mode 100644 index b876a781d..000000000 --- a/src/main/java/com/conveyal/datatools/manager/persistence/DataStore.java +++ /dev/null @@ -1,173 +0,0 @@ -package com.conveyal.datatools.manager.persistence; - -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import com.conveyal.datatools.manager.DataManager; -import org.mapdb.BTreeKeySerializer; -import org.mapdb.BTreeMap; -import org.mapdb.Bind; -import org.mapdb.DB; -import org.mapdb.DBMaker; -import org.mapdb.Fun; -import org.mapdb.Fun.Function2; -import org.mapdb.Pump; -import org.mapdb.Fun.Tuple2; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DataStore { - private static final Logger LOG = LoggerFactory.getLogger(DataStore.class); - - DB db; - BTreeMap map; - - public DataStore(String dataFile) { - this(new File(DataManager.getConfigPropertyAsText("application.data.mapdb")), dataFile); - } - - public DataStore(File directory, String dataFile) { - - if(!directory.exists()) - directory.mkdirs(); - - try { - LOG.info(String.join("/", directory.getCanonicalPath(), dataFile)); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - db = DBMaker.newFileDB(new File(directory, dataFile + ".db")) - .closeOnJvmShutdown() - .make(); - - DB.BTreeMapMaker maker = db.createTreeMap(dataFile); - maker.valueSerializer(new ClassLoaderSerializer()); - map = maker.makeOrGet(); - } - - public DataStore(File directory, String dataFile, List>inputData) { - - if(!directory.exists()) - directory.mkdirs(); - - try { - LOG.info(String.join("/", directory.getCanonicalPath(), dataFile)); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - db = DBMaker.newFileDB(new File(directory, dataFile + ".db")) - .transactionDisable() - .closeOnJvmShutdown() - .make(); - - Comparator> comparator = (o1, o2) -> o1.a.compareTo(o2.a); - - // need to reverse sort list - Iterator> iter = Pump.sort(inputData.iterator(), - true, 100000, - Collections.reverseOrder(comparator), //reverse order comparator - db.getDefaultSerializer() - ); - - - BTreeKeySerializer keySerializer = BTreeKeySerializer.STRING; - - map = db.createTreeMap(dataFile) - .pumpSource(iter) - .pumpPresort(100000) - .keySerializer(keySerializer) - .make(); - - - - // close/flush db - db.close(); - - // re-connect with transactions enabled - db = DBMaker.newFileDB(new File(directory, dataFile + ".db")) - .closeOnJvmShutdown() - .make(); - - map = db.getTreeMap(dataFile); - } - - public void save(String id, T obj) { - map.put(id, obj); - db.commit(); - } - - public void saveWithoutCommit(String id, T obj) { - map.put(id, obj); - } - - public void commit() { - db.commit(); - } - - public void delete(String id) { - map.remove(id); - db.commit(); - } - - public T getById(String id) { - return map.get(id); - } - - /** - * Does an object with this ID exist in this data store? - * @param id - * @return boolean indicating result - */ - public boolean hasId(String id) { - return map.containsKey(id); - } - - public Collection getAll() { - return map.values(); - } - - public Integer size() { - return map.keySet().size(); - } - - /** Create a secondary (unique) key */ - public void secondaryKey (String name, Function2 fun) { - Map index = db.getTreeMap(name); - Bind.secondaryKey(map, index, fun); - } - - /** search using a secondary unique key */ - public T find(String name, K2 value) { - Map index = db.getTreeMap(name); - - String id = index.get(value); - - if (id == null) - return null; - - return map.get(id); - } - - /** find the value with largest key less than or equal to key */ - public T findFloor (String name, K2 floor) { - BTreeMap index = db.getTreeMap(name); - - Entry key = index.floorEntry(floor); - - if (key == null || key.getValue() == null) - return null; - - return map.get(key.getValue()); - } -} diff --git a/src/main/java/com/conveyal/datatools/manager/persistence/FeedStore.java b/src/main/java/com/conveyal/datatools/manager/persistence/FeedStore.java index a85744e39..b18c80883 100644 --- a/src/main/java/com/conveyal/datatools/manager/persistence/FeedStore.java +++ b/src/main/java/com/conveyal/datatools/manager/persistence/FeedStore.java @@ -1,17 +1,18 @@ package com.conveyal.datatools.manager.persistence; -import java.io.*; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.List; - import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.s3.model.*; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.amazonaws.auth.profile.ProfileCredentialsProvider; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.CopyObjectRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.TransferManagerBuilder; import com.amazonaws.services.s3.transfer.Upload; import com.conveyal.datatools.manager.DataManager; import com.conveyal.datatools.manager.models.FeedSource; @@ -19,14 +20,19 @@ import gnu.trove.list.array.TLongArrayList; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; - -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.auth.profile.ProfileCredentialsProvider; -import com.amazonaws.services.s3.AmazonS3Client; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; + +import static com.conveyal.datatools.manager.DataManager.hasConfigProperty; + /** * Store a feed on the file system or s3 * @author mattwigway @@ -40,13 +46,15 @@ public class FeedStore { public static final File basePath = new File(DataManager.getConfigPropertyAsText("application.data.gtfs")); private final File path; /** An optional AWS S3 bucket to store the feeds */ - private String s3Bucket; + private static String s3Bucket; public static final String s3Prefix = "gtfs/"; - public static AmazonS3Client s3Client; + // FIXME: this should not be static most likely + public static AmazonS3 s3Client; /** An AWS credentials file to use when uploading to S3 */ - private static final String s3CredentialsFilename = DataManager.getConfigPropertyAsText("application.data.s3_credentials_file"); + private static final String S3_CREDENTIALS_FILENAME = DataManager.getConfigPropertyAsText("application.data.s3_credentials_file"); + private static final String S3_CONFIG_FILENAME = DataManager.getConfigPropertyAsText("application.data.s3_credentials_file"); public FeedStore() { this(null); @@ -62,19 +70,40 @@ public FeedStore(String subdir) { String pathString = basePath.getAbsolutePath(); if (subdir != null) pathString += File.separator + subdir; path = getPath(pathString); + } + static { // s3 storage - if (DataManager.useS3){ - this.s3Bucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket"); - s3Client = new AmazonS3Client(getAWSCreds()); + if (DataManager.useS3 || hasConfigProperty("modules.gtfsapi.use_extension")){ + s3Bucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket"); + AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard() + .withCredentials(getAWSCreds()); + + // if region configuration string is provided, use that + // otherwise default to ~/.aws/config + // NOTE: if this is missing + String s3Region = DataManager.getConfigPropertyAsText("application.data.s3_region"); + if (s3Region != null) { + LOG.info("Using S3 region {}", s3Region); + builder.withRegion(s3Region); + } + try { + s3Client = builder.build(); + } catch (Exception e) { + LOG.error("S3 client not initialized correctly. Must provide config property application.data.s3_region or specify region in ~/.aws/config", e); + } + // TODO: check for this?? + if (s3Client == null || s3Bucket == null) { + throw new IllegalArgumentException("Fatal error initializing s3Bucket or s3Client"); + } } } private static File getPath (String pathString) { File path = new File(pathString); if (!path.exists() || !path.isDirectory()) { - path = null; - throw new IllegalArgumentException("Not a directory or not found: " + path.getAbsolutePath()); + LOG.error("Directory does not exist {}", pathString); + throw new IllegalArgumentException("Not a directory or not found: " + pathString); } return path; } @@ -129,16 +158,16 @@ public Long getFeedSize (String id) { } } - private AWSCredentials getAWSCreds () { - if (this.s3CredentialsFilename != null) { - return new ProfileCredentialsProvider(this.s3CredentialsFilename, "default").getCredentials(); + private static AWSCredentialsProvider getAWSCreds () { + if (S3_CREDENTIALS_FILENAME != null) { + return new ProfileCredentialsProvider(S3_CREDENTIALS_FILENAME, "default"); } else { // default credentials providers, e.g. IAM role - return new DefaultAWSCredentialsProviderChain().getCredentials(); + return new DefaultAWSCredentialsProviderChain(); } } - private String getS3Key (String id) { + private static String getS3Key (String id) { return s3Prefix + id; } @@ -147,22 +176,26 @@ private String getS3Key (String id) { */ public File getFeed (String id) { // local storage - if (!DataManager.useS3) { - File feed = new File(path, id); - // don't let folks get feeds outside of the directory - if (feed.getParentFile().equals(path) && feed.exists()) return feed; + File feed = new File(path, id); + // Whether storing locally or on s3, return local version if it exists. + // Also, don't let folks retrieveById feeds outside of the directory + if (feed.getParentFile().equals(path) && feed.exists()) { + return feed; } + // s3 storage - else { + if (DataManager.useS3) { + String key = getS3Key(id); try { - LOG.info("Downloading feed from s3"); + LOG.info("Downloading feed from s3://{}/{}", s3Bucket, key); S3Object object = s3Client.getObject( - new GetObjectRequest(s3Bucket, getS3Key(id))); + new GetObjectRequest(s3Bucket, key)); InputStream objectData = object.getObjectContent(); + // FIXME: Figure out how to manage temp files created here. Currently, deleteOnExit is called in createTempFile return createTempFile(id, objectData); } catch (AmazonServiceException ase) { - LOG.error("Error downloading from s3"); + LOG.error("Error downloading s3://{}/{}", s3Bucket, key); ase.printStackTrace(); } catch (IOException e) { e.printStackTrace(); @@ -187,9 +220,9 @@ private File storeFeedLocally(String id, InputStream inputStream, FeedSource fee } catch (IOException e) { e.printStackTrace(); } - if (feedSource != null) { + if (feedSource != null && !DataManager.useS3) { + // Store latest as feed-source-id.zip if feedSource provided and if not using s3 try { - // store latest as feed-source-id.zip if feedSource provided copyVersionToLatest(feed, feedSource); } catch (Exception e) { e.printStackTrace(); @@ -210,25 +243,20 @@ private void copyVersionToLatest(File version, FeedSource feedSource) { } private File writeFileUsingInputStream(String filename, InputStream inputStream) throws IOException { - OutputStream output = null; File out = new File(path, filename); - try { - LOG.info("Writing file to {}/{}", path, filename); - output = new FileOutputStream(out); + LOG.info("Writing file to {}/{}", path, filename); + try (OutputStream output = new FileOutputStream(out)) { byte[] buf = new byte[1024]; int bytesRead; while ((bytesRead = inputStream.read(buf)) > 0) { output.write(buf, 0, bytesRead); } - } catch (FileNotFoundException e) { - e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } finally { inputStream.close(); - output.close(); - return out; } + return out; } private File createTempFile (String name, InputStream in) throws IOException { @@ -242,19 +270,15 @@ private File createTempFile (String name, InputStream in) throws IOException { return tempFile; } - private File uploadToS3 (InputStream inputStream, String id, FeedSource feedSource) { - if(this.s3Bucket != null) { + public boolean uploadToS3 (File gtfsFile, String s3FileName, FeedSource feedSource) { + if (s3Bucket != null) { try { - // Use tempfile - LOG.info("Creating temp file for {}", id); - File tempFile = createTempFile(id, inputStream); - - LOG.info("Uploading feed {} to S3 from tempfile", id); - TransferManager tm = new TransferManager(getAWSCreds()); - PutObjectRequest request = new PutObjectRequest(s3Bucket, getS3Key(id), tempFile); + LOG.info("Uploading feed {} to S3 from {}", s3FileName, gtfsFile.getAbsolutePath()); + TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3Client).build(); + PutObjectRequest request = new PutObjectRequest(s3Bucket, getS3Key(s3FileName), gtfsFile); // Subscribe to the event and provide event handler. TLongList transferredBytes = new TLongArrayList(); - long totalBytes = tempFile.length(); + long totalBytes = gtfsFile.length(); LOG.info("Total kilobytes: {}", totalBytes / 1000); request.setGeneralProgressListener(progressEvent -> { if (transferredBytes.size() == 75) { @@ -273,13 +297,15 @@ private File uploadToS3 (InputStream inputStream, String id, FeedSource feedSour try { // You can block and wait for the upload to finish upload.waitForCompletion(); - } catch (AmazonClientException amazonClientException) { - System.out.println("Unable to upload file, upload aborted."); - amazonClientException.printStackTrace(); - } catch (InterruptedException e) { - e.printStackTrace(); + } catch (AmazonClientException | InterruptedException e) { + LOG.error("Unable to upload file, upload aborted.", e); + return false; } -// s3Client.putObject(); + + // Shutdown the Transfer Manager, but don't shut down the underlying S3 client. + // The default behavior for shutdownNow shut's down the underlying s3 client + // which will cause any following s3 operations to fail. + tm.shutdownNow(false); if (feedSource != null){ LOG.info("Copying feed on s3 to latest version"); @@ -287,18 +313,15 @@ private File uploadToS3 (InputStream inputStream, String id, FeedSource feedSour // copy to [feedSourceId].zip String copyKey = s3Prefix + feedSource.id + ".zip"; CopyObjectRequest copyObjRequest = new CopyObjectRequest( - this.s3Bucket, getS3Key(id), this.s3Bucket, copyKey); + s3Bucket, getS3Key(s3FileName), s3Bucket, copyKey); s3Client.copyObject(copyObjRequest); } - return tempFile; - } catch (AmazonServiceException ase) { - LOG.error("Error uploading feed to S3"); - ase.printStackTrace(); - return null; - } catch (IOException e) { - e.printStackTrace(); + return true; + } catch (AmazonServiceException e) { + LOG.error("Error uploading feed to S3", e); + return false; } } - return null; + return false; } } \ No newline at end of file diff --git a/src/main/java/com/conveyal/datatools/manager/persistence/Persistence.java b/src/main/java/com/conveyal/datatools/manager/persistence/Persistence.java new file mode 100644 index 000000000..9359340d3 --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/persistence/Persistence.java @@ -0,0 +1,103 @@ +package com.conveyal.datatools.manager.persistence; + +import com.conveyal.datatools.manager.DataManager; +import com.conveyal.datatools.manager.codec.IntArrayCodec; +import com.conveyal.datatools.manager.codec.LocalDateCodec; +import com.conveyal.datatools.manager.codec.URLCodec; +import com.conveyal.datatools.manager.models.Deployment; +import com.conveyal.datatools.manager.models.ExternalFeedSourceProperty; +import com.conveyal.datatools.manager.models.FeedDownloadToken; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.FeedVersion; +import com.conveyal.datatools.manager.models.Note; +import com.conveyal.datatools.manager.models.Organization; +import com.conveyal.datatools.manager.models.Project; +import com.conveyal.datatools.manager.models.Snapshot; +import com.mongodb.MongoClient; +import com.mongodb.MongoClientOptions; +import com.mongodb.MongoClientURI; +import com.mongodb.client.MongoDatabase; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; + +/** + * Groups together a bunch of TypedPersistence abstractions around MongoDB Collections. + */ +public class Persistence { + + private static final Logger LOG = LoggerFactory.getLogger(Persistence.class); + private static final String MONGO_URI = "MONGO_URI"; + private static final String MONGO_DB_NAME = "MONGO_DB_NAME"; + + private static MongoClient mongo; + private static MongoDatabase mongoDatabase; + private static CodecRegistry pojoCodecRegistry; + + // One abstracted Mongo collection for each class of persisted objects + public static TypedPersistence feedSources; + public static TypedPersistence deployments; + public static TypedPersistence projects; + public static TypedPersistence feedVersions; + public static TypedPersistence notes; + public static TypedPersistence organizations; + public static TypedPersistence externalFeedSourceProperties; + public static TypedPersistence tokens; + public static TypedPersistence snapshots; + + public static void initialize () { + + PojoCodecProvider pojoCodecProvider = PojoCodecProvider.builder() + .register("com.conveyal.datatools.manager.models") + .register("com.conveyal.gtfs.loader") + .register("com.conveyal.gtfs.validator") + .automatic(true) + .build(); + + // Register our custom codecs which cannot be properly auto-built by reflection + CodecRegistry customRegistry = CodecRegistries.fromCodecs( + new IntArrayCodec(), + new URLCodec(), + new LocalDateCodec()); + + pojoCodecRegistry = fromRegistries(MongoClient.getDefaultCodecRegistry(), + customRegistry, + fromProviders(pojoCodecProvider)); + + MongoClientOptions.Builder builder = MongoClientOptions.builder() +// .sslEnabled(true) + .codecRegistry(pojoCodecRegistry); + + if (DataManager.hasConfigProperty(MONGO_URI)) { + mongo = new MongoClient(new MongoClientURI(DataManager.getConfigPropertyAsText(MONGO_URI), builder)); + LOG.info("Connecting to remote MongoDB instance"); + } else { + LOG.info("Connecting to local MongoDB instance"); + mongo = new MongoClient("localhost", builder.build()); + } + + mongoDatabase = mongo.getDatabase(DataManager.getConfigPropertyAsText(MONGO_DB_NAME)); + + feedSources = new TypedPersistence(mongoDatabase, FeedSource.class); + projects = new TypedPersistence(mongoDatabase, Project.class); + feedVersions = new TypedPersistence(mongoDatabase, FeedVersion.class); + deployments = new TypedPersistence(mongoDatabase, Deployment.class); + notes = new TypedPersistence(mongoDatabase, Note.class); + organizations = new TypedPersistence(mongoDatabase, Organization.class); + externalFeedSourceProperties = new TypedPersistence(mongoDatabase, ExternalFeedSourceProperty.class); + tokens = new TypedPersistence(mongoDatabase, FeedDownloadToken.class); + snapshots = new TypedPersistence(mongoDatabase, Snapshot.class); + + // TODO: Set up indexes on feed versions by feedSourceId, version #? deployments, feedSources by projectId. +// deployments.getMongoCollection().createIndex(Indexes.descending("projectId")); +// feedSources.getMongoCollection().createIndex(Indexes.descending("projectId")); +// feedVersions.getMongoCollection().createIndex(Indexes.descending("feedSourceId", "version")); +// snapshots.getMongoCollection().createIndex(Indexes.descending("feedSourceId", "version")); + } + +} diff --git a/src/main/java/com/conveyal/datatools/manager/persistence/TypedPersistence.java b/src/main/java/com/conveyal/datatools/manager/persistence/TypedPersistence.java new file mode 100644 index 000000000..71a426afc --- /dev/null +++ b/src/main/java/com/conveyal/datatools/manager/persistence/TypedPersistence.java @@ -0,0 +1,194 @@ +package com.conveyal.datatools.manager.persistence; + +import com.conveyal.datatools.manager.auth.Auth0UserProfile; +import com.conveyal.datatools.manager.models.Model; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.ReturnDocument; +import com.mongodb.client.result.DeleteResult; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Constructor; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Updates.set; + +/** + * This provides some abstraction over the Mongo Java driver for storing a particular kind of POJO. + * + * When performing an update (in our case with findOneAndUpdate) the Document of updates + * may contain extra fields beyond those in the Java model class, or values of a type that + * do not match the Java model class. The update will nonetheless add these extra fields + * and wrong-typed values to MongoDB, which is not shocking considering its schemaless + * nature. Of course a retrieved Java object will not contain these extra values + * because it simply doesn't have a field to hold the values. If a value of the wrong + * type has been stored in the database, deserialization will just fail with + * "org.bson.codecs.configuration.CodecConfigurationException: Failed to decode X." + * + * This means clients have the potential to stuff any amount of garbage in our MongoDB + * and trigger deserialization errors during application execution unless we perform + * type checking and clean the incoming documents. There is probably a configuration + * option to force schema adherence, which would prevent long-term compatibility but + * would give us more safety in the short term. + * + * PojoCodecImpl does not seem to have any hooks to throw errors when unexpected fields + * are encountered (see else clause of + * org.bson.codecs.pojo.PojoCodecImpl#decodePropertyModel). We could make our own + * function to imitate the PropertyModel checking and fail early when unexpected fields + * are present in a document. + */ +public class TypedPersistence { + + private static final Logger LOG = LoggerFactory.getLogger(TypedPersistence.class); + + private MongoCollection mongoCollection; + private Constructor noArgConstructor; + private String collectionName; + private final FindOneAndUpdateOptions findOneAndUpdateOptions = new FindOneAndUpdateOptions(); + + public TypedPersistence(MongoDatabase mongoDatabase, Class clazz) { + mongoCollection = mongoDatabase.getCollection(clazz.getSimpleName(), clazz); + collectionName = clazz.getSimpleName(); + try { + noArgConstructor = clazz.getConstructor(new Class[0]); + } catch (NoSuchMethodException ex) { + throw new RuntimeException("Could not get no-arg constructor for class " + clazz.getName(), ex); + } + // set options for findOneAndUpdate (return document should match document after update, not before) + findOneAndUpdateOptions.returnDocument(ReturnDocument.AFTER); + + // TODO: can we merge update and create into createOrUpdate function using upsert option? +// findOneAndUpdateOptions.upsert(true); + } + + public T create (String updateJson) { + T item = null; + try { + // Keeping our own reference to the constructor here is a little shady. + // FIXME: We should try to use some Mongo codec method for this, e.g. inserting an empty document. + item = noArgConstructor.newInstance(); + } catch (Exception ex) { + throw new RuntimeException("Could not use no-arg constructor to instantiate class.", ex); + } + mongoCollection.insertOne(item); + T updatedItem = update(item.id, updateJson); + return updatedItem; + } + + /** + * TODO maybe merge this with the other create implementation above, passing in the base object and the updates. + */ + public void create (T newObject) { + // What happens if an object already exists with the same ID? + mongoCollection.insertOne(newObject); + } + + public void replace (String id, T replaceObject) { + mongoCollection.replaceOne(eq(id), replaceObject); + } + + /** + * Primary method to update Mongo object with provided document. This sets the lastUpdated field to the current time. + */ + public T update (String id, Document updateDocument) { + // Set last updated. + updateDocument.put("lastUpdated", new Date()); + return mongoCollection.findOneAndUpdate(eq(id), new Document("$set", updateDocument), findOneAndUpdateOptions); + } + + /** + * Update Mongo object by ID with the provided JSON string. + */ + public T update (String id, String updateJson) { + return update(id, Document.parse(updateJson)); + } + + /** + * Update the field with the provided value for the Mongo object referenced by ID. + */ + public T updateField (String id, String fieldName, Object value) { + return update(id, new Document(fieldName, value)); + } + + public T updateUser (String id, Auth0UserProfile profile) { + String updateJson = String.format("{userId: %s, userEmail: %s}", profile.getUser_id(), profile.getEmail()); + Document updateDocument = Document.parse(updateJson); + return mongoCollection.findOneAndUpdate(eq(id), new Document("$set", updateDocument), findOneAndUpdateOptions); + } + + public T getById (String id) { + return mongoCollection.find(eq(id)).first(); + } + + /** + * This is not memory efficient. + * TODO: Always use iterators / streams, always perform selection of subsets on the Mongo server side ("where clause"). + */ + public List getAll () { + return mongoCollection.find().into(new ArrayList<>()); + } + + /** + * Get all objects satisfying the supplied Mongo filter. + * This ties our persistence directly to Mongo for now but is expedient. + * We should really have a bit more abstraction here. + */ + public List getFiltered (Bson filter) { + return mongoCollection.find(filter).into(new ArrayList()); + } + + /** + * Expose the internal MongoCollection to the caller. + * This ties our persistence directly to Mongo for now but is expedient. + * We will write all the queries we need in the calling methods, then make an abstraction here on TypedPersistence + * once we see everything we need to support. + */ + public MongoCollection getMongoCollection () { + return this.mongoCollection; + } + + /** + * Get all objects satisfying the supplied Mongo filter. + * This ties our persistence directly to Mongo for now but is expedient. + * We should really have a bit more abstraction here. + */ + public T getOneFiltered (Bson filter, Bson sortBy) { + if (sortBy != null) + return mongoCollection.find(filter).sort(sortBy).first(); + else + return mongoCollection.find(filter).first(); + } + + public boolean removeById (String id) { + DeleteResult result = mongoCollection.deleteOne(eq(id)); + if (result.getDeletedCount() == 1) { + LOG.info("Deleted object id={} type={}", id, collectionName); + return true; + } else if (result.getDeletedCount() > 1) { + LOG.error("Deleted more than one {} for ID {}",collectionName, id); + } else { + LOG.error("Could not delete {}: {}", collectionName, id); + } + return false; + } + + public boolean removeFiltered (Bson filter) { + DeleteResult result = mongoCollection.deleteMany(filter); + long count = result.getDeletedCount(); + if (count >= 1) { + LOG.info("Deleted {} objects of type {}", count, collectionName); + return true; + } else { + LOG.warn("No objects to delete for filter"); + } + return false; + } + +} diff --git a/src/main/java/com/conveyal/datatools/manager/utils/HashUtils.java b/src/main/java/com/conveyal/datatools/manager/utils/HashUtils.java index cda3ebb8e..63948d9ef 100644 --- a/src/main/java/com/conveyal/datatools/manager/utils/HashUtils.java +++ b/src/main/java/com/conveyal/datatools/manager/utils/HashUtils.java @@ -27,50 +27,36 @@ public static String hashString(String input) { } } - + /** + * Get MD5 hash for the specified file. + */ public static String hashFile(File file) { - try { - MessageDigest md = MessageDigest.getInstance("MD5"); - FileInputStream fis = new FileInputStream(file); - DigestInputStream dis = new DigestInputStream(fis, md); - - // hash the size dis.read(ByteBuffer.allocate(8).putLong(file.length()).array()); - - // hash first 1000 bytes int i = 0; while (dis.read() != -1 && i < 1000) { i++; - }; - + } // hash 5000 bytes starting in the middle or the remainder of the file if under 10000 if(file.length() > 10000) { dis.skip(file.length() / 2); - i = 0; while (dis.read() != -1 && i < 5000) { i++; - }; + } } else { - while (dis.read() != -1) { - }; + while (dis.read() != -1) { } } - dis.close(); - return new String(Hex.encodeHex(md.digest())); - - } catch(Exception e) { - return ""; } } diff --git a/src/main/java/com/conveyal/datatools/manager/utils/StringUtils.java b/src/main/java/com/conveyal/datatools/manager/utils/StringUtils.java index d3808e31d..4b345349d 100644 --- a/src/main/java/com/conveyal/datatools/manager/utils/StringUtils.java +++ b/src/main/java/com/conveyal/datatools/manager/utils/StringUtils.java @@ -7,6 +7,6 @@ public class StringUtils { * @return a new name with weird letters removed/transliterated. */ public static String getCleanName (String name) { - return name.replace(' ', '_').replaceAll("[^A-Za-z0-9_-]", ""); + return name != null ? name.replace(' ', '_').replaceAll("[^A-Za-z0-9_-]", "") : name; } } diff --git a/src/main/java/com/conveyal/datatools/manager/utils/json/InvalidValueMixIn.java b/src/main/java/com/conveyal/datatools/manager/utils/json/InvalidValueMixIn.java deleted file mode 100644 index 45e7e99ec..000000000 --- a/src/main/java/com/conveyal/datatools/manager/utils/json/InvalidValueMixIn.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.conveyal.datatools.manager.utils.json; - -import com.conveyal.gtfs.model.Priority; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Specify the annotations needed to construct an InvalidValue. This is a Jackson mixin so we don't need to - * add a default constructor, etc. - */ -public abstract class InvalidValueMixIn { - InvalidValueMixIn ( - @JsonProperty("affectedEntity") String affectedEntity, - @JsonProperty("affectedField") String affectedField, - @JsonProperty("affectedEntityId") String affectedEntityId, - @JsonProperty("problemType") String problemType, - @JsonProperty("problemDescription") String problemDescription, - @JsonProperty("problemData") Object problemData, - @JsonProperty("priority") Priority priority - ) {}; -} diff --git a/src/main/java/com/conveyal/datatools/manager/utils/json/JsonManager.java b/src/main/java/com/conveyal/datatools/manager/utils/json/JsonManager.java index 6ca38c47a..753d7a50c 100644 --- a/src/main/java/com/conveyal/datatools/manager/utils/json/JsonManager.java +++ b/src/main/java/com/conveyal/datatools/manager/utils/json/JsonManager.java @@ -8,8 +8,6 @@ import com.conveyal.datatools.editor.models.transit.GtfsRouteType; import com.conveyal.datatools.editor.utils.JacksonSerializers; -import com.conveyal.geojson.GeoJsonModule; -import com.conveyal.gtfs.model.InvalidValue; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; @@ -38,9 +36,9 @@ public class JsonManager { public JsonManager (Class theClass, Class view) { this.theClass = theClass; this.om = new ObjectMapper(); - om.addMixIn(InvalidValue.class, InvalidValueMixIn.class); + // previous model for gtfs validation errors +// om.addMixIn(InvalidValue.class, InvalidValueMixIn.class); om.addMixIn(Rectangle2D.class, Rectangle2DMixIn.class); - om.registerModule(new GeoJsonModule()); SimpleModule deser = new SimpleModule(); deser.addDeserializer(LocalDate.class, new JacksonSerializers.LocalDateDeserializer()); diff --git a/src/main/java/com/conveyal/gtfs/GraphQLController.java b/src/main/java/com/conveyal/gtfs/GraphQLController.java new file mode 100644 index 000000000..468181806 --- /dev/null +++ b/src/main/java/com/conveyal/gtfs/GraphQLController.java @@ -0,0 +1,84 @@ +package com.conveyal.gtfs; + +import com.conveyal.gtfs.graphql.GTFSGraphQL; +import com.fasterxml.jackson.databind.JsonNode; +import graphql.ExecutionInput; +import graphql.ExecutionResult; +import graphql.introspection.IntrospectionQuery; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.Request; +import spark.Response; + +import java.io.IOException; +import java.util.Map; + +import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage; + +/** + * This Spark Controller contains methods to provide HTTP responses to GraphQL queries, including a query for the + * GraphQL schema. + */ +public class GraphQLController { + private static final Logger LOG = LoggerFactory.getLogger(GraphQLController.class); + + /** + * A Spark Controller that responds to a GraphQL query in HTTP GET query parameters. + */ + public static Map get (Request request, Response response) { + JsonNode varsJson = null; + try { + varsJson = GraphQLMain.mapper.readTree(request.queryParams("variables")); + } catch (IOException e) { + LOG.warn("Error processing variables", e); + haltWithMessage(request, 400, "Malformed JSON"); + } + String queryJson = request.queryParams("query"); + return doQuery(varsJson, queryJson, response); + } + + /** + * A Spark Controller that responds to a GraphQL query in an HTTP POST body. + */ + public static Map post (Request req, Response response) { + JsonNode node = null; + try { + node = GraphQLMain.mapper.readTree(req.body()); + } catch (IOException e) { + LOG.warn("Error processing POST body JSON", e); + haltWithMessage(req, 400, "Malformed JSON"); + } + JsonNode vars = node.get("variables"); + String query = node.get("query").asText(); + return doQuery(vars, query, response); + } + + /** + * Execute a GraphQL query and return result that fully complies with the GraphQL specification. + */ + private static Map doQuery (JsonNode varsJson, String queryJson, Response response) { + long startTime = System.currentTimeMillis(); + if (varsJson == null && queryJson == null) { + return getSchema(null, null); + } + Map variables = GraphQLMain.mapper.convertValue(varsJson, Map.class); + ExecutionInput executionInput = ExecutionInput.newExecutionInput() + .query(queryJson) + .variables(variables) + .build(); + ExecutionResult result = GTFSGraphQL.getGraphQl().execute(executionInput); + long endTime = System.currentTimeMillis(); + LOG.info("Query took {} msec", endTime - startTime); + return result.toSpecification(); + } + + + /** + * A Spark Controller that returns the GraphQL schema. + */ + static Map getSchema(Request req, Response res) { + return GTFSGraphQL.getGraphQl().execute(IntrospectionQuery.INTROSPECTION_QUERY).toSpecification(); + } + + +} diff --git a/src/main/java/com/conveyal/gtfs/GraphQLMain.java b/src/main/java/com/conveyal/gtfs/GraphQLMain.java new file mode 100644 index 000000000..e0b326a23 --- /dev/null +++ b/src/main/java/com/conveyal/gtfs/GraphQLMain.java @@ -0,0 +1,68 @@ +package com.conveyal.gtfs; + +import com.conveyal.datatools.common.utils.CorsFilter; +import com.conveyal.gtfs.graphql.GTFSGraphQL; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import spark.ResponseTransformer; + +import javax.sql.DataSource; + +import static spark.Spark.after; +import static spark.Spark.get; +import static spark.Spark.post; + +/** + * Test main method to set up a new-style (as of June 2017) GraphQL API + * + * What we're trying to provide is this: + * The queries that analysis-ui makes are at https://github.com/conveyal/analysis-ui/blob/dev/lib/graphql/query.js ; + * note that feeds are wrapped in bundles in analysis-ui (we wrap the GTFS API types) + * GraphQL queries for datatools-ui are at https://github.com/catalogueglobal/datatools-ui/blob/dev/lib/gtfs/util/graphql.js. + * + * We will eventually want to replace some of the REST-ish endpoints in datatools-ui, including: + * stops/routes by bounding box + * stop/routes by text string search (route_long_name/route_short_name, stop_name/stop_id/stop_code) + * Feeds - to get a list of the feed_ids that have been loaded into the gtfs-api + * + * Here are some sample database URLs + * H2_FILE_URL = "jdbc:h2:file:~/test-db"; // H2 memory does not seem faster than file + * SQLITE_FILE_URL = "jdbc:sqlite:/Users/abyrd/test-db"; + * POSTGRES_LOCAL_URL = "jdbc:postgresql://localhost/catalogue"; + */ +public class GraphQLMain { + // Shared object mapper with GraphQLController. + protected static final ObjectMapper mapper = new ObjectMapper(); + private static final Logger LOG = LoggerFactory.getLogger(GraphQLMain.class); + + /** + * @param args to use the local postgres database, jdbc:postgresql://localhost/gtfs + */ + public static void main (String[] args) { + String databaseUrl = args[0]; + String apiPrefix = "/"; + if (args.length > 1) { + apiPrefix = args[1]; + } + DataSource dataSource = GTFS.createDataSource(databaseUrl, null, null); + initialize(dataSource, apiPrefix); + CorsFilter.apply(); + after((request, response) -> response.header("Content-Encoding", "gzip")); + } + + /** + * DataSource created with GTFS::createDataSource (see main() for example) + * API prefix should begin and end with "/", e.g. "/api/" + */ + public static void initialize (DataSource dataSource, String apiPrefix) { + LOG.info("Initialized GTFS GraphQL API at localhost:port{}", apiPrefix); + GTFSGraphQL.initialize(dataSource); + get(apiPrefix + "graphql", GraphQLController::get, mapper::writeValueAsString); + post(apiPrefix + "graphql", GraphQLController::post, mapper::writeValueAsString); + get(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString); + post(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString); + } + +} + diff --git a/src/main/java/com/conveyal/gtfs/JsonTransformer.java b/src/main/java/com/conveyal/gtfs/JsonTransformer.java new file mode 100644 index 000000000..9e95acf6a --- /dev/null +++ b/src/main/java/com/conveyal/gtfs/JsonTransformer.java @@ -0,0 +1,54 @@ +package com.conveyal.gtfs; + +import com.conveyal.gtfs.model.Frequency; +import com.conveyal.gtfs.model.Pattern; +import com.conveyal.gtfs.model.Service; +import com.conveyal.gtfs.model.Shape; +import com.conveyal.gtfs.model.Trip; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; +import spark.Request; +import spark.Response; +import spark.ResponseTransformer; + +import java.util.Map; + +/** + * Serve output as json. + */ + +public class JsonTransformer implements ResponseTransformer { + private static final ObjectMapper objectMapper = new ObjectMapper(); + + @Override + public String render(Object o) throws Exception { + objectMapper.addMixIn(Trip.class, TripMixIn.class); + objectMapper.addMixIn(Frequency.class, FrequencyMixIn.class); + objectMapper.addMixIn(Pattern.class, PatternMixin.class); + return objectMapper.writeValueAsString(o); + } + + /** set the content type */ + public void type (Request request, Response response) { + response.type("application/json"); + } + + public abstract class TripMixIn { + @JsonIgnore public Map shape_points; + @JsonIgnore public Service service; + } + + public abstract class FrequencyMixIn { + @JsonIgnore public Trip trip; + } + + public abstract class PatternMixin { +// @JsonSerialize(using = GeometrySerializer.class) +// @JsonDeserialize(using = GeometryDeserializer.class) +// public LineString geometry; + + @JsonIgnore + public Joiner joiner; + } +} diff --git a/src/main/java/com/conveyal/gtfs/validator/json/LoadStatus.java b/src/main/java/com/conveyal/gtfs/validator/json/LoadStatus.java new file mode 100644 index 000000000..9d71290b6 --- /dev/null +++ b/src/main/java/com/conveyal/gtfs/validator/json/LoadStatus.java @@ -0,0 +1,13 @@ +package com.conveyal.gtfs.validator.json; + +/** + * Created by landon on 7/31/17. + * Note: this is effectively a placeholder class to override the + * conveyal/gtfs-validator class in the same package (com.conveyal.gtfs.validator.json). + * gtfs-validator has been removed from this application, but things won't + * deserialize without this enum class (in this package). + */ +/** Why a GTFS feed failed to load */ +public enum LoadStatus { + SUCCESS, INVALID_ZIP_FILE, OTHER_FAILURE, MISSING_REQUIRED_FIELD, INCORRECT_FIELD_COUNT_IMPROPER_QUOTING +} diff --git a/src/main/resources/.properties b/src/main/resources/.properties new file mode 100644 index 000000000..63784232a --- /dev/null +++ b/src/main/resources/.properties @@ -0,0 +1 @@ +repo_url=${project.scm.url} \ No newline at end of file diff --git a/src/main/resources/gtfs/gtfsplus.yml b/src/main/resources/gtfs/gtfsplus.yml index 6d961ff66..f8608352a 100644 --- a/src/main/resources/gtfs/gtfsplus.yml +++ b/src/main/resources/gtfs/gtfsplus.yml @@ -173,6 +173,8 @@ text: Far side of intersection - value: AT text: Stop is at position + - value: MB + text: Mid-block - value: OP text: Stop is across street columnWidth: 2 diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml new file mode 100644 index 000000000..4f32e6d54 --- /dev/null +++ b/src/main/resources/logback.xml @@ -0,0 +1,11 @@ + + + + + %d{MMM dd HH:mm:ss.SSS} [%thread] %-5level %logger{36}:%L - %msg%n + + + + + + \ No newline at end of file diff --git a/src/main/resources/public/auth0-silent-callback.html b/src/main/resources/public/auth0-silent-callback.html new file mode 100644 index 000000000..b89083643 --- /dev/null +++ b/src/main/resources/public/auth0-silent-callback.html @@ -0,0 +1,14 @@ + + + + + + + diff --git a/src/main/resources/public/index.html b/src/main/resources/public/index.html index a626c051e..d35a7572f 100644 --- a/src/main/resources/public/index.html +++ b/src/main/resources/public/index.html @@ -9,7 +9,7 @@ -
    +
    diff --git a/src/test/java/com/conveyal/datatools/DatatoolsTest.java b/src/test/java/com/conveyal/datatools/DatatoolsTest.java index 2d2200b73..51caf6afc 100644 --- a/src/test/java/com/conveyal/datatools/DatatoolsTest.java +++ b/src/test/java/com/conveyal/datatools/DatatoolsTest.java @@ -1,7 +1,7 @@ package com.conveyal.datatools; import com.conveyal.datatools.manager.DataManager; -import org.junit.Before; +import org.junit.jupiter.api.BeforeAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -14,13 +14,13 @@ public abstract class DatatoolsTest { private static final Logger LOG = LoggerFactory.getLogger(DatatoolsTest.class); private static boolean setUpIsDone = false; - @Before - public void setUp() { + @BeforeAll + public static void setUp() { if (setUpIsDone) { return; } LOG.info("DatatoolsTest setup"); - String[] args = {"../configurations/gtfs.works/dev/settings.yml", "../configurations/gtfs.works/dev/server.yml"}; + String[] args = {"configurations/default/env.yml.tmp", "configurations/default/server.yml.tmp"}; try { DataManager.main(args); setUpIsDone = true; diff --git a/src/test/java/com/conveyal/datatools/LoadFeedTest.java b/src/test/java/com/conveyal/datatools/LoadFeedTest.java index e8b57b448..60e28aaad 100644 --- a/src/test/java/com/conveyal/datatools/LoadFeedTest.java +++ b/src/test/java/com/conveyal/datatools/LoadFeedTest.java @@ -2,44 +2,35 @@ import com.conveyal.datatools.manager.models.FeedSource; import com.conveyal.datatools.manager.models.FeedVersion; -import org.junit.Before; +import org.junit.jupiter.api.BeforeAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.InputStream; - /** * Created by landon on 2/24/17. */ -public abstract class LoadFeedTest extends DatatoolsTest { +public abstract class LoadFeedTest { private static final Logger LOG = LoggerFactory.getLogger(LoadFeedTest.class); public static FeedSource source; public static FeedVersion version; - private static boolean setUpIsDone = false; - @Before + @BeforeAll public void setUp() { - if (setUpIsDone) { - return; - } - super.setUp(); + DatatoolsTest.setUp(); LOG.info("ProcessGtfsSnapshotMergeTest setup"); - File caltrainGTFS = new File(LoadFeedTest.class.getResource("caltrain_gtfs.zip").getFile()); - source = new FeedSource("test"); - source.save(); - version = new FeedVersion(source); - InputStream is = null; - try { - is = new FileInputStream(caltrainGTFS); - } catch (FileNotFoundException e) { - e.printStackTrace(); - } - version.newGtfsFile(is); - version.save(); - setUpIsDone = true; +// File caltrainGTFS = new File(LoadFeedTest.class.getResource("caltrain_gtfs.zip").getFile()); +// source = new FeedSource("test"); +// source.save(); +// version = new FeedVersion(source); +// InputStream is = null; +// try { +// is = new FileInputStream(caltrainGTFS); +// } catch (FileNotFoundException e) { +// e.printStackTrace(); +// } +// version.newGtfsFile(is); +// version.save(); +// setUpIsDone = true; } } diff --git a/src/test/java/com/conveyal/datatools/editor/ProcessGtfsSnapshotMergeTest.java b/src/test/java/com/conveyal/datatools/editor/ProcessGtfsSnapshotMergeTest.java index eb77568f2..45717fb2c 100644 --- a/src/test/java/com/conveyal/datatools/editor/ProcessGtfsSnapshotMergeTest.java +++ b/src/test/java/com/conveyal/datatools/editor/ProcessGtfsSnapshotMergeTest.java @@ -1,68 +1,54 @@ package com.conveyal.datatools.editor; -import com.conveyal.datatools.DatatoolsTest; -import com.conveyal.datatools.LoadFeedTest; -import com.conveyal.datatools.editor.datastore.FeedTx; -import com.conveyal.datatools.editor.datastore.VersionedDataStore; import com.conveyal.datatools.editor.jobs.ProcessGtfsSnapshotMerge; -import com.conveyal.datatools.manager.models.FeedSource; -import com.conveyal.datatools.manager.models.FeedVersion; -import org.junit.Before; -import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.InputStream; - -import static org.junit.Assert.assertEquals; - /** * Created by landon on 2/24/17. */ -public class ProcessGtfsSnapshotMergeTest extends LoadFeedTest { +public class ProcessGtfsSnapshotMergeTest { private static final Logger LOG = LoggerFactory.getLogger(ProcessGtfsSnapshotMergeTest.class); static ProcessGtfsSnapshotMerge snapshotMerge; private static boolean setUpIsDone = false; - @Before - public void setUp() { - if (setUpIsDone) { - return; - } - super.setUp(); - LOG.info("ProcessGtfsSnapshotMergeTest setup"); - - snapshotMerge = new ProcessGtfsSnapshotMerge(super.version, "test@conveyal.com"); - snapshotMerge.run(); - setUpIsDone = true; - } - - @Test - public void countRoutes() { - FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); - assertEquals(feedTx.routes.size(), 3); - } - - @Test - public void countStops() { - FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); - assertEquals(feedTx.stops.size(), 31); - } - - @Test - public void countTrips() { - FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); - assertEquals(feedTx.trips.size(), 252); - } - - @Test - public void countFares() { - FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); - assertEquals(feedTx.fares.size(), 6); - } + // TODO: add back in test once editor load is working +// @Before +// public void setUp() { +// if (setUpIsDone) { +// return; +// } +// super.setUp(); +// LOG.info("ProcessGtfsSnapshotMergeTest setup"); +// +// snapshotMerge = new ProcessGtfsSnapshotMerge(super.version, "test@conveyal.com"); +// snapshotMerge.run(); +// setUpIsDone = true; +// } +// +// @Test +// public void countRoutes() { +// FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); +// assertEquals(feedTx.routes.size(), 3); +// } +// +// @Test +// public void countStops() { +// FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); +// assertEquals(feedTx.stops.size(), 31); +// } +// +// @Test +// public void countTrips() { +// FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); +// assertEquals(feedTx.trips.size(), 252); +// } +// +// @Test +// public void countFares() { +// FeedTx feedTx = VersionedDataStore.getFeedTx(source.id); +// assertEquals(feedTx.fares.size(), 6); +// } // @Test // public void duplicateStops() { diff --git a/src/test/java/com/conveyal/datatools/manager/controllers/api/AppInfoControllerTest.java b/src/test/java/com/conveyal/datatools/manager/controllers/api/AppInfoControllerTest.java new file mode 100644 index 000000000..66a1b9d43 --- /dev/null +++ b/src/test/java/com/conveyal/datatools/manager/controllers/api/AppInfoControllerTest.java @@ -0,0 +1,43 @@ +package com.conveyal.datatools.manager.controllers.api; + +import com.conveyal.datatools.DatatoolsTest; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +public class AppInfoControllerTest { + /** + * Prepare and start a testing-specific web server + */ + @BeforeAll + public static void setUp() { + // start server if it isn't already running + DatatoolsTest.setUp(); + } + + /** + * Make sure the app info endpoint can load and return expected data. + */ + @Test + public void canReturnApprInfo() throws IOException { + String jsonString = given() + .port(4000) + .get("/api/manager/public/appinfo") + .then() + // make sure the repoUrl matches what is found in the pom.xml + .body("repoUrl", equalTo("https://github.com/catalogueglobal/datatools-server.git")) + .extract().response().asString(); + + // parse the json and make sure the commit is the length of characters that a commit hash would be + ObjectMapper mapper = new ObjectMapper(); + JsonNode json = mapper.readTree(jsonString); + assertThat(json.get("commit").asText().length(), equalTo(40)); + } +} diff --git a/src/test/java/com/conveyal/datatools/manager/persistence/PersistenceTest.java b/src/test/java/com/conveyal/datatools/manager/persistence/PersistenceTest.java new file mode 100644 index 000000000..7eb62c77d --- /dev/null +++ b/src/test/java/com/conveyal/datatools/manager/persistence/PersistenceTest.java @@ -0,0 +1,226 @@ +package com.conveyal.datatools.manager.persistence; + +import com.conveyal.datatools.DatatoolsTest; +import com.conveyal.datatools.manager.models.FeedSource; +import com.conveyal.datatools.manager.models.Project; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Created by landon on 9/6/17. + */ +public class PersistenceTest { + private static final Logger LOG = LoggerFactory.getLogger(PersistenceTest.class); + + @BeforeAll + public static void setUp() { + DatatoolsTest.setUp(); + LOG.info("{} setup", PersistenceTest.class.getSimpleName()); + + Persistence.initialize(); + } + + @Test + public void createFeedSource() { + FeedSource feedSource = new FeedSource("test feed source"); + String id = feedSource.id; + Persistence.feedSources.create(feedSource); + String retrievedId = Persistence.feedSources.getById(id).id; + assertEquals(retrievedId, id, "Found FeedSource ID should equal inserted ID."); + } + +// @Test +// public void createOrganization() { +// Organization organization = new Organization(); +// organization.subscriptionBeginDate = new Date(); +// organization.subscriptionEndDate = new Date(); +// Persistence.organizations.create(organization); +// String retrievedId = Persistence.organizations.getById(organization.id).id; +// assertEquals("Found FeedSource ID should equal inserted ID.", retrievedId, organization.id); +// } + +// @Test +// public void doubleInsertFeedSource() { +// FeedSource feedSource = new FeedSource("test feed source"); +// String id = feedSource.id; +// Persistence.feedSources.create(feedSource); +// Persistence.feedSources.create(feedSource); +// String retrievedId = Persistence.feedSources.getById(id).id; +// assertEquals("Found FeedSource ID should equal inserted ID.", retrievedId, id); +// } +// + @Test + public void createProject() { + Project project = new Project(); + String id = project.id; + Persistence.projects.create(project); + String retrievedId = Persistence.projects.getById(id).id; + assertEquals(retrievedId, id, "Found Project ID should equal inserted ID."); + } +// +// @Test +// public void createDeployment() { +// Deployment deployment = new Deployment(); +// String id = deployment.id; +// Persistence.deployments.insertOne(deployment); +// String retrievedId = Persistence.deployments.find(eq(id)).first().id; +// assertEquals("Found Deployment ID should equal inserted ID.", retrievedId, id); +// } +// +// @Test +// public void createNote() { +// Note note = new Note(); +// String id = note.id; +// Persistence.notes.insertOne(note); +// String retrievedId = Persistence.notes.find(eq(id)).first().id; +// assertEquals("Found Note ID should equal inserted ID.", retrievedId, id); +// } +// +// @Test +// public void createOrganization() { +// Organization organization = new Organization(); +// String id = organization.id; +// Persistence.organizations.insertOne(organization); +// String retrievedId = Persistence.organizations.find(eq(id)).first().id; +// assertEquals("Found Organization ID should equal inserted ID.", retrievedId, id); +// } +// +// @Test +// public void createFeedVersion() { +// FeedVersion feedVersion = new FeedVersion(); +// String id = feedVersion.id; +// Persistence.feedVersions.insertOne(feedVersion); +// String retrievedId = Persistence.feedVersions.find(eq(id)).first().id; +// assertEquals("Found FeedVersion ID should equal inserted ID.", retrievedId, id); +// } +// +// @Test +// public void deleteFeedSource() { +// FeedSource feedSource = new FeedSource("test feed source"); +// String id = feedSource.id; +// Persistence.feedSources.insertOne(feedSource); +// DeleteResult deleteResult = Persistence.feedSources.deleteOne(eq(id)); +// assertEquals("Found FeedSource ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void deleteProject() { +// Project project = new Project(); +// String id = project.id; +// Persistence.projects.insertOne(project); +// DeleteResult deleteResult = Persistence.projects.deleteOne(eq(id)); +// assertEquals("Found Project ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void deleteDeployment() { +// Deployment deployment = new Deployment(); +// String id = deployment.id; +// Persistence.deployments.insertOne(deployment); +// DeleteResult deleteResult = Persistence.deployments.deleteOne(eq(id)); +// assertEquals("Found Deployment ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void deleteNote() { +// Note note = new Note(); +// String id = note.id; +// Persistence.notes.insertOne(note); +// DeleteResult deleteResult = Persistence.notes.deleteOne(eq(id)); +// assertEquals("Found Note ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void deleteOrganization() { +// Organization organization = new Organization(); +// String id = organization.id; +// Persistence.organizations.insertOne(organization); +// DeleteResult deleteResult = Persistence.organizations.deleteOne(eq(id)); +// assertEquals("Found Organization ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void deleteFeedVersion() { +// FeedVersion feedVersion = new FeedVersion(); +// String id = feedVersion.id; +// Persistence.feedVersions.insertOne(feedVersion); +// DeleteResult deleteResult = Persistence.feedVersions.deleteOne(eq(id)); +// assertEquals("Found FeedVersion ID should equal inserted ID.", deleteResult.getDeletedCount(), 1); +// } +// +// @Test +// public void updateFeedSource() { +// FeedSource feedSource = new FeedSource("test feed source"); +// String id = feedSource.id; +// String value = "test"; +// Persistence.feedSources.insertOne(feedSource); +// UpdateResult updateResult = Persistence.feedSources.updateOne(eq(id), set("name", value)); +// FeedSource res = Persistence.feedSources.find(eq(id)).first(); +// assertEquals("Field 'name' should be updated.", res.name, value); +// assertEquals("Found FeedSource ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +// +// @Test +// public void updateProject() { +// Project project = new Project(); +// String id = project.id; +// String value = "test"; +// Persistence.projects.insertOne(project); +// UpdateResult updateResult = Persistence.projects.updateOne(eq(id), set("name", value)); +// Project res = Persistence.projects.find(eq(id)).first(); +// assertEquals("Field 'name' should be updated.", res.name, value); +// assertEquals("Found Project ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +// +// @Test +// public void updateDeployment() { +// Deployment deployment = new Deployment(); +// String id = deployment.id; +// String value = "test"; +// Persistence.deployments.insertOne(deployment); +// UpdateResult updateResult = Persistence.deployments.updateOne(eq(id), set("name", value)); +// Deployment res = Persistence.deployments.find(eq(id)).first(); +// assertEquals("Field 'name' should be updated.", res.name, value); +// assertEquals("Found Deployment ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +// +// @Test +// public void updateNote() { +// Note note = new Note(); +// String id = note.id; +// String value = "test"; +// Persistence.notes.insertOne(note); +// UpdateResult updateResult = Persistence.notes.updateOne(eq(id), set("body", value)); +// Note res = Persistence.notes.find(eq(id)).first(); +// assertEquals("Field 'body' should be updated.", res.body, value); +// assertEquals("Found Note ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +// +// @Test +// public void updateOrganization() { +// Organization organization = new Organization(); +// String id = organization.id; +// String value = "test"; +// Persistence.organizations.insertOne(organization); +// UpdateResult updateResult = Persistence.organizations.updateOne(eq(id), set("name", value)); +// Organization res = Persistence.organizations.find(eq(id)).first(); +// assertEquals("Field 'name' should be updated.", res.name, value); +// assertEquals("Found Organization ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +// +// @Test +// public void updateFeedVersion() { +// FeedVersion feedVersion = new FeedVersion(); +// String id = feedVersion.id; +// String value = "test"; +// Persistence.feedVersions.insertOne(feedVersion); +// UpdateResult updateResult = Persistence.feedVersions.updateOne(eq(id), set("name", value)); +// FeedVersion res = Persistence.feedVersions.find(eq(id)).first(); +// assertEquals("Field 'name' should be updated.", res.name, value); +// assertEquals("Found FeedVersion ID should equal inserted ID.", updateResult.getModifiedCount(), 1); +// } +}