diff --git a/jmeter/README.md b/jmeter/README.md
index db8f693b2..6cbba28da 100644
--- a/jmeter/README.md
+++ b/jmeter/README.md
@@ -31,8 +31,8 @@ The test plan can be run straight from the command line. A helper script is pro
| 1 | test plan mode | `batch`, `fetch`, `query` or `upload` | which test plan mode to use when running the jmeter script. (see notes below for more explanation of these test plan modes) |
| 2 | number of threads | an integer greater than 0 | The number of simultaneous threads to run at a time. The threads will have staggered start times 1 second apart. |
| 3 | number of loops | an integer greater than 0 | the number of loops to run. This is combined with the number of threads, so if the number of threads is 10 and the number of loops is 8, the total number of test plans to run will be 80. |
-| 4 | project name or batch csv file | string of the project name or string of file path to batch csv file | This argument is required if running the script with the `batch` test plan mode, otherwise, this argument is optional. The jmeter script will create new projects with a project name plus the current iteration number. The default name is "test project #". Also, if the s3 bucket argument is also provided, the output folder will be tarred up and with this name. |
-| 5 | s3 bucket | string of an s3 bucket | OPTIONAL. If provided, the script will tar up the output folder and attempt to upload to the specified s3 bucket. This assumes that aws credentials have been setup for use by the `aws` command line tool. |
+| 4 | project name or batch csv file | string of the project name or string of file path to batch csv file | This argument is required when running the script in the `batch` test plan mode; otherwise, it is optional. In `fetch` or `upload` mode, the jmeter script will create new projects named with the provided project name (or "test project" if a name is not provided) plus the current iteration number. In these modes, the feed url and upload file are not configurable: in `fetch` mode, the url `http://documents.atlantaregional.com/transitdata/gtfs_ASC.zip` will be used to fetch the feed that creates the feed version, and in `upload` mode, the file `fixtures/gtfs.zip` will be uploaded to create the feed version. In `query` mode, jmeter will try to find the project matching the provided name (as long as the name is not "test project"); if this argument is not provided, a random project will be picked. |
+| 5 | s3 bucket | string of an s3 bucket | OPTIONAL. If provided, the script will tar up the output folder and attempt to upload it to the specified s3 bucket. This assumes that aws credentials have been set up for use by the `aws` command line tool. If not running in batch mode and a project name has been specified, the archive will be named `{project name}.tar.gz`; otherwise, it will be named `output.tar.gz`. |
Examples:
@@ -48,7 +48,7 @@ _Run the test plan in query mode 80 total times in 10 threads each completing 8
_Run in batch mode. Note that all feeds in the csv file will be processed in each loop. So in the following command, each feed in the batch.csv file would be processed 6 times. See the section below for documentation on the csv file and also see the fixtures folder for an example file._
```sh
-./run-tests.sh query 3 2 batch.csv my-s3-bucket
+./run-tests.sh batch 3 2 batch.csv my-s3-bucket
```
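
_Run the test plan once in fetch mode with a custom project name and upload the results to s3. Per the arguments documented above, the output archive would be named `my project.tar.gz`. The bucket name below is hypothetical._
```sh
./run-tests.sh fetch 1 1 "my project" my-s3-bucket
```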
### Running the upload test on multiple gtfs files
@@ -124,6 +124,8 @@ This section is run under the `query` test plan mode. This script assumes that
This section is run in all test plan modes.
+1. Fetch stops and a row count of stops
+1. Make sure the number of stops matches the row count of stops
1. Fetch all routes
1. Pick a random route
1. Fetch all trips on selected route
@@ -133,6 +135,8 @@ This section is run in all test plan modes.
1. Fetch embedded stop_times from trips from a random pattern
1. Check that all stop_times have proper trip_id
1. Check that all stop_times in trips on pattern have same stop sequence as pattern
+1. Make a GraphQL request that contains a nested query of routes, patterns and stops (see the sketch after this list)
+1. Make sure that each route's route_id matches the route_id of the route nested within each of its patterns
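
As a sketch, the nested query (it appears in full in the test script changes below) has roughly this shape, trimmed here to the fields the assertion inspects:

```graphql
query nestedQuery($namespace: String) {
  feed(namespace: $namespace) {
    routes(limit: 1) {
      route_id
      patterns(limit: -1) {
        # the assertion verifies this nested route_id matches the parent route
        route {
          route_id
        }
      }
    }
  }
}
```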
## Reporting
@@ -140,4 +144,4 @@ If running this script in GUI mode, it is possible to see all results in real-ti
When running the test plan from the command line in non-gui mode, reports will be saved to the `output` folder. The outputs will contain a csv file of all requests made and an html report summarizing the results. If the test plan mode was `batch`, `fetch` or `upload`, then another csv file will be written that contains a list of the elapsed time for processing the creation of a new gtfs feed version.
-The csv files can be loaded into a jmeter GUI listener to view more details.
+The csv files can be loaded into a jmeter GUI to view more details.
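
An html dashboard can also be regenerated offline from a results csv using jmeter's `-g` flag. The file names below are hypothetical; check the `output` folder for the actual csv name:

```sh
jmeter -g output/results.csv -o output/dashboard
```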
diff --git a/jmeter/test-script.jmx b/jmeter/test-script.jmx
index 60e890cae..4ed4ed0eb 100644
--- a/jmeter/test-script.jmx
+++ b/jmeter/test-script.jmx
@@ -99,8 +99,9 @@
-
+
${continueBatchLoop}
+ Determines whether this loop should run again by advancing to the next record in the batch csv file
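
(For reference, the While Controller re-enters the loop only while `continueBatchLoop` is the string "true"; JSR223 elements flip the flag, as seen later in this diff. A minimal Groovy sketch of the pattern follows — the `remainingBatchRecords` variable is hypothetical; the real script advances through a batch csv file.)

```groovy
// JSR223 sketch: keep looping while batch records remain.
int remaining = (vars.get("remainingBatchRecords") ?: "0").toInteger()
if (remaining > 0) {
    vars.put("remainingBatchRecords", String.valueOf(remaining - 1))
    vars.put("continueBatchLoop", "true")  // While Controller runs another iteration
} else {
    vars.put("continueBatchLoop", "false") // csv exhausted: exit the batch loop
}
```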
@@ -645,20 +646,47 @@ if (failureMessage?.trim()) {
-
+
false
// do not do batch loop in this case
vars.put("continueBatchLoop", "false")
+ Batch mode is not enabled in query mode
-
+
projectId
$[*].id
0
+
+
+
+
+ import groovy.json.JsonSlurper;
+
+JsonSlurper JSON = new JsonSlurper();
+
+// if a custom project name is specified, try to find it.
+// if a project matching the name is found, then override the projectId set by the JSON extractor in the step before this.
+if (!vars.get("projectName").equals("test project")) {
+ // parse json
+ try {
+ def jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+ jsonResponse.each{ project ->
+ if (project.name.equals(vars.get("projectName"))) {
+ vars.put("projectId", project.id)
+ }
+ }
+  } catch (Exception e) {
+    // ignore parse errors; keep the projectId set by the JSON extractor
+  }
+}
+ groovy
+
+
@@ -764,6 +792,43 @@ vars.put("continueBatchLoop", "false")
+
+ groovy
+
+
+
+ import groovy.json.JsonSlurper;
+
+def failureMessage = "";
+def jsonResponse = null;
+
+JsonSlurper JSON = new JsonSlurper();
+
+// parse json
+try {
+ jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+} catch (Exception e) {
+  failureMessage += "Invalid JSON.\n"
+}
+
+// bail out if the response could not be parsed; the lines below would otherwise throw a NullPointerException
+if (jsonResponse == null) {
+  log.warn(failureMessage)
+  return;
+}
+
+def curDate = jsonResponse.validationResult.firstCalendarDate.toInteger()
+def dateWithMostService = curDate
+def maxServiceSeconds = jsonResponse.validationResult.dailyTotalSeconds[0]
+
+jsonResponse.validationResult.dailyTotalSeconds.each {
+ // Update maxServiceSeconds if needed
+ if (it > maxServiceSeconds) {
+ dateWithMostService = curDate
+ maxServiceSeconds = it
+ }
+
+ // increment cur date
+ curDate += 1
+}
+
+vars.put("date", dateWithMostService.toString())
+
+
true
@@ -1114,16 +1179,18 @@ try {
failureMessage += "Invalid JSON.\n"
}
-def trips = jsonResponse.data.feed.patterns[0].trips
+def firstPattern = jsonResponse.data.feed.patterns[0]
-trips.each { trip ->
- trip.stop_times.each { stop_time ->
- if (!trip.trip_id.equals(stop_time.trip_id)) {
- failureMessage += "trip_id mismatch."
- failureMessage += "Parent trip has trip_id: " + trip.trip_id
- failureMessage += " Stop Time has stop_id: " + stop_time.stop_id
- failureMessage += " and trip_id: " + stop_time.trip_id + "\n"
- }
+if (firstPattern != null) {
+ firstPattern.trips.each { trip ->
+ trip.stop_times.each { stop_time ->
+ if (!trip.trip_id.equals(stop_time.trip_id)) {
+ failureMessage += "trip_id mismatch."
+ failureMessage += "Parent trip has trip_id: " + trip.trip_id
+ failureMessage += " Stop Time has stop_id: " + stop_time.stop_id
+ failureMessage += " and trip_id: " + stop_time.trip_id + "\n"
+ }
+ }
}
}
@@ -1153,27 +1220,97 @@ try {
failureMessage += "Invalid JSON.\n"
}
-def numStopsInPattern = jsonResponse.data.feed.patterns[0].stops.size()
def trips = jsonResponse.data.feed.patterns[0].trips
-def numStopTimesInTrip = 0
trips.each { trip ->
- numStopTimesInTrip = trip.stop_times.size()
- if (numStopTimesInTrip != numStopsInPattern) {
- failureMessage += "mismatch in number of trip stops vs number of pattern stops."
- failureMessage += "There are " + numStopsInPattern + " pattern stops"
- failureMessage += ", but there are " + numStopTimesInTrip + " stop_times"
- failureMessage += " in trip " + trip.trip_id + "\n"
- } else {
- trip.stop_times.eachWithIndex { stop_time, idx ->
- if (!stop_time.stop_id.equals(trip.stop_times[idx].stop_id)) {
- failureMessage += "stop_id mismatch."
- failureMessage += "Pattern stop list stop_id: " + trip.stop_times[idx].stop_id
- failureMessage += " at index: " + idx
- failureMessage += " Stop Time of trip " + trip.trip_id
- failureMessage += " at index: " + idx
- failureMessage += " has stop_id: " + stop_time.stop_id + "\n"
- }
+  def patternStops = jsonResponse.data.feed.patterns[0].stops
+  trip.stop_times.eachWithIndex { stop_time, idx ->
+    // compare each stop_time with the pattern's stop list at the same index
+    if (!stop_time.stop_id.equals(patternStops[idx].stop_id)) {
+      failureMessage += "stop_id mismatch."
+      failureMessage += " Pattern stop list stop_id: " + patternStops[idx].stop_id
+      failureMessage += " at index: " + idx
+      failureMessage += " Stop Time of trip " + trip.trip_id
+      failureMessage += " at index: " + idx
+      failureMessage += " has stop_id: " + stop_time.stop_id + "\n"
+    }
+ }
+}
+
+// set assertion result to fail if an error happened
+if (failureMessage?.trim()) {
+ AssertionResult.setFailureMessage(failureMessage);
+ AssertionResult.setFailure(true);
+}
+ groovy
+
+
+
+
+ true
+
+
+
+ false
+ {"query":"query nestedQuery($namespace: String) {feed (namespace: $namespace) { routes(limit: 1) { patterns(limit: -1) { route { patterns(limit: -1) { route { route_id stops (limit: -1) { stop_id }}} route_id stops (limit: -1) { stop_id }}} route_id stops(limit: -1) { stop_id }}}}", "variables": {"namespace": "${namespace}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This should test whether the dataloader is able to cache queries and whether queries can be combined
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+
+
+ import groovy.json.JsonSlurper;
+
+def failureMessage = "";
+def jsonResponse = null;
+
+JsonSlurper JSON = new JsonSlurper();
+
+// parse json
+try {
+ jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+} catch (Exception e) {
+ failureMessage += "Invalid JSON.\n"
+}
+
+// use safe navigation in case the JSON failed to parse above
+def routes = jsonResponse?.data?.feed?.routes
+
+routes?.each { route ->
+  def routeId = route.route_id
+  route.patterns.each { pattern ->
+    if (pattern.route[0].route_id != routeId) {
+      failureMessage += "route_id of the route nested within a pattern does not match the parent route."
+      failureMessage += " Missing route id: " + routeId + "\n"
}
}
}
@@ -1187,8 +1324,100 @@ if (failureMessage?.trim()) {
+
+ true
+
+
+
+ false
+ {"query":"query shapesQuery($namespace: String) {feed (namespace: $namespace) {patterns(limit: -1) {shape(limit: -1) {shape_pt_lat shape_pt_lon shape_pt_sequence}}}}", "variables": {"namespace": "${namespace}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This tests a common query: fetching all shapes of all patterns.
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ true
+
+
+
+ false
+ {"query":"query patternsQuery($date: String, $namespace: String, $routeId: [String]) { feed (namespace: $namespace) { routes (route_id: $routeId) { route_id, route_short_name, route_long_name, patterns (limit: -1) { pattern_id, name, shape (limit: -1) { lat: shape_pt_lat lon: shape_pt_lon }, stops (limit: -1) { stop_id } trips ( date: $date, limit: -1 ) { stop_times (limit: 1) { arrival_time departure_time } } } stops (limit: -1) { location_type stop_code stop_desc stop_id stop_lat stop_lon stop_name stop_url wheelchair_boarding zone_id } } } }", "variables": {"date": "${date}", "namespace": "${namespace}", "route_id": "${randomRouteId}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This executes the patternsQuery which is used to display the TripsPerHourChart in datatools-ui
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ groovy
+
+
+
+ vars.put("continueBatchLoop", "true")
+
+
false
diff --git a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
index 89a2ba3c9..9aa6cd195 100644
--- a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
+++ b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
@@ -30,6 +30,7 @@ public class SparkUtils {
private static final ObjectMapper mapper = new ObjectMapper();
private static final String BASE_URL = getConfigPropertyAsText("application.public_url");
private static final int DEFAULT_LINES_TO_PRINT = 10;
+ private static final int MAX_CHARACTERS_TO_PRINT = 500;
/**
* Write out the supplied file to the Spark response as an octet-stream.
@@ -162,13 +163,19 @@ public static void logRequestOrResponse(boolean logRequest, Request request, Res
}
if ("application/json".equals(contentType)) {
bodyString = logRequest ? request.body() : response.body();
- if (bodyString != null) {
+ if (bodyString == null) {
+ bodyString = "{body content is null}";
+ } else if (bodyString.length() > MAX_CHARACTERS_TO_PRINT) {
+ bodyString = new StringBuilder()
+ .append("body content is longer than 500 characters, printing first 500 characters here:\n")
+ .append(bodyString, 0, MAX_CHARACTERS_TO_PRINT)
+ .append("\n...and " + (bodyString.length() - MAX_CHARACTERS_TO_PRINT) + " more characters")
+ .toString();
+ } else {
// Pretty print JSON if ContentType is JSON and body is not empty
JsonNode jsonNode = mapper.readTree(bodyString);
// Add new line for legibility when printing
bodyString = "\n" + mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);
- } else {
- bodyString = "{body content is null}";
}
} else if (contentType != null) {
bodyString = String.format("\nnon-JSON body type: %s", contentType);
diff --git a/src/main/java/com/conveyal/datatools/manager/DataManager.java b/src/main/java/com/conveyal/datatools/manager/DataManager.java
index dd1b5e1c1..3db0c3e26 100644
--- a/src/main/java/com/conveyal/datatools/manager/DataManager.java
+++ b/src/main/java/com/conveyal/datatools/manager/DataManager.java
@@ -27,7 +27,7 @@
import com.conveyal.datatools.manager.persistence.FeedStore;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.gtfs.GTFS;
-import com.conveyal.gtfs.GraphQLMain;
+import com.conveyal.gtfs.GraphQLController;
import com.conveyal.gtfs.loader.Table;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -196,7 +196,7 @@ static void registerRoutes() throws IOException {
CorsFilter.apply();
// Initialize GTFS GraphQL API service
// FIXME: Add user permissions check to ensure user has access to feeds.
- GraphQLMain.initialize(GTFS_DATA_SOURCE, GTFS_API_PREFIX);
+ GraphQLController.initialize(GTFS_DATA_SOURCE, GTFS_API_PREFIX);
// Register core API routes
AppInfoController.register(API_PREFIX);
ProjectController.register(API_PREFIX);
diff --git a/src/main/java/com/conveyal/gtfs/GraphQLController.java b/src/main/java/com/conveyal/gtfs/GraphQLController.java
index 468181806..45bf77846 100644
--- a/src/main/java/com/conveyal/gtfs/GraphQLController.java
+++ b/src/main/java/com/conveyal/gtfs/GraphQLController.java
@@ -2,6 +2,7 @@
import com.conveyal.gtfs.graphql.GTFSGraphQL;
import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
import graphql.ExecutionInput;
import graphql.ExecutionResult;
import graphql.introspection.IntrospectionQuery;
@@ -10,10 +11,14 @@
import spark.Request;
import spark.Response;
+import javax.sql.DataSource;
import java.io.IOException;
+import java.util.HashMap;
import java.util.Map;
import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static spark.Spark.get;
+import static spark.Spark.post;
/**
* This Spark Controller contains methods to provide HTTP responses to GraphQL queries, including a query for the
@@ -21,14 +26,15 @@
*/
public class GraphQLController {
private static final Logger LOG = LoggerFactory.getLogger(GraphQLController.class);
+ private static final ObjectMapper mapper = new ObjectMapper();
/**
* A Spark Controller that responds to a GraphQL query in HTTP GET query parameters.
*/
- public static Map get (Request request, Response response) {
+ public static Map getGraphQL (Request request, Response response) {
JsonNode varsJson = null;
try {
- varsJson = GraphQLMain.mapper.readTree(request.queryParams("variables"));
+ varsJson = mapper.readTree(request.queryParams("variables"));
} catch (IOException e) {
LOG.warn("Error processing variables", e);
haltWithMessage(request, 400, "Malformed JSON");
@@ -40,10 +46,10 @@ public static Map get (Request request, Response response) {
/**
* A Spark Controller that responds to a GraphQL query in an HTTP POST body.
*/
- public static Map post (Request req, Response response) {
+ public static Map postGraphQL (Request req, Response response) {
JsonNode node = null;
try {
- node = GraphQLMain.mapper.readTree(req.body());
+ node = mapper.readTree(req.body());
} catch (IOException e) {
LOG.warn("Error processing POST body JSON", e);
haltWithMessage(req, 400, "Malformed JSON");
@@ -61,7 +67,11 @@ private static Map doQuery (JsonNode varsJson, String queryJson,
if (varsJson == null && queryJson == null) {
return getSchema(null, null);
}
- Map variables = GraphQLMain.mapper.convertValue(varsJson, Map.class);
+        // The graphiql app sends the JSON string "{}" (a string, not an object) while doing an introspection query,
+        // which cannot be converted to a Map. Check for it and substitute an empty map in that case.
+ Map variables = varsJson == null || varsJson.toString().equals("\"{}\"")
+ ? new HashMap<>()
+ : mapper.convertValue(varsJson, Map.class);
ExecutionInput executionInput = ExecutionInput.newExecutionInput()
.query(queryJson)
.variables(variables)
@@ -81,4 +91,18 @@ static Map getSchema(Request req, Response res) {
}
+ /**
+ * Register Spark HTTP endpoints. API prefix should begin and end with "/", e.g. "/api/".
+ */
+ public static void initialize (DataSource dataSource, String apiPrefix) {
+ LOG.info("Initialized GTFS GraphQL API at localhost:port{}", apiPrefix);
+ if (dataSource == null) {
+ throw new RuntimeException("Cannot initialize GraphQL endpoints. Data source must not be null.");
+ }
+ GTFSGraphQL.initialize(dataSource);
+ get(apiPrefix + "graphql", GraphQLController::getGraphQL, mapper::writeValueAsString);
+ post(apiPrefix + "graphql", GraphQLController::postGraphQL, mapper::writeValueAsString);
+ get(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString);
+ post(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString);
+ }
}
diff --git a/src/main/java/com/conveyal/gtfs/GraphQLMain.java b/src/main/java/com/conveyal/gtfs/GraphQLMain.java
index e0b326a23..043ac88a6 100644
--- a/src/main/java/com/conveyal/gtfs/GraphQLMain.java
+++ b/src/main/java/com/conveyal/gtfs/GraphQLMain.java
@@ -1,17 +1,10 @@
package com.conveyal.gtfs;
import com.conveyal.datatools.common.utils.CorsFilter;
-import com.conveyal.gtfs.graphql.GTFSGraphQL;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import spark.ResponseTransformer;
import javax.sql.DataSource;
import static spark.Spark.after;
-import static spark.Spark.get;
-import static spark.Spark.post;
/**
* Test main method to set up a new-style (as of June 2017) GraphQL API
@@ -32,10 +25,6 @@
* POSTGRES_LOCAL_URL = "jdbc:postgresql://localhost/catalogue";
*/
public class GraphQLMain {
- // Shared object mapper with GraphQLController.
- protected static final ObjectMapper mapper = new ObjectMapper();
- private static final Logger LOG = LoggerFactory.getLogger(GraphQLMain.class);
-
/**
* @param args to use the local postgres database, jdbc:postgresql://localhost/gtfs
*/
@@ -45,24 +34,12 @@ public static void main (String[] args) {
if (args.length > 1) {
apiPrefix = args[1];
}
- DataSource dataSource = GTFS.createDataSource(databaseUrl, null, null);
- initialize(dataSource, apiPrefix);
+ // Initialize HTTP endpoints with new data source.
+ GraphQLController.initialize(GTFS.createDataSource(databaseUrl, null, null), apiPrefix);
+ // Apply CORS and content encoding header.
CorsFilter.apply();
after((request, response) -> response.header("Content-Encoding", "gzip"));
}
- /**
- * DataSource created with GTFS::createDataSource (see main() for example)
- * API prefix should begin and end with "/", e.g. "/api/"
- */
- public static void initialize (DataSource dataSource, String apiPrefix) {
- LOG.info("Initialized GTFS GraphQL API at localhost:port{}", apiPrefix);
- GTFSGraphQL.initialize(dataSource);
- get(apiPrefix + "graphql", GraphQLController::get, mapper::writeValueAsString);
- post(apiPrefix + "graphql", GraphQLController::post, mapper::writeValueAsString);
- get(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString);
- post(apiPrefix + "graphql/schema", GraphQLController::getSchema, mapper::writeValueAsString);
- }
-
}