Merge pull request #1038 from cal-itp/feb-open-data-part2
Feb open data part 2
tiffanychu90 authored Feb 27, 2024
2 parents 4b3f0e9 + 47ceef7 commit 44eec17
Showing 15 changed files with 141 additions and 100 deletions.
3 changes: 3 additions & 0 deletions open_data/README.md
@@ -44,6 +44,9 @@ Traffic Ops had a request for all transit routes and transit stops to be publish
### Metadata
* [Metadata](./metadata.yml)
* [Data dictionary](./data_dictionary.yml)
* [update_vars](./update_vars.py) and [publish_utils](./publish_utils.py) contain most of the variables that are frequently updated during the publishing process.
* Apply standardized column names across published datasets, even when they differ from internal keys (`org_id` in favor of `gtfs_dataset_key`, `agency` in favor of `organization_name`).
* Since we do not save multiple versions of published datasets, columns are renamed prior to exporting the geoparquet as a zipped shapefile (see the sketch below).
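
A minimal sketch of that renaming step, using the `publish_utils` helpers added in this PR. The parquet path and the `utils.make_zipped_shapefile` call are illustrative stand-ins for how `gcs_to_esri.py` reads each dataset from `catalog.yml` and writes the zipped shapefile:

```python
import geopandas as gpd

import publish_utils
from calitp_data_analysis import geography_utils, utils

# Illustrative path; gcs_to_esri.py actually reads each dataset from catalog.yml
gdf = gpd.read_parquet("ca_transit_routes.parquet").to_crs(geography_utils.WGS84)

# Swap internal keys for the standardized public-facing names,
# then drop columns that only support internal data modeling
gdf = publish_utils.standardize_column_names(gdf).pipe(
    publish_utils.remove_internal_keys
)

# Export once, already renamed, as a zipped shapefile (helper name assumed here)
utils.make_zipped_shapefile(gdf, "ca_transit_routes")
```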

## Open Data Intake Process
* Open a [ticket](https://forms.office.com/Pages/ResponsePage.aspx?id=ZAobYkAXzEONiEVA00h1VuRQZHWRcbdNm496kj4opnZUNUo1NjRNRFpIOVRBMVFFTFJDM1JKNkY0SC4u) on the Intranet to update or add new services and provide [justification](./intake_justification.md)
31 changes: 31 additions & 0 deletions open_data/check_exported_data.ipynb
@@ -178,6 +178,16 @@
"print_stats(gdf)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b9c367fe-c7c1-4ca3-8f64-ecf94f99bd99",
"metadata": {},
"outputs": [],
"source": [
"gdf[gdf.hqta_type==\"major_stop_brt\"].route_id.value_counts()"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -254,6 +264,27 @@
"print_stats(gdf)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46f3badd-a344-49c1-a523-761251dd8e32",
"metadata": {},
"outputs": [],
"source": [
"gdf.p50_mph.hist(bins=range(0, 80, 5))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "221fa13b-14d4-4eb1-b70d-9621b82720ee",
"metadata": {},
"outputs": [],
"source": [
"for col in [\"p20_mph\", \"p50_mph\", \"p80_mph\"]:\n",
" print(gdf[col].describe())"
]
},
{
"cell_type": "code",
"execution_count": null,
53 changes: 6 additions & 47 deletions open_data/gcs_to_esri.py
@@ -12,51 +12,12 @@

from loguru import logger

import open_data
import publish_utils
from calitp_data_analysis import utils, geography_utils
from shared_utils import portfolio_utils
from update_vars import analysis_date
from update_vars import analysis_date, RUN_ME

catalog = intake.open_catalog("./catalog.yml")

def standardize_column_names(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Standardize how agency is referred to.
"""
RENAME_DICT = {
"caltrans_district": "district_name",
"organization_source_record_id": "org_id",
"organization_name": "org_name"
}
# these rename hqta datasets
# agency_name_primary, agency_name_secondary, etc
df.columns = df.columns.str.replace('agency_name', 'agency')

df = df.rename(columns = RENAME_DICT)
df

return df


def remove_internal_keys(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Remove columns used in our internal data modeling.
Leave only natural identifiers (route_id, shape_id).
Remove shape_array_key, gtfs_dataset_key, etc.
"""
exclude_list = [
"sec_elapsed", "meters_elapsed",
"name" #schedule_gtfs_dataset_name
]
cols = [c for c in df.columns]

internal_cols = [c for c in cols if "_key" in c or c in exclude_list]

print(f"drop: {internal_cols}")

return df.drop(columns = internal_cols)


def print_info(gdf: gpd.GeoDataFrame):
"""
Double check that the metadata is entered correctly and
@@ -89,13 +50,11 @@ def remove_zipped_shapefiles():
logger.add(sys.stderr,
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
level="INFO")

#datasets = list(dict(catalog).keys())
datasets = open_data.RUN_ME

for d in datasets :

for d in RUN_ME :
gdf = catalog[d].read().to_crs(geography_utils.WGS84)
gdf = standardize_column_names(gdf).pipe(remove_internal_keys)
gdf = publish_utils.standardize_column_names(gdf).pipe(
publish_utils.remove_internal_keys)

logger.info(f"********* {d} *************")
print_info(gdf)
2 changes: 1 addition & 1 deletion open_data/metadata.json

Large diffs are not rendered by default.

10 changes: 1 addition & 9 deletions open_data/open_data.py
@@ -4,16 +4,8 @@
from pathlib import Path

import metadata_update_pro
from update_vars import XML_FOLDER, META_JSON
from update_vars import XML_FOLDER, META_JSON, RUN_ME

RUN_ME = [
"ca_hq_transit_areas",
"ca_hq_transit_stops",
"ca_transit_routes",
"ca_transit_stops",
"speeds_by_stop_segments",
"speeds_by_route_time_of_day",
]

if __name__=="__main__":
assert str(Path.cwd()).endswith("open_data"), "this script must be run from open_data directory!"
61 changes: 61 additions & 0 deletions open_data/publish_utils.py
@@ -0,0 +1,61 @@
import geopandas as gpd
import pandas as pd

STANDARDIZED_COLUMNS_DICT = {
"caltrans_district": "district_name",
"organization_source_record_id": "org_id",
"organization_name": "agency",
"agency_name_primary": "agency_primary",
"agency_name_secondary": "agency_secondary"
}


# Rename columns when shapefile truncates
RENAME_HQTA = {
"agency_pri": "agency_primary",
"agency_sec": "agency_secondary",
"hqta_detai": "hqta_details",
"base64_url": "base64_url_primary",
"base64_u_1": "base64_url_secondary",
"org_id_pri": "org_id_primary",
"org_id_sec": "org_id_secondary",
}

RENAME_SPEED = {
"stop_seque": "stop_sequence",
"time_of_da": "time_of_day",
"time_perio": "time_period",
"district_n": "district_name",
"direction_": "direction_id",
"common_sha": "common_shape_id",
"avg_sched_": "avg_sched_trip_min",
"avg_rt_tri": "avg_rt_trip_min",
"caltrans_d": "district_name",
"organization_source_record_id": "org_id",
"organization_name": "agency"
}

def standardize_column_names(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Standardize how agency is referred to.
"""
return df.rename(columns = STANDARDIZED_COLUMNS_DICT)


def remove_internal_keys(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Remove columns used in our internal data modeling.
Leave only natural identifiers (route_id, shape_id).
Remove shape_array_key, gtfs_dataset_key, etc.
"""
exclude_list = [
"sec_elapsed", "meters_elapsed",
"name", "schedule_gtfs_dataset_key"
]
cols = [c for c in df.columns]

internal_cols = [c for c in cols if "_key" in c or c in exclude_list]

print(f"drop: {internal_cols}")

return df.drop(columns = internal_cols)
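
The `RENAME_HQTA` and `RENAME_SPEED` dictionaries exist because the shapefile format truncates field names to 10 characters on export. A hedged sketch of re-expanding those names when reading a published layer back in; the zip path here is hypothetical:

```python
import geopandas as gpd

from publish_utils import RENAME_HQTA

# Hypothetical local copy of the published HQ transit areas layer
gdf = gpd.read_file("ca_hq_transit_areas.zip")

# Restore the full column names that the 10-character shapefile limit truncated
gdf = gdf.rename(columns=RENAME_HQTA)
print(gdf.columns.tolist())
```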
30 changes: 3 additions & 27 deletions open_data/supplement_meta.py
@@ -9,7 +9,7 @@

from calitp_data_analysis import utils
from update_vars import analysis_date, ESRI_BASE_URL

from publish_utils import RENAME_HQTA, RENAME_SPEED

def get_esri_url(name: str)-> str:
return f"{ESRI_BASE_URL}{name}/FeatureServer"
@@ -25,30 +25,6 @@ def get_esri_url(name: str)-> str:

ROUTE_METHODOLOGY = "This data was estimated by combining GTFS real-time vehicle positions with GTFS scheduled trips and shapes. GTFS real-time (RT) vehicle positions are spatially joined to GTFS scheduled shapes, so only vehicle positions traveling along the route alignment path are kept. A sample of five vehicle positions are selected (min, 25th percentile, 50th percentile, 75th percentile, max). The trip speed is calculated using these five vehicle positions. Each trip is categorized into a time-of-day. The average speed for a route-direction-time_of_day is calculated. Additional metrics are stored, such as the number of trips observed, the average scheduled service minutes, and the average RT observed service minutes. For convenience, we also provide a singular shape (common_shape_id) to associate with a route-direction. This is the shape that had the most number of trips for a given route-direction. Time-of-day is determined by the GTFS scheduled trip start time. The trip start hour (military time) is categorized based on the following: Owl (0-3), Early AM (4-6), AM Peak (7-9), Midday (10-14), PM Peak (15-19), and Evening (20-23). The start and end hours are inclusive (e.g., 4-6 refers to 4am, 5am, and 6am)."

#--------------------------------------------------------#
# Rename columns
#--------------------------------------------------------#
RENAME_HQTA = {
"agency_pri": "agency_primary",
"agency_sec": "agency_secondary",
"hqta_detai": "hqta_details",
"base64_url": "base64_url_primary",
"base64_u_1": "base64_url_secondary",
"org_id_pri": "org_id_primary",
"org_id_sec": "org_id_secondary",
}

RENAME_SPEED = {
"stop_seque": "stop_sequence",
"time_of_da": "time_of_day",
"time_perio": "time_period",
"district_n": "district_name",
"direction_": "direction_id",
"common_sha": "common_shape_id",
"avg_sched_": "avg_sched_trip_min",
"avg_rt_tri": "avg_rt_trip_min",
"caltrans_d": "district_name"
}

#--------------------------------------------------------#
# Put supplemental parts together into dict
@@ -95,7 +71,7 @@ def get_esri_url(name: str)-> str:

METADATA_FILE = "metadata.yml"

with open(f"./{METADATA_FILE}") as f:
with open(METADATA_FILE) as f:
meta = yaml.load(f, yaml.Loader)

# The dictionaries for each dataset are stored in a list
Expand All @@ -122,7 +98,7 @@ def get_esri_url(name: str)-> str:
# Output a json to use in ArcPro, and only of the subset of dict that's meta["tables"]
JSON_FILE = utils.sanitize_file_path(METADATA_FILE)

with open(f"./{JSON_FILE}.json", 'w') as f:
with open(f"{JSON_FILE}.json", 'w') as f:
json.dump(output, f)

print(f"{JSON_FILE} produced")
16 changes: 13 additions & 3 deletions open_data/update_data_dict.py
@@ -7,11 +7,11 @@
from pathlib import Path
from typing import Union

import publish_utils
from update_vars import analysis_date

catalog = intake.open_catalog("catalog.yml")


def unpack_list_of_tables_as_dict(list_of_dict: list) -> dict:
"""
In the yml, the datasets come as a list of dictionary items.
@@ -54,8 +54,18 @@ def new_columns_for_data_dict(

# Columns in our dataset
FILE = catalog[t].urlpath
col_list = gpd.read_parquet(FILE).columns.tolist()

gdf = gpd.read_parquet(FILE).pipe(
publish_utils.standardize_column_names
).pipe(
publish_utils.remove_internal_keys)

if "hq_" in t:
gdf = gdf.rename(columns = publish_utils.RENAME_HQTA)
elif "speed" in t:
gdf = gdf.rename(columns = publish_utils.RENAME_SPEED)

col_list = gdf.columns.tolist()

# Columns included in data dictionary
cols_defined = [c for c in dict_of_tables[t].keys()]
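
`new_columns_for_data_dict` compares the columns of each exported dataset (after standardizing and renaming) against the columns already defined in `data_dictionary.yml`. A small sketch of that comparison with made-up column lists:

```python
# Hypothetical snapshot of what new_columns_for_data_dict compares:
# columns present in the exported dataset vs. columns documented in
# data_dictionary.yml for the same table.
col_list = ["agency", "org_id", "route_id", "route_name", "p50_mph"]
cols_defined = ["agency", "org_id", "route_id", "route_name"]

# Columns present in the data but not yet defined in the dictionary
new_cols = [c for c in col_list if c not in cols_defined]
print(f"add to data_dictionary.yml: {new_cols}")
```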

11 changes: 10 additions & 1 deletion open_data/update_vars.py
@@ -13,4 +13,13 @@
XML_FOLDER = Path("xml")
DEFAULT_XML_TEMPLATE = XML_FOLDER.joinpath(Path("default_pro.xml"))
META_JSON = Path("metadata.json")
DATA_DICT_YML = Path("data_dictionary.yml")
DATA_DICT_YML = Path("data_dictionary.yml")

RUN_ME = [
"ca_hq_transit_areas",
"ca_hq_transit_stops",
"ca_transit_routes",
"ca_transit_stops",
"speeds_by_stop_segments",
"speeds_by_route_time_of_day",
]
4 changes: 2 additions & 2 deletions open_data/xml/ca_hq_transit_areas.xml
@@ -20,7 +20,7 @@
</ns0:hierarchyLevelName>
<ns0:contact ns1:nilReason="missing"></ns0:contact>
<ns0:dateStamp>
<ns1:Date>2024-01-26</ns1:Date>
<ns1:Date>2024-02-23</ns1:Date>
</ns0:dateStamp>
<ns0:metadataStandardName>
<ns1:CharacterString>ISO 19139 Geographic Information - Metadata - Implementation Specification</ns1:CharacterString>
@@ -85,7 +85,7 @@
<ns0:date>
<ns0:CI_Date>
<ns0:date>
<ns1:Date>2024-01-17</ns1:Date>
<ns1:Date>2024-02-14</ns1:Date>
</ns0:date>
<ns0:dateType>
<ns0:CI_DateTypeCode codeList="http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml#CI_DateTypeCode" codeListValue="revision" codeSpace="ISOTC211/19115">
4 changes: 2 additions & 2 deletions open_data/xml/ca_hq_transit_stops.xml
@@ -20,7 +20,7 @@
</ns0:hierarchyLevelName>
<ns0:contact ns1:nilReason="missing"></ns0:contact>
<ns0:dateStamp>
<ns1:Date>2024-01-26</ns1:Date>
<ns1:Date>2024-02-23</ns1:Date>
</ns0:dateStamp>
<ns0:metadataStandardName>
<ns1:CharacterString>ISO 19139 Geographic Information - Metadata - Implementation Specification</ns1:CharacterString>
@@ -85,7 +85,7 @@
<ns0:date>
<ns0:CI_Date>
<ns0:date>
<ns1:Date>2024-01-17</ns1:Date>
<ns1:Date>2024-02-14</ns1:Date>
</ns0:date>
<ns0:dateType>
<ns0:CI_DateTypeCode codeList="http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml#CI_DateTypeCode" codeListValue="revision" codeSpace="ISOTC211/19115">
4 changes: 2 additions & 2 deletions open_data/xml/ca_transit_routes.xml
@@ -20,7 +20,7 @@
</ns0:hierarchyLevelName>
<ns0:contact ns1:nilReason="missing"></ns0:contact>
<ns0:dateStamp>
<ns1:Date>2024-01-26</ns1:Date>
<ns1:Date>2024-02-23</ns1:Date>
</ns0:dateStamp>
<ns0:metadataStandardName>
<ns1:CharacterString>ISO 19139 Geographic Information - Metadata - Implementation Specification</ns1:CharacterString>
@@ -85,7 +85,7 @@
<ns0:date>
<ns0:CI_Date>
<ns0:date>
<ns1:Date>2024-01-17</ns1:Date>
<ns1:Date>2024-02-14</ns1:Date>
</ns0:date>
<ns0:dateType>
<ns0:CI_DateTypeCode codeList="http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml#CI_DateTypeCode" codeListValue="revision" codeSpace="ISOTC211/19115">
4 changes: 2 additions & 2 deletions open_data/xml/ca_transit_stops.xml
@@ -20,7 +20,7 @@
</ns0:hierarchyLevelName>
<ns0:contact ns1:nilReason="missing"></ns0:contact>
<ns0:dateStamp>
<ns1:Date>2024-01-26</ns1:Date>
<ns1:Date>2024-02-23</ns1:Date>
</ns0:dateStamp>
<ns0:metadataStandardName>
<ns1:CharacterString>ISO 19139 Geographic Information - Metadata - Implementation Specification</ns1:CharacterString>
@@ -85,7 +85,7 @@
<ns0:date>
<ns0:CI_Date>
<ns0:date>
<ns1:Date>2024-01-17</ns1:Date>
<ns1:Date>2024-02-14</ns1:Date>
</ns0:date>
<ns0:dateType>
<ns0:CI_DateTypeCode codeList="http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml#CI_DateTypeCode" codeListValue="revision" codeSpace="ISOTC211/19115">