Diffstat (limited to 'src/gtfs_vigo_stops')
-rw-r--r--  src/gtfs_vigo_stops/.gitignore             13
-rw-r--r--  src/gtfs_vigo_stops/pyproject.toml         12
-rw-r--r--  src/gtfs_vigo_stops/pytest.ini              4
-rw-r--r--  src/gtfs_vigo_stops/src/common.py          60
-rw-r--r--  src/gtfs_vigo_stops/src/download.py       130
-rw-r--r--  src/gtfs_vigo_stops/src/logger.py          54
-rw-r--r--  src/gtfs_vigo_stops/src/report_writer.py   68
-rw-r--r--  src/gtfs_vigo_stops/src/routes.py          43
-rw-r--r--  src/gtfs_vigo_stops/src/services.py       107
-rw-r--r--  src/gtfs_vigo_stops/src/stop_times.py     120
-rw-r--r--  src/gtfs_vigo_stops/src/stops.py           51
-rw-r--r--  src/gtfs_vigo_stops/src/street_name.py     51
-rw-r--r--  src/gtfs_vigo_stops/src/trips.py          120
-rw-r--r--  src/gtfs_vigo_stops/stop_report.py        311
-rw-r--r--  src/gtfs_vigo_stops/uv.lock               186
15 files changed, 1330 insertions(+), 0 deletions(-)
diff --git a/src/gtfs_vigo_stops/.gitignore b/src/gtfs_vigo_stops/.gitignore
new file mode 100644
index 0000000..2be2c5f
--- /dev/null
+++ b/src/gtfs_vigo_stops/.gitignore
@@ -0,0 +1,13 @@
+feed/
+output/
+
+# Python-generated files
+__pycache__/
+*.py[oc]
+build/
+dist/
+wheels/
+*.egg-info
+
+# Virtual environments
+.venv
diff --git a/src/gtfs_vigo_stops/pyproject.toml b/src/gtfs_vigo_stops/pyproject.toml
new file mode 100644
index 0000000..f0268e7
--- /dev/null
+++ b/src/gtfs_vigo_stops/pyproject.toml
@@ -0,0 +1,12 @@
+[project]
+name = "gtfs-vigo"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.13"
+dependencies = [
+ "colorama>=0.4.6",
+ "jinja2>=3.1.6",
+ "pytest>=8.4.1",
+ "requests>=2.32.3",
+]
diff --git a/src/gtfs_vigo_stops/pytest.ini b/src/gtfs_vigo_stops/pytest.ini
new file mode 100644
index 0000000..e455bb4
--- /dev/null
+++ b/src/gtfs_vigo_stops/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+minversion = 6.0
+testpaths = tests
+python_files = test_*.py
diff --git a/src/gtfs_vigo_stops/src/common.py b/src/gtfs_vigo_stops/src/common.py
new file mode 100644
index 0000000..fbb3a73
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/common.py
@@ -0,0 +1,60 @@
+"""
+Common utilities for GTFS report generation.
+"""
+import os
+import csv
+from datetime import datetime, timedelta
+from typing import List
+
+
+def get_all_feed_dates(feed_dir: str) -> List[str]:
+ """
+ Returns all dates the feed is valid for, using calendar.txt if present, else calendar_dates.txt.
+ """
+ calendar_path = os.path.join(feed_dir, 'calendar.txt')
+ calendar_dates_path = os.path.join(feed_dir, 'calendar_dates.txt')
+
+ # Try calendar.txt first
+ if os.path.exists(calendar_path):
+ with open(calendar_path, encoding='utf-8') as f:
+ reader = csv.DictReader(f)
+ start_dates: List[str] = []
+ end_dates: List[str] = []
+ for row in reader:
+ if row.get('start_date') and row.get('end_date'):
+ start_dates.append(row['start_date'])
+ end_dates.append(row['end_date'])
+ if start_dates and end_dates:
+ min_date = min(start_dates)
+ max_date = max(end_dates)
+ # Convert YYYYMMDD to YYYY-MM-DD
+ start = datetime.strptime(min_date, '%Y%m%d')
+ end = datetime.strptime(max_date, '%Y%m%d')
+ result: List[str] = []
+ while start <= end:
+ result.append(start.strftime('%Y-%m-%d'))
+ start += timedelta(days=1)
+ return result
+
+ # Fallback: use calendar_dates.txt
+ if os.path.exists(calendar_dates_path):
+ with open(calendar_dates_path, encoding='utf-8') as f:
+ reader = csv.DictReader(f)
+ dates: set[str] = set()
+ for row in reader:
+ if row.get('exception_type') == '1' and row.get('date'):
+ # Convert YYYYMMDD to YYYY-MM-DD
+ d = row['date']
+ dates.add(f"{d[:4]}-{d[4:6]}-{d[6:]}")
+ return sorted(dates)
+
+ return []
+
+
+def time_to_seconds(time_str: str) -> int:
+ """Convert HH:MM:SS to seconds since midnight."""
+ parts = time_str.split(':')
+ if len(parts) != 3:
+ return 0
+ hours, minutes, seconds = map(int, parts)
+ return hours * 3600 + minutes * 60 + seconds
diff --git a/src/gtfs_vigo_stops/src/download.py b/src/gtfs_vigo_stops/src/download.py
new file mode 100644
index 0000000..19125bc
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/download.py
@@ -0,0 +1,130 @@
+import os
+import tempfile
+import zipfile
+import requests
+import json
+from typing import Optional, Tuple
+
+from src.logger import get_logger
+
+logger = get_logger("download")
+
+def _get_metadata_path(output_dir: str) -> str:
+ """Get the path to the metadata file for storing ETag and Last-Modified info."""
+ return os.path.join(output_dir, '.gtfsmetadata')
+
+def _load_metadata(output_dir: str) -> Optional[dict]:
+ """Load existing metadata from the output directory."""
+ metadata_path = _get_metadata_path(output_dir)
+ if os.path.exists(metadata_path):
+ try:
+ with open(metadata_path, 'r', encoding='utf-8') as f:
+ return json.load(f)
+ except (json.JSONDecodeError, IOError) as e:
+ logger.warning(f"Failed to load metadata from {metadata_path}: {e}")
+ return None
+
+def _save_metadata(output_dir: str, etag: Optional[str], last_modified: Optional[str]) -> None:
+ """Save ETag and Last-Modified metadata to the output directory."""
+ metadata_path = _get_metadata_path(output_dir)
+ metadata = {
+ 'etag': etag,
+ 'last_modified': last_modified
+ }
+
+ # Ensure output directory exists
+ os.makedirs(output_dir, exist_ok=True)
+
+ try:
+ with open(metadata_path, 'w', encoding='utf-8') as f:
+ json.dump(metadata, f, indent=2)
+ except IOError as e:
+ logger.warning(f"Failed to save metadata to {metadata_path}: {e}")
+
+def _check_if_modified(feed_url: str, output_dir: str) -> Tuple[bool, Optional[str], Optional[str]]:
+ """
+ Check if the feed has been modified using conditional headers.
+ Returns (is_modified, etag, last_modified)
+ """
+ metadata = _load_metadata(output_dir)
+ if not metadata:
+ return True, None, None
+
+ headers = {}
+ if metadata.get('etag'):
+ headers['If-None-Match'] = metadata['etag']
+ if metadata.get('last_modified'):
+ headers['If-Modified-Since'] = metadata['last_modified']
+
+ if not headers:
+ return True, None, None
+
+ try:
+ response = requests.head(feed_url, headers=headers)
+
+ if response.status_code == 304:
+ logger.info("Feed has not been modified (304 Not Modified), skipping download")
+ return False, metadata.get('etag'), metadata.get('last_modified')
+ elif response.status_code == 200:
+ etag = response.headers.get('ETag')
+ last_modified = response.headers.get('Last-Modified')
+ return True, etag, last_modified
+ else:
+ logger.warning(f"Unexpected response status {response.status_code} when checking for modifications, proceeding with download")
+ return True, None, None
+ except requests.RequestException as e:
+ logger.warning(f"Failed to check if feed has been modified: {e}, proceeding with download")
+ return True, None, None
+
+def download_feed_from_url(feed_url: str, output_dir: Optional[str] = None, force_download: bool = False) -> Optional[str]:
+ """
+ Download GTFS feed from URL.
+
+ Args:
+ feed_url: URL to download the GTFS feed from
+ output_dir: Directory where reports will be written (used for metadata storage)
+ force_download: If True, skip conditional download checks
+
+ Returns:
+ Path to the directory containing the extracted GTFS files, or None if download was skipped
+ """
+
+ # Check if we need to download the feed
+ if not force_download and output_dir:
+ is_modified, cached_etag, cached_last_modified = _check_if_modified(feed_url, output_dir)
+ if not is_modified:
+ logger.info("Feed has not been modified, skipping download")
+ return None
+
+ # Create a directory in the system temporary directory
+ temp_dir = tempfile.mkdtemp(prefix='gtfs_vigo_')
+
+ # Create a temporary zip file in the temporary directory
+ zip_filename = os.path.join(temp_dir, 'gtfs_vigo.zip')
+
+ headers = {}
+ response = requests.get(feed_url, headers=headers)
+
+ if response.status_code != 200:
+ raise Exception(f"Failed to download GTFS data: {response.status_code}")
+
+ with open(zip_filename, 'wb') as file:
+ file.write(response.content)
+
+ # Extract and save metadata if output_dir is provided
+ if output_dir:
+ etag = response.headers.get('ETag')
+ last_modified = response.headers.get('Last-Modified')
+ if etag or last_modified:
+ _save_metadata(output_dir, etag, last_modified)
+
+ # Extract the zip file
+ with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
+ zip_ref.extractall(temp_dir)
+
+ # Clean up the downloaded zip file
+ os.remove(zip_filename)
+
+ logger.info(f"GTFS feed downloaded from {feed_url} and extracted to {temp_dir}")
+
+ return temp_dir
\ No newline at end of file
diff --git a/src/gtfs_vigo_stops/src/logger.py b/src/gtfs_vigo_stops/src/logger.py
new file mode 100644
index 0000000..9488076
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/logger.py
@@ -0,0 +1,54 @@
+"""
+Logging configuration for the GTFS application.
+"""
+import logging
+from colorama import init, Fore, Style
+
+# Initialize Colorama (required on Windows)
+init(autoreset=True)
+
+class ColorFormatter(logging.Formatter):
+ def format(self, record: logging.LogRecord):
+ # Base format
+ log_format = "%(asctime)s | %(levelname)-8s | %(name)s | %(funcName)s:%(lineno)d | %(message)s"
+
+ # Apply colors based on log level
+ if record.levelno == logging.DEBUG:
+ prefix = Style.DIM + Fore.WHITE # "Dark grey"
+ elif record.levelno == logging.INFO:
+ prefix = Fore.CYAN
+ elif record.levelno == logging.WARNING:
+ prefix = Fore.YELLOW
+ elif record.levelno == logging.ERROR:
+ prefix = Fore.RED
+ elif record.levelno == logging.CRITICAL:
+ prefix = Style.BRIGHT + Fore.RED
+ else:
+ prefix = ""
+
+ # Add color to the entire line
+ formatter = logging.Formatter(
+ prefix + log_format + Style.RESET_ALL, "%Y-%m-%d %H:%M:%S")
+ return formatter.format(record)
+
+def get_logger(name: str) -> logging.Logger:
+ """
+ Create and return a logger with the given name.
+
+ Args:
+ name (str): The name of the logger.
+
+ Returns:
+ logging.Logger: Configured logger instance.
+ """
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.INFO)
+
+ # Only add handler if it doesn't already have one
+ if not logger.handlers:
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(logging.DEBUG)
+ console_handler.setFormatter(ColorFormatter())
+ logger.addHandler(console_handler)
+
+ return logger
diff --git a/src/gtfs_vigo_stops/src/report_writer.py b/src/gtfs_vigo_stops/src/report_writer.py
new file mode 100644
index 0000000..aa57834
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/report_writer.py
@@ -0,0 +1,68 @@
+"""
+Report writers for various output formats (HTML, JSON).
+Centralizes all write operations for different report types.
+"""
+from typing import List, Dict, Any
+from src.logger import get_logger
+import os
+import json
+
+
+def write_stop_json(output_dir: str, date: str, stop_code: str, arrivals: List[Dict[str, Any]]) -> None:
+ """
+ Write stop arrivals data to a JSON file.
+
+ Args:
+ output_dir: Base output directory
+ date: Date string for the data
+ stop_code: Stop code identifier
+ arrivals: List of arrival dictionaries
+ """
+ logger = get_logger("report_writer")
+
+ try:
+ # Create the stops directory for this date
+ date_dir = os.path.join(output_dir, date)
+ os.makedirs(date_dir, exist_ok=True)
+
+ # Create the JSON file
+ file_path = os.path.join(date_dir, f"{stop_code}.json")
+
+ with open(file_path, 'w', encoding='utf-8') as f:
+ json.dump(arrivals, f, ensure_ascii=False)
+
+ logger.debug(f"Stop JSON written to: {file_path}")
+ except Exception as e:
+ logger.error(f"Error writing stop JSON to {output_dir}/stops/{date}/{stop_code}.json: {e}")
+ raise
+
+
+def write_index_json(output_dir: str, data: Dict[str, Any], filename: str = "index.json", pretty: bool = False) -> None:
+ """
+ Write index data to a JSON file.
+
+ Args:
+ output_dir: Directory where the JSON file should be written
+ data: Dictionary containing the index data
+ filename: Name of the JSON file (default: "index.json")
+ pretty: Whether to format JSON with indentation
+ """
+ logger = get_logger("report_writer")
+
+ try:
+ # Create the output directory if it doesn't exist
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Write the index.json file
+ index_filepath = os.path.join(output_dir, filename)
+ with open(index_filepath, 'w', encoding='utf-8') as f:
+ if pretty:
+ json.dump(data, f, ensure_ascii=False, indent=2)
+ else:
+ json.dump(data, f, ensure_ascii=False, separators=(',', ':'))
+
+ logger.info(f"Index JSON written to: {index_filepath}")
+ except Exception as e:
+ logger.error(f"Error writing index JSON to {output_dir}/{filename}: {e}")
+ raise
diff --git a/src/gtfs_vigo_stops/src/routes.py b/src/gtfs_vigo_stops/src/routes.py
new file mode 100644
index 0000000..e67a1a4
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/routes.py
@@ -0,0 +1,43 @@
+"""
+Module for loading and querying GTFS routes data.
+"""
+import os
+import csv
+from src.logger import get_logger
+
+logger = get_logger("routes")
+
+def load_routes(feed_dir: str) -> dict[str, dict[str, str]]:
+ """
+ Load routes data from the GTFS feed.
+
+ Returns:
+ dict[str, dict[str, str]]: A dictionary where keys are route IDs and values are dictionaries
+ containing route_short_name and route_color.
+ """
+ routes: dict[str, dict[str, str]] = {}
+ routes_file_path = os.path.join(feed_dir, 'routes.txt')
+
+ try:
+ with open(routes_file_path, 'r', encoding='utf-8') as routes_file:
+ reader = csv.DictReader(routes_file)
+ header = reader.fieldnames or []
+ if 'route_color' not in header:
+ logger.warning("Column 'route_color' not found in routes.txt. Defaulting to black (#000000).")
+
+ for row in reader:
+ route_id = row['route_id']
+ if 'route_color' in row and row['route_color']:
+ route_color = row['route_color']
+ else:
+ route_color = '000000'
+ routes[route_id] = {
+ 'route_short_name': row['route_short_name'],
+ 'route_color': route_color
+ }
+ except FileNotFoundError:
+ raise FileNotFoundError(f"Routes file not found at {routes_file_path}")
+ except KeyError as e:
+ raise KeyError(f"Missing required column in routes file: {e}")
+
+ return routes
diff --git a/src/gtfs_vigo_stops/src/services.py b/src/gtfs_vigo_stops/src/services.py
new file mode 100644
index 0000000..9b16173
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/services.py
@@ -0,0 +1,107 @@
+import os
+import datetime
+from src.logger import get_logger
+
+logger = get_logger("services")
+
+def get_active_services(feed_dir: str, date: str) -> list[str]:
+ """
+ Get active services for a given date based on the 'calendar.txt' and 'calendar_dates.txt' files.
+
+ Args:
+ feed_dir (str): Directory containing the GTFS feed files.
+ date (str): Date in 'YYYY-MM-DD' format.
+
+ Returns:
+ list[str]: List of active service IDs for the given date.
+
+ Raises:
+ ValueError: If the date format is incorrect.
+ """
+ search_date = date.replace("-", "").replace(":", "").replace("/", "")
+ weekday = datetime.datetime.strptime(date, '%Y-%m-%d').weekday()
+ active_services: list[str] = []
+
+ try:
+ with open(os.path.join(feed_dir, 'calendar.txt'), 'r', encoding="utf-8") as calendar_file:
+ lines = calendar_file.readlines()
+ if len(lines) > 1:
+ # First parse the header, get each column's index
+ header = lines[0].strip().split(',')
+ try:
+ service_id_index = header.index('service_id')
+ monday_index = header.index('monday')
+ tuesday_index = header.index('tuesday')
+ wednesday_index = header.index('wednesday')
+ thursday_index = header.index('thursday')
+ friday_index = header.index('friday')
+ saturday_index = header.index('saturday')
+ sunday_index = header.index('sunday')
+ except ValueError as e:
+ logger.error(f"Required column not found in header: {e}")
+ return active_services
+ # Now read the rest of the file, find all services where the day of the week matches
+ weekday_columns = {
+ 0: monday_index,
+ 1: tuesday_index,
+ 2: wednesday_index,
+ 3: thursday_index,
+ 4: friday_index,
+ 5: saturday_index,
+ 6: sunday_index
+ }
+
+ for idx, line in enumerate(lines[1:], 1):
+ parts = line.strip().split(',')
+ if len(parts) < len(header):
+ logger.warning(
+ f"Skipping malformed line in calendar.txt line {idx+1}: {line.strip()}")
+ continue
+
+ service_id = parts[service_id_index]
+ day_value = parts[weekday_columns[weekday]]
+
+ if day_value == '1':
+ active_services.append(service_id)
+ except FileNotFoundError:
+ logger.warning("calendar.txt file not found.")
+
+ try:
+ with open(os.path.join(feed_dir, 'calendar_dates.txt'), 'r', encoding="utf-8") as calendar_dates_file:
+ lines = calendar_dates_file.readlines()
+ if len(lines) <= 1:
+ logger.warning(
+ "calendar_dates.txt file is empty or has only header line, not processing.")
+ return active_services
+
+ header = lines[0].strip().split(',')
+ try:
+ service_id_index = header.index('service_id')
+ date_index = header.index('date')
+ exception_type_index = header.index('exception_type')
+ except ValueError as e:
+ logger.error(f"Required column not found in header: {e}")
+ return active_services
+
+ # Now read the rest of the file, find all services where 'date' matches the search_date
+ # Start from 1 to skip header
+ for idx, line in enumerate(lines[1:], 1):
+ parts = line.strip().split(',')
+ if len(parts) < len(header):
+ logger.warning(
+ f"Skipping malformed line in calendar_dates.txt line {idx+1}: {line.strip()}")
+ continue
+
+ service_id = parts[service_id_index]
+ date_value = parts[date_index]
+ exception_type = parts[exception_type_index]
+
+ if date_value == search_date and exception_type == '1':
+ active_services.append(service_id)
+
+ if date_value == search_date and exception_type == '2':
+ if service_id in active_services:
+ active_services.remove(service_id)
+ except FileNotFoundError:
+ logger.warning("calendar_dates.txt file not found.")
+
+ return active_services
diff --git a/src/gtfs_vigo_stops/src/stop_times.py b/src/gtfs_vigo_stops/src/stop_times.py
new file mode 100644
index 0000000..f3c3f25
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/stop_times.py
@@ -0,0 +1,120 @@
+"""
+Functions for handling GTFS stop_times data.
+"""
+import csv
+import os
+from src.logger import get_logger
+
+logger = get_logger("stop_times")
+
+
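+# Module-level caches: STOP_TIMES_BY_FEED keeps every trip's stop times per feed directory,
+# while STOP_TIMES_BY_REQUEST memoises the subsets returned for a given (feed_dir, trip-id set) request.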
+STOP_TIMES_BY_FEED: dict[str, dict[str, list["StopTime"]]] = {}
+STOP_TIMES_BY_REQUEST: dict[tuple[str, frozenset[str]], dict[str, list["StopTime"]]] = {}
+
+class StopTime:
+ """
+ Class representing a stop time entry in the GTFS data.
+ """
+ def __init__(self, trip_id: str, arrival_time: str, departure_time: str, stop_id: str, stop_sequence: int, shape_dist_traveled: float | None):
+ self.trip_id = trip_id
+ self.arrival_time = arrival_time
+ self.departure_time = departure_time
+ self.stop_id = stop_id
+ self.stop_sequence = stop_sequence
+ self.shape_dist_traveled = shape_dist_traveled
+ self.day_change = False # New attribute to indicate day change
+
+ def __str__(self):
+ return f"StopTime({self.trip_id=}, {self.arrival_time=}, {self.departure_time=}, {self.stop_id=}, {self.stop_sequence=})"
+
+
+def _load_stop_times_for_feed(feed_dir: str) -> dict[str, list[StopTime]]:
+ """Load and cache all stop_times for a feed directory."""
+ if feed_dir in STOP_TIMES_BY_FEED:
+ return STOP_TIMES_BY_FEED[feed_dir]
+
+ stops: dict[str, list[StopTime]] = {}
+
+ try:
+ with open(os.path.join(feed_dir, 'stop_times.txt'), 'r', encoding="utf-8", newline='') as stop_times_file:
+ reader = csv.DictReader(stop_times_file)
+ if reader.fieldnames is None:
+ logger.error("stop_times.txt missing header row.")
+ STOP_TIMES_BY_FEED[feed_dir] = {}
+ return STOP_TIMES_BY_FEED[feed_dir]
+
+ required_columns = ['trip_id', 'arrival_time', 'departure_time', 'stop_id', 'stop_sequence']
+ missing_columns = [col for col in required_columns if col not in reader.fieldnames]
+ if missing_columns:
+ logger.error(f"Required columns not found in header: {missing_columns}")
+ STOP_TIMES_BY_FEED[feed_dir] = {}
+ return STOP_TIMES_BY_FEED[feed_dir]
+
+ has_shape_dist = 'shape_dist_traveled' in reader.fieldnames
+ if not has_shape_dist:
+ logger.warning("Column 'shape_dist_traveled' not found in stop_times.txt. Distances will be set to None.")
+
+ for row in reader:
+ trip_id = row['trip_id']
+ if trip_id not in stops:
+ stops[trip_id] = []
+
+ dist = None
+ if has_shape_dist and row['shape_dist_traveled']:
+ try:
+ dist = float(row['shape_dist_traveled'])
+ except ValueError:
+ pass
+
+ try:
+ stops[trip_id].append(StopTime(
+ trip_id=trip_id,
+ arrival_time=row['arrival_time'],
+ departure_time=row['departure_time'],
+ stop_id=row['stop_id'],
+ stop_sequence=int(row['stop_sequence']),
+ shape_dist_traveled=dist
+ ))
+ except ValueError as e:
+ logger.warning(f"Error parsing stop_sequence for trip {trip_id}: {e}")
+
+ for trip_stop_times in stops.values():
+ trip_stop_times.sort(key=lambda st: st.stop_sequence)
+
+ except FileNotFoundError:
+ logger.warning("stop_times.txt file not found.")
+ stops = {}
+
+ STOP_TIMES_BY_FEED[feed_dir] = stops
+ return stops
+
+
+def get_stops_for_trips(feed_dir: str, trip_ids: list[str]) -> dict[str, list[StopTime]]:
+ """
+ Get stops for a list of trip IDs based on the cached 'stop_times.txt' data.
+ """
+ if not trip_ids:
+ return {}
+
+ request_key = (feed_dir, frozenset(trip_ids))
+ cached_subset = STOP_TIMES_BY_REQUEST.get(request_key)
+ if cached_subset is not None:
+ return cached_subset
+
+ feed_cache = _load_stop_times_for_feed(feed_dir)
+ if not feed_cache:
+ STOP_TIMES_BY_REQUEST[request_key] = {}
+ return {}
+
+ result: dict[str, list[StopTime]] = {}
+ seen: set[str] = set()
+ for trip_id in trip_ids:
+ if trip_id in seen:
+ continue
+ seen.add(trip_id)
+ trip_stop_times = feed_cache.get(trip_id)
+ if trip_stop_times:
+ result[trip_id] = trip_stop_times
+
+ STOP_TIMES_BY_REQUEST[request_key] = result
+ return result
diff --git a/src/gtfs_vigo_stops/src/stops.py b/src/gtfs_vigo_stops/src/stops.py
new file mode 100644
index 0000000..4afe69c
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/stops.py
@@ -0,0 +1,51 @@
+import csv
+import os
+from dataclasses import dataclass
+from typing import Dict, Optional
+from src.logger import get_logger
+
+logger = get_logger("stops")
+
+
+@dataclass
+class Stop:
+ stop_id: str
+ stop_code: Optional[str]
+ stop_name: Optional[str]
+ stop_lat: Optional[float]
+ stop_lon: Optional[float]
+
+
+CACHED_STOPS: dict[str, dict[str, Stop]] = {}
+
+def get_all_stops(feed_dir: str) -> Dict[str, Stop]:
+ if feed_dir in CACHED_STOPS:
+ return CACHED_STOPS[feed_dir]
+
+ stops: Dict[str, Stop] = {}
+ file_path = os.path.join(feed_dir, 'stops.txt')
+
+ try:
+ with open(file_path, 'r', encoding="utf-8", newline='') as f:
+ reader = csv.DictReader(f, quotechar='"', delimiter=',')
+ for row_num, row in enumerate(reader, start=2):
+ try:
+ stop = Stop(
+ stop_id=row['stop_id'],
+ stop_code=row.get('stop_code'),
+ stop_name=(row.get('stop_name') or '').strip() or row.get('stop_desc'),
+ stop_lat=float(row['stop_lat']) if row.get('stop_lat') else None,
+ stop_lon=float(row['stop_lon']) if row.get('stop_lon') else None,
+ )
+ stops[stop.stop_id] = stop
+ except Exception as e:
+ logger.warning(f"Error parsing stops.txt line {row_num}: {e} - line data: {row}")
+
+ except FileNotFoundError:
+ logger.error(f"File not found: {file_path}")
+ except Exception as e:
+ logger.error(f"Error reading stops.txt: {e}")
+
+ CACHED_STOPS[feed_dir] = stops
+
+ return stops
diff --git a/src/gtfs_vigo_stops/src/street_name.py b/src/gtfs_vigo_stops/src/street_name.py
new file mode 100644
index 0000000..4bfbdba
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/street_name.py
@@ -0,0 +1,51 @@
+import re
+
+
+re_remove_quotation_marks = re.compile(r'["“”]', re.IGNORECASE)
+re_anything_before_stopcharacters_with_parentheses = re.compile(
+ r'^(.*?)(?:,|\s\s|\s-\s| \d| S\/N|\s\()', re.IGNORECASE)
+re_remove_street_type = re.compile(
+ r'^(?:Rúa|Avda\.?|Avenida|Camiño|Estrada)(?:\s+d[aeo]s?)?\s*', re.IGNORECASE)
+
+exception_streets = [
+ "Avda. do Aeroporto",
+ "Avda. de Samil",
+ "Avda. de Castrelos",
+ "Estrada da Garrida",
+ "Estrada de Valadares",
+ "Estrada do Monte Alba",
+ "Estrada da Gándara",
+ "Estrada do Vao",
+ "Avda. do Tranvía",
+ "Avda. da Atlántida",
+ "Avda. da Ponte",
+ "Rúa da Cruz",
+ "Estrada das Prantas"
+]
+
+
+def get_street_name(original_name: str) -> str:
+ original_name = re.sub(re_remove_quotation_marks,
+ '', original_name).strip()
+ match = re.match(
+ re_anything_before_stopcharacters_with_parentheses, original_name)
+ if match:
+ street_name = match.group(1)
+ else:
+ street_name = original_name
+
+ if street_name in exception_streets:
+ return street_name
+
+ street_name = re.sub(re_remove_street_type, '', street_name).strip()
+ return street_name
+
+
+def normalise_stop_name(original_name: str|None) -> str:
+ if original_name is None:
+ return ''
+ stop_name = re.sub(re_remove_quotation_marks, '', original_name).strip()
+
+ # Render double-space separators in the raw name as ", "
+ stop_name = stop_name.replace('  ', ', ')
+
+ return stop_name
diff --git a/src/gtfs_vigo_stops/src/trips.py b/src/gtfs_vigo_stops/src/trips.py
new file mode 100644
index 0000000..0c1375c
--- /dev/null
+++ b/src/gtfs_vigo_stops/src/trips.py
@@ -0,0 +1,120 @@
+"""
+Functions for handling GTFS trip data.
+"""
+import os
+from src.logger import get_logger
+
+logger = get_logger("trips")
+
+class TripLine:
+ """
+ Class representing a trip line in the GTFS data.
+ """
+ def __init__(self, route_id: str, service_id: str, trip_id: str, headsign: str, direction_id: int, shape_id: str|None = None):
+ self.route_id = route_id
+ self.service_id = service_id
+ self.trip_id = trip_id
+ self.headsign = headsign
+ self.direction_id = direction_id
+ self.shape_id = shape_id
+ self.route_short_name = ""
+ self.route_color = ""
+
+ def __str__(self):
+ return f"TripLine({self.route_id=}, {self.service_id=}, {self.trip_id=}, {self.headsign=}, {self.direction_id=}, {self.shape_id=})"
+
+
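+# Cache of parsed trips keyed by feed directory, then by service_id, so trips.txt is read at most once per feed.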
+TRIPS_BY_SERVICE_ID: dict[str, dict[str, list[TripLine]]] = {}
+
+
+def get_trips_for_services(feed_dir: str, service_ids: list[str]) -> dict[str, list[TripLine]]:
+ """
+ Get trips for a list of service IDs based on the 'trips.txt' file.
+ Uses caching to avoid reading and parsing the file multiple times.
+
+ Args:
+ feed_dir (str): Directory containing the GTFS feed files.
+ service_ids (list[str]): List of service IDs to find trips for.
+
+ Returns:
+ dict[str, list[TripLine]]: Dictionary mapping service IDs to lists of trip objects.
+ """
+ # Check if we already have cached data for this feed directory
+ if feed_dir in TRIPS_BY_SERVICE_ID:
+ logger.debug(f"Using cached trips data for {feed_dir}")
+ # Return only the trips for the requested service IDs
+ return {service_id: TRIPS_BY_SERVICE_ID[feed_dir].get(service_id, [])
+ for service_id in service_ids}
+
+ trips: dict[str, list[TripLine]] = {}
+
+ try:
+ with open(os.path.join(feed_dir, 'trips.txt'), 'r', encoding="utf-8") as trips_file:
+ lines = trips_file.readlines()
+ if len(lines) <= 1:
+ logger.warning(
+ "trips.txt file is empty or has only header line, not processing.")
+ return trips
+
+ header = lines[0].strip().split(',')
+ try:
+ service_id_index = header.index('service_id')
+ trip_id_index = header.index('trip_id')
+ route_id_index = header.index('route_id')
+ headsign_index = header.index('trip_headsign')
+ direction_id_index = header.index('direction_id')
+ except ValueError as e:
+ logger.error(f"Required column not found in header: {e}")
+ return trips
+
+ # Check if shape_id column exists
+ shape_id_index = None
+ if 'shape_id' in header:
+ shape_id_index = header.index('shape_id')
+ else:
+ logger.warning("shape_id column not found in trips.txt")
+
+ # Initialize cache for this feed directory
+ TRIPS_BY_SERVICE_ID[feed_dir] = {}
+
+ for line in lines[1:]:
+ parts = line.strip().split(',')
+ if len(parts) < len(header):
+ logger.warning(
+ f"Skipping malformed line in trips.txt: {line.strip()}")
+ continue
+
+ service_id = parts[service_id_index]
+ trip_id = parts[trip_id_index]
+
+ # Cache all trips, not just the ones requested
+ if service_id not in TRIPS_BY_SERVICE_ID[feed_dir]:
+ TRIPS_BY_SERVICE_ID[feed_dir][service_id] = []
+
+ # Get shape_id if available
+ shape_id = None
+ if shape_id_index is not None and shape_id_index < len(parts):
+ shape_id = parts[shape_id_index] if parts[shape_id_index] else None
+
+ trip_line = TripLine(
+ route_id=parts[route_id_index],
+ service_id=service_id,
+ trip_id=trip_id,
+ headsign=parts[headsign_index],
+ direction_id=int(
+ parts[direction_id_index] if parts[direction_id_index] else -1),
+ shape_id=shape_id
+ )
+
+ TRIPS_BY_SERVICE_ID[feed_dir][service_id].append(trip_line)
+
+ # Also build the result for the requested service IDs
+ if service_id in service_ids:
+ if service_id not in trips:
+ trips[service_id] = []
+ trips[service_id].append(trip_line)
+
+ except FileNotFoundError:
+ logger.warning("trips.txt file not found.")
+
+ return trips
diff --git a/src/gtfs_vigo_stops/stop_report.py b/src/gtfs_vigo_stops/stop_report.py
new file mode 100644
index 0000000..fa541ef
--- /dev/null
+++ b/src/gtfs_vigo_stops/stop_report.py
@@ -0,0 +1,311 @@
+import os
+import shutil
+import sys
+import traceback
+import argparse
+from typing import List, Dict, Any
+from multiprocessing import Pool, cpu_count
+
+from src.download import download_feed_from_url
+from src.logger import get_logger
+from src.common import get_all_feed_dates
+from src.stops import get_all_stops
+from src.services import get_active_services
+from src.street_name import get_street_name, normalise_stop_name
+from src.trips import get_trips_for_services
+from src.stop_times import get_stops_for_trips
+from src.routes import load_routes
+from src.report_writer import write_stop_json
+
+logger = get_logger("stop_report")
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+ description="Generate stop-based JSON reports for a date or date range.")
+ parser.add_argument('--output-dir', type=str, default="./output/",
+ help='Directory to write reports to (default: ./output/)')
+ parser.add_argument('--feed-dir', type=str,
+ help="Path to the feed directory")
+ parser.add_argument('--feed-url', type=str,
+ help="URL to download the GTFS feed from (if not using local feed directory)")
+ parser.add_argument('--force-download', action='store_true',
+ help="Force download even if the feed hasn't been modified (only applies when using --feed-url)")
+ args = parser.parse_args()
+
+ if args.feed_dir and args.feed_url:
+ parser.error("Specify either --feed-dir or --feed-url, not both.")
+ if not args.feed_dir and not args.feed_url:
+ parser.error(
+ "You must specify either a path to the existing feed (unzipped) or a URL to download the GTFS feed from.")
+ if args.feed_dir and not os.path.exists(args.feed_dir):
+ parser.error(f"Feed directory does not exist: {args.feed_dir}")
+ return args
+
+
+def time_to_seconds(time_str: str) -> int:
+ """Convert HH:MM:SS to seconds since midnight."""
+ if not time_str:
+ return 0
+
+ parts = time_str.split(':')
+ if len(parts) != 3:
+ return 0
+
+ try:
+ hours, minutes, seconds = map(int, parts)
+ return hours * 3600 + minutes * 60 + seconds
+ except ValueError:
+ return 0
+
+
+def get_numeric_code(stop_code: str | None) -> str:
+ if not stop_code:
+ return ""
+ numeric_code = ''.join(c for c in stop_code if c.isdigit())
+ return str(int(numeric_code)) if numeric_code else ""
+
+
+def get_stop_arrivals(
+ feed_dir: str,
+ date: str
+) -> Dict[str, List[Dict[str, Any]]]:
+ """
+ Process trips for the given date and organize stop arrivals.
+
+ Args:
+ feed_dir: Path to the GTFS feed directory
+ date: Date in YYYY-MM-DD format
+
+ Returns:
+ Dictionary mapping stop_code to lists of arrival information.
+ """
+ stops = get_all_stops(feed_dir)
+ logger.info(f"Found {len(stops)} stops in the feed.")
+
+ active_services = get_active_services(feed_dir, date)
+ if not active_services:
+ logger.info("No active services found for the given date.")
+ return {}
+
+ logger.info(
+ f"Found {len(active_services)} active services for date {date}.")
+
+ trips = get_trips_for_services(feed_dir, active_services)
+ total_trip_count = sum(len(trip_list) for trip_list in trips.values())
+ logger.info(f"Found {total_trip_count} trips for active services.")
+
+ # Get all trip IDs
+ all_trip_ids = [trip.trip_id for trip_list in trips.values()
+ for trip in trip_list]
+
+ # Get stops for all trips
+ stops_for_all_trips = get_stops_for_trips(feed_dir, all_trip_ids)
+ logger.info(f"Precomputed stops for {len(stops_for_all_trips)} trips.")
+
+ # Load routes information
+ routes = load_routes(feed_dir)
+ logger.info(f"Loaded {len(routes)} routes from feed.")
+
+ # Create a reverse lookup from stop_id to stop_code
+ stop_id_to_code = {}
+ for stop_id, stop in stops.items():
+ if stop.stop_code:
+ stop_id_to_code[stop_id] = get_numeric_code(stop.stop_code)
+
+ # Organize data by stop_code
+ stop_arrivals = {}
+
+ for service_id, trip_list in trips.items():
+ for trip in trip_list:
+ # Get route information once per trip
+ route_info = routes.get(trip.route_id, {})
+ route_short_name = route_info.get('route_short_name', '')
+ trip_headsign = getattr(trip, 'headsign', '') or ''
+ trip_id = trip.trip_id
+
+ # Get stop times for this trip
+ trip_stops = stops_for_all_trips.get(trip.trip_id, [])
+ if not trip_stops:
+ continue
+
+ # Pair stop_times with stop metadata once to avoid repeated lookups
+ trip_stop_pairs = []
+ stop_names = []
+ for stop_time in trip_stops:
+ stop = stops.get(stop_time.stop_id)
+ trip_stop_pairs.append((stop_time, stop))
+ stop_names.append(stop.stop_name if stop else "Unknown Stop")
+
+ # Memoize street names per stop name for this trip and build segments
+ street_cache: dict[str, str] = {}
+ segment_names: list[str] = []
+ stop_to_segment_idx: list[int] = []
+ previous_street: str | None = None
+ for name in stop_names:
+ street = street_cache.get(name)
+ if street is None:
+ street = get_street_name(name) or ""
+ street_cache[name] = street
+ if street != previous_street:
+ segment_names.append(street)
+ previous_street = street
+ stop_to_segment_idx.append(len(segment_names) - 1)
+
+ # Precompute future street transitions per segment
+ future_suffix_by_segment: list[tuple[str, ...]] = [()] * len(segment_names)
+ future_tuple: tuple[str, ...] = ()
+ for idx in range(len(segment_names) - 1, -1, -1):
+ future_suffix_by_segment[idx] = future_tuple
+ current_street = segment_names[idx]
+ future_tuple = (current_street,) + future_tuple if current_street is not None else future_tuple
+
+ segment_future_lists: dict[int, list[str]] = {}
+
+ first_stop_time, first_stop = trip_stop_pairs[0]
+ last_stop_time, last_stop = trip_stop_pairs[-1]
+
+ starting_stop_name = first_stop.stop_name if first_stop else "Unknown Stop"
+ terminus_stop_name = last_stop.stop_name if last_stop else "Unknown Stop"
+
+ starting_code = get_numeric_code(first_stop.stop_code) if first_stop else ""
+ terminus_code = get_numeric_code(last_stop.stop_code) if last_stop else ""
+
+ starting_name = normalise_stop_name(starting_stop_name)
+ terminus_name = normalise_stop_name(terminus_stop_name)
+ starting_time = first_stop_time.departure_time
+ terminus_time = last_stop_time.arrival_time
+
+ for i, (stop_time, _) in enumerate(trip_stop_pairs):
+ stop_code = stop_id_to_code.get(stop_time.stop_id)
+
+ if not stop_code:
+ continue # Skip stops without a code
+
+ if stop_code not in stop_arrivals:
+ stop_arrivals[stop_code] = []
+
+ if segment_names:
+ segment_idx = stop_to_segment_idx[i]
+ if segment_idx not in segment_future_lists:
+ segment_future_lists[segment_idx] = list(future_suffix_by_segment[segment_idx])
+ next_streets = segment_future_lists[segment_idx].copy()
+ else:
+ next_streets = []
+
+ stop_arrivals[stop_code].append({
+ "trip_id": trip_id,
+ "service_id": service_id,
+ "line": route_short_name,
+ "route": trip_headsign,
+ "stop_sequence": stop_time.stop_sequence,
+ 'shape_dist_traveled': getattr(stop_time, 'shape_dist_traveled', 0),
+ "next_streets": next_streets,
+
+ "starting_code": starting_code,
+ "starting_name": starting_name,
+ "starting_time": starting_time,
+
+ "calling_time": stop_time.departure_time,
+ "calling_ssm": time_to_seconds(stop_time.departure_time),
+
+ "terminus_code": terminus_code,
+ "terminus_name": terminus_name,
+ "terminus_time": terminus_time,
+ })
+
+ # Sort each stop's arrivals by arrival time
+ for stop_code in stop_arrivals:
+ # Defensive filter: drop any entry whose calling_ssm could not be computed
+ stop_arrivals[stop_code] = [
+ item for item in stop_arrivals[stop_code] if item["calling_ssm"] is not None]
+ stop_arrivals[stop_code].sort(key=lambda x: x["calling_ssm"])
+
+ return stop_arrivals
+
+
+def process_date(
+ feed_dir: str,
+ date: str,
+ output_dir: str
+) -> tuple[str, Dict[str, int]]:
+ """
+ Process a single date and write its stop JSON files.
+ Returns summary data for index generation.
+ """
+ logger = get_logger(f"stop_report_{date}")
+ try:
+ logger.info(f"Starting stop report generation for date {date}")
+
+ # Get all stop arrivals for the current date
+ stop_arrivals = get_stop_arrivals(feed_dir, date)
+
+ if not stop_arrivals:
+ logger.warning(f"No stop arrivals found for date {date}")
+ return date, {}
+
+ # Write individual stop JSON files
+ for stop_code, arrivals in stop_arrivals.items():
+ write_stop_json(output_dir, date, stop_code, arrivals)
+
+ # Create summary for index
+ stop_summary = {stop_code: len(arrivals)
+ for stop_code, arrivals in stop_arrivals.items()}
+ logger.info(f"Processed {len(stop_arrivals)} stops for date {date}")
+
+ return date, stop_summary
+ except Exception as e:
+ logger.error(f"Error processing date {date}: {e}")
+ raise
+
+
+def main():
+ args = parse_args()
+ output_dir = args.output_dir
+ feed_url = args.feed_url
+
+ if not feed_url:
+ feed_dir = args.feed_dir
+ else:
+ logger.info(f"Downloading GTFS feed from {feed_url}...")
+ feed_dir = download_feed_from_url(
+ feed_url, output_dir, args.force_download)
+ if feed_dir is None:
+ logger.info("Download was skipped (feed not modified). Exiting.")
+ return
+
+ all_dates = get_all_feed_dates(feed_dir)
+ if not all_dates:
+ logger.error('No valid dates found in feed.')
+ return
+ date_list = all_dates
+
+ # Ensure date_list is not empty before processing
+ if not date_list:
+ logger.error("No valid dates to process.")
+ return
+
+ logger.info(f"Processing {len(date_list)} dates")
+
+ # Dictionary to store summary data for index files
+ all_stops_summary = {}
+
+ for date in date_list:
+ _, stop_summary = process_date(feed_dir, date, output_dir)
+ all_stops_summary[date] = stop_summary
+
+ if feed_url:
+ if os.path.exists(feed_dir):
+ shutil.rmtree(feed_dir)
+ logger.info(f"Removed temporary feed directory: {feed_dir}")
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except Exception as e:
+ logger = get_logger("stop_report")
+ logger.critical(f"An unexpected error occurred: {e}", exc_info=True)
+ traceback.print_exc()
+ sys.exit(1)
diff --git a/src/gtfs_vigo_stops/uv.lock b/src/gtfs_vigo_stops/uv.lock
new file mode 100644
index 0000000..11158e4
--- /dev/null
+++ b/src/gtfs_vigo_stops/uv.lock
@@ -0,0 +1,186 @@
+version = 1
+requires-python = ">=3.13"
+
+[[package]]
+name = "certifi"
+version = "2025.4.26"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618 },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622 },
+ { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435 },
+ { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653 },
+ { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231 },
+ { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243 },
+ { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442 },
+ { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147 },
+ { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057 },
+ { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454 },
+ { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174 },
+ { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166 },
+ { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064 },
+ { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641 },
+ { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626 },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
+]
+
+[[package]]
+name = "gtfs-vigo"
+version = "0.1.0"
+source = { virtual = "." }
+dependencies = [
+ { name = "colorama" },
+ { name = "jinja2" },
+ { name = "pytest" },
+ { name = "requests" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "colorama", specifier = ">=0.4.6" },
+ { name = "jinja2", specifier = ">=3.1.6" },
+ { name = "pytest", specifier = ">=8.4.1" },
+ { name = "requests", specifier = ">=2.32.3" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 },
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markupsafe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 },
+]
+
+[[package]]
+name = "markupsafe"
+version = "3.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 },
+ { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 },
+ { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 },
+ { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 },
+ { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 },
+ { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 },
+ { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 },
+ { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 },
+ { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 },
+ { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 },
+ { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 },
+ { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 },
+ { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 },
+ { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 },
+ { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 },
+ { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 },
+ { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 },
+ { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 },
+ { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 },
+ { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 },
+]
+
+[[package]]
+name = "pluggy"
+version = "1.6.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 },
+]
+
+[[package]]
+name = "pygments"
+version = "2.19.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 },
+]
+
+[[package]]
+name = "pytest"
+version = "8.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "iniconfig" },
+ { name = "packaging" },
+ { name = "pluggy" },
+ { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474 },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "charset-normalizer" },
+ { name = "idna" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680 },
+]