Skip to content

Commit 892838d

Browse files
committed
add fromEditor flag on feed export
1 parent 2f4da7e commit 892838d

File tree

2 files changed

+65
-12
lines changed

2 files changed

+65
-12
lines changed

Diff for: src/main/java/com/conveyal/gtfs/GTFS.java

+3-3
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,8 @@ public abstract class GTFS {
3232
/**
3333
* Export a feed ID from the database to a zipped GTFS file in the specified export directory.
3434
*/
35-
public static FeedLoadResult export (String feedId, String outFile, DataSource dataSource) {
36-
JdbcGtfsExporter exporter = new JdbcGtfsExporter(feedId, outFile, dataSource);
35+
public static FeedLoadResult export (String feedId, String outFile, DataSource dataSource, boolean fromEditor) {
36+
JdbcGtfsExporter exporter = new JdbcGtfsExporter(feedId, outFile, dataSource, fromEditor);
3737
FeedLoadResult result = exporter.exportTables();
3838
return result;
3939
}
@@ -222,7 +222,7 @@ public static void main (String[] args) {
222222
}
223223
if (namespaceToExport != null) {
224224
LOG.info("Exporting feed with unique identifier {}", namespaceToExport);
225-
FeedLoadResult exportResult = export(namespaceToExport, outFile, dataSource);
225+
FeedLoadResult exportResult = export(namespaceToExport, outFile, dataSource, true);
226226
LOG.info("Done exporting.");
227227
} else {
228228
LOG.error("No feed to export. Specify one, or load a feed in the same command.");

Diff for: src/main/java/com/conveyal/gtfs/loader/JdbcGtfsExporter.java

+62-9
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
package com.conveyal.gtfs.loader;
22

3+
import com.conveyal.gtfs.GTFSFeed;
4+
import com.conveyal.gtfs.model.Calendar;
5+
import com.conveyal.gtfs.model.CalendarDate;
36
import com.conveyal.gtfs.model.Entity;
7+
import com.conveyal.gtfs.model.ScheduleException;
8+
import com.conveyal.gtfs.model.Service;
49
import org.apache.commons.dbutils.DbUtils;
510
import org.postgresql.copy.CopyManager;
611
import org.postgresql.core.BaseConnection;
@@ -16,6 +21,7 @@
1621
import java.io.OutputStream;
1722
import java.sql.Connection;
1823
import java.sql.SQLException;
24+
import java.time.LocalDate;
1925
import java.util.zip.ZipEntry;
2026
import java.util.zip.ZipOutputStream;
2127

@@ -27,18 +33,19 @@ public class JdbcGtfsExporter {
2733

2834
private final String outFile;
2935
private final DataSource dataSource;
36+
private final boolean fromEditor;
3037

3138
// These fields will be filled in once feed snapshot begins.
3239
private Connection connection;
33-
private String tablePrefix;
3440
private ZipOutputStream zipOutputStream;
3541
// The reference feed ID (namespace) to copy.
3642
private final String feedIdToExport;
3743

38-
/**
 * Create an exporter that writes the feed stored under the given namespace to a zipped GTFS file.
 *
 * @param feedId     namespace of the feed to export
 * @param outFile    destination path for the zipped GTFS output
 * @param dataSource connection pool for the database holding the feed tables
 * @param fromEditor true when the feed originates from the GTFS Editor, which keeps service
 *                   exceptions in a separate table rather than as calendar_dates
 */
public JdbcGtfsExporter(String feedId, String outFile, DataSource dataSource, boolean fromEditor) {
    this.dataSource = dataSource;
    this.outFile = outFile;
    this.fromEditor = fromEditor;
    this.feedIdToExport = feedId;
}
4350

4451
/**
@@ -62,20 +69,68 @@ public FeedLoadResult exportTables() {
6269
connection = dataSource.getConnection();
6370
// Include the dot separator in the table prefix.
6471
// This allows everything to work even when there's no prefix.
65-
this.tablePrefix += ".";
6672
// Export each table in turn (by placing entry in zip output stream).
6773
// FIXME: NO non-fatal exception errors are being captured during copy operations.
6874
result.agency = export(Table.AGENCY);
6975
result.calendar = export(Table.CALENDAR);
70-
result.calendarDates = export(Table.CALENDAR_DATES);
76+
if (fromEditor) {
77+
GTFSFeed feed = new GTFSFeed();
78+
// Export schedule exceptions in place of calendar dates if exporting from the GTFS Editor.
79+
// FIXME: The below table readers should probably just share a connection with the exporter.
80+
JDBCTableReader<ScheduleException> exceptionsReader =
81+
new JDBCTableReader(Table.SCHEDULE_EXCEPTIONS, dataSource, feedIdToExport + ".",
82+
EntityPopulator.SCHEDULE_EXCEPTION);
83+
JDBCTableReader<Calendar> calendarsReader =
84+
new JDBCTableReader(Table.CALENDAR, dataSource, feedIdToExport + ".",
85+
EntityPopulator.CALENDAR);
86+
Iterable<Calendar> calendars = calendarsReader.getAll();
87+
for (Calendar cal : calendars) {
88+
LOG.info("Iterating over calendar {}", cal.service_id);
89+
Service service = new Service(cal.service_id);
90+
service.calendar = cal;
91+
Iterable<ScheduleException> exceptions = exceptionsReader.getAll();
92+
for (ScheduleException ex : exceptions) {
93+
LOG.info("Adding exception {} for calendar {}", ex.name, cal.service_id);
94+
if (ex.equals(ScheduleException.ExemplarServiceDescriptor.SWAP) &&
95+
!ex.addedService.contains(cal.service_id) && !ex.removedService.contains(cal.service_id))
96+
// skip swap exception if cal is not referenced by added or removed service
97+
// this is not technically necessary, but the output is cleaner/more intelligible
// NOTE(review): the guard above compares `ex.equals(ScheduleException.ExemplarServiceDescriptor.SWAP)`,
// i.e. a ScheduleException instance against an enum constant — under the equals contract that is
// always false, so the swap-skip branch never fires. It likely should test `ex.exemplar`; confirm.
98+
continue;
99+
100+
for (LocalDate date : ex.dates) {
101+
if (date.isBefore(cal.start_date) || date.isAfter(cal.end_date))
102+
// no need to write dates that do not apply
103+
continue;
104+
105+
CalendarDate calendarDate = new CalendarDate();
106+
calendarDate.date = date;
107+
calendarDate.service_id = cal.service_id;
108+
calendarDate.exception_type = ex.serviceRunsOn(cal) ? 1 : 2;
109+
110+
if (service.calendar_dates.containsKey(date))
111+
throw new IllegalArgumentException("Duplicate schedule exceptions on " + date.toString());
112+
113+
service.calendar_dates.put(date, calendarDate);
114+
}
115+
}
116+
feed.services.put(cal.service_id, service);
117+
}
118+
LOG.info("Writing calendar dates from schedule exceptions");
119+
new CalendarDate.Writer(feed).writeTable(zipOutputStream);
120+
} else {
121+
// Otherwise, simply export the calendar dates as they were loaded in.
122+
result.calendarDates = export(Table.CALENDAR_DATES);
123+
}
71124
result.fareAttributes = export(Table.FARE_ATTRIBUTES);
72125
result.fareRules = export(Table.FARE_RULES);
73126
result.feedInfo = export(Table.FEED_INFO);
74127
result.frequencies = export(Table.FREQUENCIES);
75128
result.routes = export(Table.ROUTES);
76129
// FIXME: Find some place to store errors encountered on export for patterns and pattern stops.
77-
export(Table.PATTERNS);
78-
export(Table.PATTERN_STOP);
130+
// FIXME: Is there a need to export patterns or pattern stops? Should these be iterated over to ensure that
131+
// frequency-based pattern travel times match stop time arrivals/departures?
132+
// export(Table.PATTERNS);
133+
// export(Table.PATTERN_STOP);
79134
result.shapes = export(Table.SHAPES);
80135
result.stops = export(Table.STOPS);
81136
result.stopTimes = export(Table.STOP_TIMES);
@@ -103,7 +158,7 @@ private TableLoadResult export (Table table) {
103158
TableLoadResult tableLoadResult = new TableLoadResult();
104159
try {
105160
// Use the Postgres text load format if we're connected to that DBMS.
106-
boolean postgresText = (connection.getMetaData().getDatabaseProductName().equals("PostgreSQL"));
161+
boolean postgresText = connection.getMetaData().getDatabaseProductName().equals("PostgreSQL");
107162

108163
if (postgresText) {
109164
// Create entry for table
@@ -133,8 +188,6 @@ private TableLoadResult export (Table table) {
133188
}
134189
tableLoadResult.fatalException = e.getMessage();
135190
LOG.error("Exception while exporting tables", e);
136-
} catch (FileNotFoundException e) {
137-
e.printStackTrace();
138191
} catch (IOException e) {
139192
e.printStackTrace();
140193
}

0 commit comments

Comments
 (0)