 import com.conveyal.analysis.AnalysisServerException;
 import com.conveyal.analysis.SelectingGridReducer;
 import com.conveyal.analysis.UserPermissions;
+import com.conveyal.analysis.components.TaskScheduler;
 import com.conveyal.analysis.components.broker.Broker;
 import com.conveyal.analysis.components.broker.JobStatus;
 import com.conveyal.analysis.models.AnalysisRequest;
 import com.conveyal.analysis.models.RegionalAnalysis;
 import com.conveyal.analysis.persistence.Persistence;
 import com.conveyal.analysis.results.CsvResultType;
+import com.conveyal.analysis.util.HttpStatus;
 import com.conveyal.analysis.util.JsonUtil;
 import com.conveyal.file.FileStorage;
 import com.conveyal.file.FileStorageFormat;
 import com.conveyal.r5.analyst.PointSet;
 import com.conveyal.r5.analyst.PointSetCache;
 import com.conveyal.r5.analyst.cluster.RegionalTask;
+import com.conveyal.r5.analyst.progress.Task;
 import com.google.common.primitives.Ints;
 import com.mongodb.QueryBuilder;
 import gnu.trove.list.array.TIntArrayList;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.URI;
 import java.nio.file.FileSystem;
 import java.nio.file.FileSystems;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
 import java.util.zip.GZIPOutputStream;

 import static com.conveyal.analysis.util.JsonUtil.toJson;
 import static com.google.common.base.Preconditions.checkState;
 import static org.eclipse.jetty.http.MimeTypes.Type.APPLICATION_JSON;
 import static org.eclipse.jetty.http.MimeTypes.Type.TEXT_HTML;
+import static org.eclipse.jetty.http.MimeTypes.Type.TEXT_PLAIN;

 /**
  * Spark HTTP handler methods that allow launching new regional analyses, as well as deleting them and fetching
@@ -80,10 +88,12 @@ public class RegionalAnalysisController implements HttpController {

     private final Broker broker;
     private final FileStorage fileStorage;
+    private final TaskScheduler taskScheduler;

-    public RegionalAnalysisController (Broker broker, FileStorage fileStorage) {
+    public RegionalAnalysisController (Broker broker, FileStorage fileStorage, TaskScheduler taskScheduler) {
         this.broker = broker;
         this.fileStorage = fileStorage;
+        this.taskScheduler = taskScheduler;
     }

     private Collection<RegionalAnalysis> getRegionalAnalysesForRegion (String regionId, UserPermissions userPermissions) {
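Any code that constructs this controller must now supply the scheduler as well. A hypothetical wiring site (the actual component-registration code is outside this diff) would change along these lines:

```java
// Hypothetical wiring code, not part of this diff: the controller now receives the
// TaskScheduler so its HTTP handlers can enqueue long-running work on background threads.
HttpController controller = new RegionalAnalysisController(broker, fileStorage, taskScheduler);
```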
@@ -254,8 +264,9 @@ private HumanKey getSingleCutoffGrid (
                 grid.writeGeotiff(fos);
                 break;
             }
-
+            LOG.debug("Finished deriving single-cutoff grid {}. Transferring to storage.", singleCutoffKey);
             fileStorage.moveIntoStorage(singleCutoffFileStorageKey, localFile);
+            LOG.debug("Finished transferring single-cutoff grid {} to storage.", singleCutoffKey);
         }
         String analysisHumanName = humanNameForEntity(analysis);
         String destinationHumanName = humanNameForEntity(destinations);
@@ -266,6 +277,10 @@ private HumanKey getSingleCutoffGrid (
         return new HumanKey(singleCutoffFileStorageKey, resultHumanFilename);
     }

+    // Prevent multiple requests from creating the same files in parallel.
+    // This could potentially be integrated into FileStorage with enum return values or an additional boolean method.
+    private Set<String> filesBeingPrepared = Collections.synchronizedSet(new HashSet<>());
+
     private Object getAllRegionalResults (Request req, Response res) throws IOException {
         final String regionalAnalysisId = req.params("_id");
         final UserPermissions userPermissions = UserPermissions.from(req);
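The new `filesBeingPrepared` field guards against several requests kicking off identical zip-building work at once. As the handler below shows, the guard is a `contains` check followed by a separate `add`, which leaves a narrow window where two near-simultaneous requests could both pass the check; and because the `remove` runs only at the end of the background action, a failure inside the action would leave the path stranded in the set. A minimal standalone sketch (not Conveyal code; names assumed) of a variant that closes both gaps by relying on the atomicity of `Set.add` and a `finally` block:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class InFlightGuard {
    // ConcurrentHashMap-backed set: add() is atomic and returns false for duplicates,
    // so no separate contains() check (and no race between check and insert) is needed.
    private final Set<String> inFlight = ConcurrentHashMap.newKeySet();

    /** Runs the preparation only if no other thread is already preparing this path. */
    void prepareOnce(String path, Runnable prepare) {
        if (!inFlight.add(path)) {
            return; // another thread won the race; caller can report "already in progress"
        }
        try {
            prepare.run();
        } finally {
            inFlight.remove(path); // always release, even if preparation fails
        }
    }
}
```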
@@ -277,39 +292,61 @@ private Object getAllRegionalResults (Request req, Response res) throws IOExcept
             throw AnalysisServerException.badRequest("Batch result download only available for gridded origins.");
         }
         FileStorageKey zippedResultsKey = new FileStorageKey(RESULTS, analysis._id + "_ALL.zip");
-        if (!fileStorage.exists(zippedResultsKey)) {
-            // Iterate over all dest, cutoff, percentile combinations and generate one geotiff grid output for each one.
-            List<HumanKey> humanKeys = new ArrayList<>();
-            for (String destinationPointSetId : analysis.destinationPointSetIds) {
-                OpportunityDataset destinations = getDestinations(destinationPointSetId, userPermissions);
-                for (int cutoffMinutes : analysis.cutoffsMinutes) {
-                    for (int percentile : analysis.travelTimePercentiles) {
-                        HumanKey gridKey = getSingleCutoffGrid(
-                            analysis, destinations, cutoffMinutes, percentile, FileStorageFormat.GEOTIFF
-                        );
-                        humanKeys.add(gridKey);
+        if (fileStorage.exists(zippedResultsKey)) {
+            res.type(APPLICATION_JSON.asString());
+            String analysisHumanName = humanNameForEntity(analysis);
+            return fileStorage.getJsonUrl(zippedResultsKey, analysisHumanName, "zip");
+        }
+        if (filesBeingPrepared.contains(zippedResultsKey.path)) {
+            res.type(TEXT_PLAIN.asString());
+            res.status(HttpStatus.ACCEPTED_202);
+            return "Geotiff zip is already being prepared in the background.";
+        }
+        // File did not exist. Create it in the background and ask caller to request it later.
+        filesBeingPrepared.add(zippedResultsKey.path);
+        Task task = Task.create("Zip all geotiffs for regional analysis " + analysis.name)
+                .forUser(userPermissions)
+                .withAction(progressListener -> {
+                    int nSteps = analysis.destinationPointSetIds.length * analysis.cutoffsMinutes.length *
+                            analysis.travelTimePercentiles.length * 2 + 1;
+                    progressListener.beginTask("Creating and archiving geotiffs...", nSteps);
+                    // Iterate over all dest, cutoff, percentile combinations and generate one geotiff for each combination.
+                    List<HumanKey> humanKeys = new ArrayList<>();
+                    for (String destinationPointSetId : analysis.destinationPointSetIds) {
+                        OpportunityDataset destinations = getDestinations(destinationPointSetId, userPermissions);
+                        for (int cutoffMinutes : analysis.cutoffsMinutes) {
+                            for (int percentile : analysis.travelTimePercentiles) {
+                                HumanKey gridKey = getSingleCutoffGrid(
+                                        analysis, destinations, cutoffMinutes, percentile, FileStorageFormat.GEOTIFF
+                                );
+                                humanKeys.add(gridKey);
+                                progressListener.increment();
+                            }
                         }
                     }
-            }
-            File tempZipFile = File.createTempFile("regional", ".zip");
-            // Zipfs can't open existing empty files, the file has to not exist. FIXME: Non-dangerous race condition
-            // Examining ZipFileSystemProvider reveals a "useTempFile" env parameter, but this is for the individual entries.
-            // May be better to just use zipOutputStream which would also allow gzip - zip CSV conversion.
-            tempZipFile.delete();
-            Map<String, String> env = Map.of("create", "true");
-            URI uri = URI.create("jar:file:" + tempZipFile.getAbsolutePath());
-            try (FileSystem zipFilesystem = FileSystems.newFileSystem(uri, env)) {
-                for (HumanKey key : humanKeys) {
-                    Path storagePath = fileStorage.getFile(key.storageKey).toPath();
-                    Path zipPath = zipFilesystem.getPath(key.humanName);
-                    Files.copy(storagePath, zipPath, StandardCopyOption.REPLACE_EXISTING);
+                    File tempZipFile = File.createTempFile("regional", ".zip");
+                    // Zipfs can't open existing empty files, the file has to not exist. FIXME: Non-dangerous race condition
+                    // Examining ZipFileSystemProvider reveals a "useTempFile" env parameter, but this is for the individual
+                    // entries. May be better to just use zipOutputStream which would also allow gzip - zip CSV conversion.
+                    tempZipFile.delete();
+                    Map<String, String> env = Map.of("create", "true");
+                    URI uri = URI.create("jar:file:" + tempZipFile.getAbsolutePath());
+                    try (FileSystem zipFilesystem = FileSystems.newFileSystem(uri, env)) {
+                        for (HumanKey key : humanKeys) {
+                            Path storagePath = fileStorage.getFile(key.storageKey).toPath();
+                            Path zipPath = zipFilesystem.getPath(key.humanName);
+                            Files.copy(storagePath, zipPath, StandardCopyOption.REPLACE_EXISTING);
+                            progressListener.increment();
+                        }
                     }
-            }
-            fileStorage.moveIntoStorage(zippedResultsKey, tempZipFile);
-        }
-        res.type(APPLICATION_JSON.asString());
-        String analysisHumanName = humanNameForEntity(analysis);
-        return fileStorage.getJsonUrl(zippedResultsKey, analysisHumanName, "zip");
+                    fileStorage.moveIntoStorage(zippedResultsKey, tempZipFile);
+                    progressListener.increment();
+                    filesBeingPrepared.remove(zippedResultsKey.path);
+                });
+        taskScheduler.enqueue(task);
+        res.type(TEXT_PLAIN.asString());
+        res.status(HttpStatus.ACCEPTED_202);
+        return "Building geotiff zip in background.";
     }

     /**
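The zip is assembled with the JDK's zip filesystem provider: mounting a `jar:file:` URI with `create=true` yields a writable `FileSystem` whose paths are zip entries, so ordinary `Files.copy` calls add files to the archive. A self-contained sketch of just that technique (file names are assumptions for illustration):

```java
import java.io.IOException;
import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Map;

public class ZipFsSketch {
    public static void main(String[] args) throws IOException {
        Path zipFile = Path.of("bundle.zip"); // assumed output name
        // The zip filesystem cannot open an existing empty file; it must not exist yet.
        Files.deleteIfExists(zipFile);
        URI uri = URI.create("jar:" + zipFile.toUri());
        try (FileSystem zipFs = FileSystems.newFileSystem(uri, Map.of("create", "true"))) {
            for (String name : new String[] {"a.tif", "b.tif"}) { // assumed input files
                // Copying onto a path inside the zip filesystem writes a zip entry.
                Files.copy(Path.of(name), zipFs.getPath(name), StandardCopyOption.REPLACE_EXISTING);
            }
        } // closing the filesystem finalizes the zip's central directory
    }
}
```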
@@ -666,7 +703,7 @@ public void registerEndpoints (spark.Service sparkService) {
         sparkService.get("/:_id", this::getRegionalAnalysis);
         sparkService.get("/:_id/all", this::getAllRegionalResults, toJson);
         sparkService.get("/:_id/grid/:format", this::getRegionalResults, toJson);
-        sparkService.get("/:_id/csv/:resultType", this::getCsvResults, toJson);
+        sparkService.get("/:_id/csv/:resultType", this::getCsvResults);
         sparkService.get("/:_id/scenarioJsonUrl", this::getScenarioJsonUrl, toJson);
         sparkService.delete("/:_id", this::deleteRegionalAnalysis, toJson);
         sparkService.post("", this::createRegionalAnalysis, toJson);
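With these changes, `GET /:_id/all` follows a poll-until-ready contract: the first request returns 202 Accepted with a plain-text status while the Task builds the zip in the background, repeat requests keep returning 202, and once the archive is in storage the endpoint returns the JSON download URL as before. A hypothetical client loop (host, path, and polling interval are assumptions):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PollAllResults {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:7070/api/regional/abc123/all")) // hypothetical URL
                .GET()
                .build();
        while (true) {
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            if (response.statusCode() == 202) {
                // The geotiff zip is still being prepared in the background; retry later.
                Thread.sleep(5_000);
                continue;
            }
            // 200 OK: the body is JSON containing the download URL for the finished zip.
            System.out.println(response.body());
            return;
        }
    }
}
```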