diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index 0582feccc2a..3683425ebb0 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -11,9 +11,10 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released
 [Commits](https://github.com/scalableminds/webknossos/compare/24.07.0...HEAD)
 
 ### Added
+- WEBKNOSSOS now automatically searches subfolders / sub-collections for valid datasets in case a provided link to a remote dataset does not directly point to one. [#7912](https://github.com/scalableminds/webknossos/pull/7912)
 - Added the option to move a bounding box via dragging while pressing ctrl / meta. [#7892](https://github.com/scalableminds/webknossos/pull/7892)
 - Added route `/import?url=` to automatically import and view remote datasets. [#7844](https://github.com/scalableminds/webknossos/pull/7844)
-- The context menu that is opened upon right-clicking a segment in the dataview port now contains the segment's name. [#7920](https://github.com/scalableminds/webknossos/pull/7920)
+- The context menu that is opened upon right-clicking a segment in the data viewport now contains the segment's name. [#7920](https://github.com/scalableminds/webknossos/pull/7920)
 - Upgraded backend dependencies for improved performance and stability. [#7922](https://github.com/scalableminds/webknossos/pull/7922)
 
 ### Changed
diff --git a/docs/datasets.md b/docs/datasets.md
index 08076666f93..afeb36c393c 100644
--- a/docs/datasets.md
+++ b/docs/datasets.md
@@ -47,7 +47,7 @@ In particular, the following file formats are supported for uploading (and conve
 Once the data is uploaded (and potentially converted), you can further configure a dataset's [Settings](#configuring-datasets) and double-check layer properties, fine tune access rights & permissions, or set default values for rendering.
 
 ### Streaming from remote servers and the cloud
-WEBKNOSSOS supports loading and remotely streaming [Zarr](https://zarr.dev), [Neuroglancer precomputed format](https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed) and [N5](https://github.com/saalfeldlab/n5) datasets from a remote source, e.g. Cloud storage (S3) or HTTP server.
+WEBKNOSSOS supports loading and remotely streaming [Zarr](https://zarr.dev), [Neuroglancer precomputed format](https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed) and [N5](https://github.com/saalfeldlab/n5) datasets from a remote source, e.g. cloud storage (S3 / GCS) or an HTTP server.
 WEBKNOSSOS supports loading Zarr datasets according to the [OME NGFF v0.4 spec](https://ngff.openmicroscopy.org/latest/).
 WEBKNOSSOS can load several remote sources and assemble them into a WEBKNOSSOS dataset with several layers, e.g. one Zarr file/source for the `color` layer and one Zarr file/source for a `segmentation` layer.
diff --git a/frontend/javascripts/admin/dataset/dataset_add_remote_view.tsx b/frontend/javascripts/admin/dataset/dataset_add_remote_view.tsx
index e1f00a2b8c1..f1466531346 100644
--- a/frontend/javascripts/admin/dataset/dataset_add_remote_view.tsx
+++ b/frontend/javascripts/admin/dataset/dataset_add_remote_view.tsx
@@ -461,7 +461,7 @@ function AddRemoteLayer({
   const [showCredentialsFields, setShowCredentialsFields] = useState(false);
   const [usernameOrAccessKey, setUsernameOrAccessKey] = useState("");
   const [passwordOrSecretKey, setPasswordOrSecretKey] = useState("");
-  const [selectedProtocol, setSelectedProtocol] = useState<"s3" | "https" | "gs">("https");
+  const [selectedProtocol, setSelectedProtocol] = useState<"s3" | "https" | "gs" | "file">("https");
   const [fileList, setFileList] = useState([]);
 
   useEffect(() => {
@@ -489,9 +489,11 @@ function AddRemoteLayer({
       setSelectedProtocol("s3");
     } else if (userInput.startsWith("gs://")) {
       setSelectedProtocol("gs");
+    } else if (userInput.startsWith("file://")) {
+      setSelectedProtocol("file"); // Currently unused in the UI; file:// URIs are validated datastore-side against localFolderWhitelist.
     } else {
       throw new Error(
-        "Dataset URL must employ one of the following protocols: https://, http://, s3:// or gs://",
+        "Dataset URL must employ one of the following protocols: https://, http://, s3://, gs:// or file://",
       );
     }
   }
diff --git a/package.json b/package.json
index 3695c6bd90f..14c03016009 100644
--- a/package.json
+++ b/package.json
@@ -76,6 +76,11 @@
   "scripts": {
     "start": "node tools/proxy/proxy.js",
     "build": "node --max-old-space-size=4096 node_modules/.bin/webpack --env production",
+    "@comment build-backend": "Only checks the backend code for compile errors, as done by the CI. This command is not needed to run WEBKNOSSOS.",
+    "build-backend": "yarn build-wk-backend && yarn build-wk-datastore && yarn build-wk-tracingstore",
+    "build-wk-backend": "sbt -no-colors -DfailOnWarning compile stage",
+    "build-wk-datastore": "sbt -no-colors -DfailOnWarning \"project webknossosDatastore\" copyMessages compile stage",
+    "build-wk-tracingstore": "sbt -no-colors -DfailOnWarning \"project webknossosTracingstore\" copyMessages compile stage",
     "build-dev": "node_modules/.bin/webpack",
     "build-watch": "node_modules/.bin/webpack -w",
     "listening": "lsof -i:5005,7155,9000,9001,9002",
diff --git a/test/backend/DataVaultTestSuite.scala b/test/backend/DataVaultTestSuite.scala
index caeba8536cb..1a6425d26bb 100644
--- a/test/backend/DataVaultTestSuite.scala
+++ b/test/backend/DataVaultTestSuite.scala
@@ -134,6 +134,9 @@ class DataVaultTestSuite extends PlaySpec {
   class MockDataVault extends DataVault {
     override def readBytesAndEncoding(path: VaultPath, range: RangeSpecifier)(
         implicit ec: ExecutionContext): Fox[(Array[Byte], Encoding.Value)] = ???
+
+    override def listDirectory(path: VaultPath,
+                               maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]] = ???
   }
 
   "Uri has no trailing slash" should {
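Every `DataVault` implementation now has to provide `listDirectory`, so test doubles such as `MockDataVault` above must override it as well. As a hedged illustration of the new contract (not part of the PR; the class name and the fixed directory map are hypothetical, imports as in the test suite), a stub vault could serve a canned listing instead of `???`:

```scala
import java.net.URI
import scala.concurrent.ExecutionContext
import com.scalableminds.util.tools.Fox

// Hypothetical test helper: answers listDirectory from a fixed map instead of real storage.
class StaticListingDataVault(children: Map[URI, List[URI]]) extends DataVault {
  override def readBytesAndEncoding(path: VaultPath, range: RangeSpecifier)(
      implicit ec: ExecutionContext): Fox[(Array[Byte], Encoding.Value)] =
    Fox.empty // reading bytes is irrelevant for listing tests

  override def listDirectory(path: VaultPath, maxItems: Int)(
      implicit ec: ExecutionContext): Fox[List[VaultPath]] =
    Fox.successful(children.getOrElse(path.toUri, List.empty).take(maxItems).map(new VaultPath(_, this)))
}
```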
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
index 4cdd566a1d2..6459dec6ff4 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
@@ -34,10 +34,11 @@ import play.api.libs.json.Json
 import play.api.mvc.{Action, AnyContent, MultipartFormData, PlayBodyParsers}
 
 import java.io.File
-import com.scalableminds.webknossos.datastore.storage.AgglomerateFileKey
+import com.scalableminds.webknossos.datastore.storage.{AgglomerateFileKey, DataVaultService}
 import net.liftweb.common.{Box, Empty, Failure, Full}
 import play.api.libs.Files
 
+import java.net.URI
 import scala.collection.mutable.ListBuffer
 import scala.concurrent.{ExecutionContext, Future}
 import scala.concurrent.duration._
@@ -721,10 +722,14 @@ class DataSourceController @Inject()(
     Action.async(validateJson[ExploreRemoteDatasetRequest]) { implicit request =>
       accessTokenService.validateAccess(UserAccessRequest.administrateDataSources(request.body.organizationName), token) {
         val reportMutable = ListBuffer[String]()
+        val hasLocalFilesystemRequest = request.body.layerParameters.exists(param =>
+          new URI(param.remoteUri).getScheme == DataVaultService.schemeFile)
         for {
           dataSourceBox: Box[GenericDataSource[DataLayer]] <- exploreRemoteLayerService
             .exploreRemoteDatasource(request.body.layerParameters, reportMutable)
             .futureBox
+          // Clear the recursive-exploration report when the local file system was explored, to avoid exposing directory structure.
+          _ <- Fox.runIf(hasLocalFilesystemRequest)(Fox.successful(reportMutable.clear()))
           dataSourceOpt = dataSourceBox match {
             case Full(dataSource) if dataSource.dataLayers.nonEmpty =>
               reportMutable += s"Resulted in dataSource with ${dataSource.dataLayers.length} layers."
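The scheme check above relies on standard `java.net.URI` parsing; assuming `DataVaultService.schemeFile` holds the string `"file"` (consistent with the checks in `FileSystemDataVault` below), a quick REPL-style illustration with made-up URIs:

```scala
import java.net.URI

// getScheme returns the part before the first ":", which the guard
// compares against DataVaultService.schemeFile.
new URI("file:///srv/datasets/ds1").getScheme     // "file"  -> report gets cleared
new URI("s3://my-bucket/ds1").getScheme           // "s3"
new URI("https://example.org/ds1.zarr").getScheme // "https"
```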
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala
index 1122e0ad586..2fcb3475d7b 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala
@@ -7,4 +7,6 @@ import scala.concurrent.ExecutionContext
 trait DataVault {
   def readBytesAndEncoding(path: VaultPath, range: RangeSpecifier)(
       implicit ec: ExecutionContext): Fox[(Array[Byte], Encoding.Value)]
+
+  def listDirectory(path: VaultPath, maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]]
 }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala
index aa19261fb2f..bb16d34cd00 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala
@@ -8,21 +8,18 @@ import org.apache.commons.lang3.builder.HashCodeBuilder
 
 import java.nio.ByteBuffer
 import java.nio.file.{Files, Path, Paths}
+import java.util.stream.Collectors
 import scala.concurrent.ExecutionContext
+import scala.jdk.CollectionConverters._
 
 class FileSystemDataVault extends DataVault {
 
   override def readBytesAndEncoding(path: VaultPath, range: RangeSpecifier)(
-      implicit ec: ExecutionContext): Fox[(Array[Byte], Encoding.Value)] = {
-    val uri = path.toUri
+      implicit ec: ExecutionContext): Fox[(Array[Byte], Encoding.Value)] =
     for {
-      _ <- bool2Fox(uri.getScheme == DataVaultService.schemeFile) ?~> "trying to read from FileSystemDataVault, but uri scheme is not file"
-      _ <- bool2Fox(uri.getHost == null || uri.getHost.isEmpty) ?~> s"trying to read from FileSystemDataVault, but hostname ${uri.getHost} is non-empty"
-      localPath = Paths.get(uri.getPath)
-      _ <- bool2Fox(localPath.isAbsolute) ?~> "trying to read from FileSystemDataVault, but hostname is non-empty"
+      localPath <- vaultPathToLocalPath(path)
       bytes <- readBytesLocal(localPath, range)
     } yield (bytes, Encoding.identity)
-  }
 
   private def readBytesLocal(localPath: Path, range: RangeSpecifier)(implicit ec: ExecutionContext): Fox[Array[Byte]] =
     if (Files.exists(localPath)) {
@@ -53,6 +50,30 @@ class FileSystemDataVault extends DataVault {
       }
     } else Fox.empty
 
+  override def listDirectory(path: VaultPath, maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]] =
+    vaultPathToLocalPath(path).map(
+      localPath =>
+        if (Files.isDirectory(localPath))
+          Files
+            .list(localPath)
+            .filter(file => Files.isDirectory(file))
+            .collect(Collectors.toList())
+            .asScala
+            .toList
+            .map(dir => new VaultPath(dir.toUri, this))
+            .take(maxItems)
+        else List.empty)
+
+  private def vaultPathToLocalPath(path: VaultPath)(implicit ec: ExecutionContext): Fox[Path] = {
+    val uri = path.toUri
+    for {
+      _ <- bool2Fox(uri.getScheme == DataVaultService.schemeFile) ?~> "trying to read from FileSystemDataVault, but uri scheme is not file"
+      _ <- bool2Fox(uri.getHost == null || uri.getHost.isEmpty) ?~> s"trying to read from FileSystemDataVault, but hostname ${uri.getHost} is non-empty"
+      localPath = Paths.get(uri.getPath)
+      _ <- bool2Fox(localPath.isAbsolute) ?~> "trying to read from FileSystemDataVault, but path is not absolute"
+    } yield localPath
+  }
+
   override def hashCode(): Int =
     new HashCodeBuilder(19, 31).toHashCode
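For a sense of how the pieces fit together, here is a hedged usage sketch of the new local listing (the dataset path is made up, and the `listDirectory` delegate on `VaultPath` is the one added further below):

```scala
import java.net.URI
import scala.concurrent.ExecutionContext.Implicits.global

val vault = new FileSystemDataVault
val root = new VaultPath(new URI("file:///srv/datasets/my_dataset"), vault) // hypothetical path
// Yields at most 10 direct subdirectories, e.g. file:///srv/datasets/my_dataset/color;
// plain files are skipped, and non-directories resolve to an empty list.
val subDirs = root.listDirectory(maxItems = 10)
```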
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala
index aa5da3084b5..4d17f793c3a 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala
@@ -11,6 +11,7 @@ import java.io.ByteArrayInputStream
 import java.net.URI
 import java.nio.ByteBuffer
 import scala.concurrent.ExecutionContext
+import scala.jdk.CollectionConverters.IterableHasAsScala
 
 class GoogleCloudDataVault(uri: URI, credential: Option[GoogleServiceAccountCredential]) extends DataVault {
 
@@ -72,6 +73,17 @@ class GoogleCloudDataVault(uri: URI, credential: Option[GoogleServiceAccountCred
     } yield (bytes, encoding)
   }
 
+  override def listDirectory(path: VaultPath, maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]] =
+    tryo({
+      val objName = path.toUri.getPath.tail // drop the leading "/" to obtain the object key prefix
+      val blobs =
+        storage.list(bucket, Storage.BlobListOption.prefix(objName), Storage.BlobListOption.currentDirectory())
+      val subDirectories = blobs.getValues.asScala.toList.filter(_.isDirectory).take(maxItems)
+      val paths = subDirectories.map(dirBlob =>
+        new VaultPath(new URI(s"${uri.getScheme}://$bucket/${dirBlob.getBlobId.getName}"), this))
+      paths
+    })
+
   private def getUri = uri
 
   private def getCredential = credential
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala
index 130342021d3..dbe4f58ccf4 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala
@@ -44,6 +44,10 @@ class HttpsDataVault(credential: Option[DataVaultCredential], ws: WSClient) exte
 
   }
 
+  override def listDirectory(path: VaultPath, maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]] =
+    // Listing directories via HTTP is currently not supported, so recursive exploration stops at HTTPS sources.
+    Fox.successful(List.empty)
+
   private val headerInfoCache: AlfuCache[URI, (Boolean, Long)] = AlfuCache()
 
   private def getHeaderInformation(uri: URI)(implicit ec: ExecutionContext): Fox[(Boolean, Long)] =
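Some background on the GCS call above: `Storage.list` with a key prefix plus `BlobListOption.currentDirectory()` performs a non-recursive listing in which deeper objects are collapsed into synthetic directory blobs (`isDirectory == true`). A standalone, worksheet-style sketch against the google-cloud-storage client, with a made-up bucket and keys:

```scala
import com.google.cloud.storage.{Storage, StorageOptions}
import scala.jdk.CollectionConverters.IterableHasAsScala

val storage: Storage = StorageOptions.getDefaultInstance.getService // assumes default credentials
// Given objects ds/color/0 and ds/segmentation/0 in my-bucket, this returns the
// synthetic prefixes "ds/color/" and "ds/segmentation/" rather than the objects themselves.
val page = storage.list("my-bucket",
                        Storage.BlobListOption.prefix("ds/"),
                        Storage.BlobListOption.currentDirectory())
val subDirs = page.getValues.asScala.filter(_.isDirectory).map(_.getBlobId.getName).toList
```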
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala
index 3590ea71018..e6aeb5d936e 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala
@@ -11,14 +11,16 @@ import com.amazonaws.auth.{
 import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
 import com.amazonaws.regions.Regions
 import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
-import com.amazonaws.services.s3.model.{GetObjectRequest, S3Object}
+import com.amazonaws.services.s3.model.{GetObjectRequest, ListObjectsV2Request, S3Object}
 import com.amazonaws.util.AwsHostNameUtils
 import com.scalableminds.util.tools.Fox
+import com.scalableminds.util.tools.Fox.box2Fox
 import com.scalableminds.webknossos.datastore.storage.{
   LegacyDataVaultCredential,
   RemoteSourceDescriptor,
   S3AccessKeyCredential
 }
+import net.liftweb.common.Box.tryo
 import net.liftweb.common.{Box, Failure, Full}
 import org.apache.commons.io.IOUtils
 import org.apache.commons.lang3.builder.HashCodeBuilder
@@ -26,6 +28,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder
 import java.net.URI
 import scala.collection.immutable.NumericRange
 import scala.concurrent.ExecutionContext
+import scala.jdk.CollectionConverters._
 
 class S3DataVault(s3AccessKeyCredential: Option[S3AccessKeyCredential], uri: URI) extends DataVault {
   private lazy val bucketName = S3DataVault.hostBucketFromUri(uri) match {
@@ -50,7 +53,8 @@ class S3DataVault(s3AccessKeyCredential: Option[S3AccessKeyCredential], uri: URI
   private def getRequest(bucketName: String, key: String): GetObjectRequest =
     new GetObjectRequest(bucketName, key)
 
-  private def performRequest(request: GetObjectRequest)(implicit ec: ExecutionContext): Fox[(Array[Byte], String)] = {
+  private def performGetObjectRequest(request: GetObjectRequest)(
+      implicit ec: ExecutionContext): Fox[(Array[Byte], String)] = {
     var s3objectRef: Option[S3Object] = None // Used for cleanup later (possession of a S3Object requires closing it)
     try {
       val s3object = client.getObject(request)
@@ -82,10 +86,38 @@ class S3DataVault(s3AccessKeyCredential: Option[S3AccessKeyCredential], uri: URI
         case SuffixLength(l) => getSuffixRangeRequest(bucketName, objectKey, l)
         case Complete()      => getRequest(bucketName, objectKey)
       }
-      (bytes, encodingString) <- performRequest(request)
+      (bytes, encodingString) <- performGetObjectRequest(request)
       encoding <- Encoding.fromRfc7231String(encodingString)
     } yield (bytes, encoding)
 
+  override def listDirectory(path: VaultPath, maxItems: Int)(implicit ec: ExecutionContext): Fox[List[VaultPath]] =
+    for {
+      prefixKey <- S3DataVault.objectKeyFromUri(path.toUri).toFox
+      s3SubPrefixKeys <- getObjectSummaries(bucketName, prefixKey, maxItems)
+      vaultPaths <- tryo(
+        s3SubPrefixKeys.map(key => new VaultPath(new URI(s"${uri.getScheme}://$bucketName/$key"), this))).toFox
+    } yield vaultPaths
+
+  private def getObjectSummaries(bucketName: String, keyPrefix: String, maxItems: Int)(
+      implicit ec: ExecutionContext): Fox[List[String]] =
+    try {
+      val listObjectsRequest = new ListObjectsV2Request
+      listObjectsRequest.setBucketName(bucketName)
+      listObjectsRequest.setPrefix(keyPrefix)
+      listObjectsRequest.setDelimiter("/")
+      listObjectsRequest.setMaxKeys(maxItems)
+      val objectListing = client.listObjectsV2(listObjectsRequest)
+      val s3SubPrefixes = objectListing.getCommonPrefixes.asScala.toList
+      Fox.successful(s3SubPrefixes)
+    } catch {
+      case e: AmazonServiceException =>
+        e.getStatusCode match {
+          case 404 => Fox.empty
+          case _   => Fox.failure(e.getMessage)
+        }
+      case e: Exception => Fox.failure(e.getMessage)
+    }
+
   private def getUri = uri
 
   private def getCredential = s3AccessKeyCredential
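The S3 counterpart uses the same idea: S3 has no real directories, but a ListObjectsV2 request with a prefix and the delimiter "/" returns the next path segments as common prefixes, which `getObjectSummaries` above wraps. A standalone sketch against the AWS SDK v1 (bucket and prefix are made up):

```scala
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.ListObjectsV2Request
import scala.jdk.CollectionConverters._

val client = AmazonS3ClientBuilder.standard().build() // assumes default credential chain
val request = new ListObjectsV2Request()
  .withBucketName("my-bucket") // hypothetical bucket
  .withPrefix("datasets/")     // list "subfolders" below this key prefix
  .withDelimiter("/")          // fold keys at the next "/" into common prefixes
  .withMaxKeys(10)
// E.g. List("datasets/color/", "datasets/segmentation/") for keys datasets/color/0 and datasets/segmentation/0
val subPrefixes = client.listObjectsV2(request).getCommonPrefixes.asScala.toList
```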
           new NgffExplorer,
           new WebknossosZarrExplorer,
-          new N5ArrayExplorer,
+          new Zarr3ArrayExplorer,
+          new ZarrArrayExplorer(Vec3Int.ones),
           new N5MultiscalesExplorer,
+          new N5ArrayExplorer,
           new PrecomputedExplorer,
-          new Zarr3ArrayExplorer,
           new NeuroglancerUriExplorer(dataVaultService)
-        )
+        ),
+        reportMutable,
       )
     } yield layersWithVoxelSizes
@@ -106,26 +107,93 @@ class ExploreRemoteLayerService @Inject()(dataVaultService: DataVaultService,
           uri.getPath.startsWith(whitelistEntry))) ?~> s"Absolute path ${uri.getPath} in local file system is not in path whitelist. Consider adding it to datastore.localFolderWhitelist"
     } else Fox.successful(())
 
-  private def exploreRemoteLayersForRemotePath(remotePath: VaultPath,
-                                               credentialId: Option[String],
-                                               reportMutable: ListBuffer[String],
-                                               explorers: List[RemoteLayerExplorer])(
+  private val MAX_RECURSIVE_SEARCH_DEPTH = 3
+
+  private val MAX_EXPLORED_ITEMS_PER_LEVEL = 10
+
+  private def recursivelyExploreRemoteLayerAtPaths(remotePathsWithDepth: List[(VaultPath, Int)],
+                                                   credentialId: Option[String],
+                                                   explorers: List[RemoteLayerExplorer],
+                                                   reportMutable: ListBuffer[String])(
+      implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, VoxelSize)]] =
+    remotePathsWithDepth match {
+      case Nil =>
+        Fox.empty
+      case (path, searchDepth) :: remainingPaths =>
+        if (searchDepth > MAX_RECURSIVE_SEARCH_DEPTH) Fox.empty
+        else {
+          explorePathsWithAllExplorersAndGetFirstMatch(path, explorers, credentialId, reportMutable).futureBox.flatMap(
+            explorationResultOfPath =>
+              handleExploreResultOfPath(explorationResultOfPath,
+                                        path,
+                                        searchDepth,
+                                        remainingPaths,
+                                        credentialId,
+                                        explorers,
+                                        reportMutable))
+        }
+    }
+
+  private def explorePathsWithAllExplorersAndGetFirstMatch(path: VaultPath,
+                                                           explorers: List[RemoteLayerExplorer],
+                                                           credentialId: Option[String],
+                                                           reportMutable: ListBuffer[String])(
       implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, VoxelSize)]] =
-    explorers match {
-      case Nil => Fox.empty
-      case currentExplorer :: remainingExplorers =>
-        reportMutable += s"\nTrying to explore $remotePath as ${currentExplorer.name}..."
-        currentExplorer.explore(remotePath, credentialId).futureBox.flatMap {
-          case Full(layersWithVoxelSizes) =>
-            reportMutable += s"Found ${layersWithVoxelSizes.length} ${currentExplorer.name} layers at $remotePath."
-            Fox.successful(layersWithVoxelSizes)
-          case f: Failure =>
-            reportMutable += s"Error when reading $remotePath as ${currentExplorer.name}: ${Fox.failureChainAsString(f)}"
-            exploreRemoteLayersForRemotePath(remotePath, credentialId, reportMutable, remainingExplorers)
-          case Empty =>
-            reportMutable += s"Error when reading $remotePath as ${currentExplorer.name}: Empty"
-            exploreRemoteLayersForRemotePath(remotePath, credentialId, reportMutable, remainingExplorers)
+    Fox
+      .sequence(explorers.map { explorer =>
+        {
+          explorer
+            .explore(path, credentialId)
+            .futureBox
+            .flatMap {
+              handleExploreResult(_, explorer, path, reportMutable)
+            }
+            .toFox
         }
-    }
+      })
+      .map(explorationResults => Fox.firstSuccess(explorationResults.map(_.toFox)))
+      .toFox
+      .flatten
+
+  private def handleExploreResult(explorationResult: Box[List[(DataLayerWithMagLocators, VoxelSize)]],
+                                  explorer: RemoteLayerExplorer,
+                                  path: VaultPath,
+                                  reportMutable: ListBuffer[String])(
+      implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, VoxelSize)]] =
+    explorationResult match {
+      case Full(layersWithVoxelSizes) =>
+        reportMutable += s"Found ${layersWithVoxelSizes.length} ${explorer.name} layers at $path."
+        Fox.successful(layersWithVoxelSizes)
+      case f: Failure =>
+        reportMutable += s"Error when reading $path as ${explorer.name}: ${Fox.failureChainAsString(f)}"
+        Fox.empty
+      case Empty =>
+        reportMutable += s"Error when reading $path as ${explorer.name}: Empty"
+        Fox.empty
+    }
+
+  private def handleExploreResultOfPath(explorationResultOfPath: Box[List[(DataLayerWithMagLocators, VoxelSize)]],
+                                        path: VaultPath,
+                                        searchDepth: Int,
+                                        remainingPaths: List[(VaultPath, Int)],
+                                        credentialId: Option[String],
+                                        explorers: List[RemoteLayerExplorer],
+                                        reportMutable: ListBuffer[String])(
+      implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, VoxelSize)]] =
+    explorationResultOfPath match {
+      case Full(layersWithVoxelSizes) =>
+        Fox.successful(layersWithVoxelSizes)
+      case Empty =>
+        for {
+          extendedRemainingPaths <- path
+            .listDirectory(maxItems = MAX_EXPLORED_ITEMS_PER_LEVEL)
+            .map(dirs => remainingPaths ++ dirs.map((_, searchDepth + 1)))
+          foundLayers <- recursivelyExploreRemoteLayerAtPaths(extendedRemainingPaths,
+                                                              credentialId,
+                                                              explorers,
+                                                              reportMutable)
+        } yield foundLayers
+      case _ =>
+        Fox.successful(List.empty)
+    }
 }
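Taken together, the new methods implement a bounded breadth-first search over vault paths: a queue of (path, depth) pairs is processed in order, every explorer is tried on the head, and paths that match nothing enqueue up to MAX_EXPLORED_ITEMS_PER_LEVEL subdirectories one level deeper, down to MAX_RECURSIVE_SEARCH_DEPTH. A simplified, self-contained model of that control flow (plain Scala with Option standing in for Fox; all names are hypothetical, not the PR's code):

```scala
// Minimal sketch of the traversal: not the PR's code, just its shape.
final case class Node(name: String, children: List[Node], looksLikeDataset: Boolean)

val MaxDepth = 3
val MaxItemsPerLevel = 10

@scala.annotation.tailrec
def explore(queue: List[(Node, Int)]): Option[Node] = queue match {
  case Nil => None
  case (node, depth) :: rest =>
    if (depth > MaxDepth) None // depths in the queue never decrease, so we can stop entirely
    else if (node.looksLikeDataset) Some(node) // an explorer succeeded: first match wins
    else explore(rest ++ node.children.take(MaxItemsPerLevel).map((_, depth + 1)))
}

// explore(List((root, 0))) finds a dataset that sits e.g. two folder levels below the given URI.
```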