From ea6616c36811fc4afcbca901a9760fc0f62fb3d2 Mon Sep 17 00:00:00 2001 From: Florian M Date: Mon, 16 Mar 2026 16:15:38 +0100 Subject: [PATCH 01/37] WIP Allow uploading mags, attachments to existing datasets --- .../storage/RedisTemporaryStore.scala | 102 +++++++----------- 1 file changed, 36 insertions(+), 66 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala index 5d6a6e3f61e..ef6fa4dd269 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala @@ -12,119 +12,89 @@ trait RedisTemporaryStore extends LazyLogging { protected def address: String protected def port: Int lazy val authority: String = f"$address:$port" - private lazy val r = new RedisClient(address, port) + private lazy val r = new RedisClientPool(address, port) def find(id: String): Fox[Option[String]] = - withExceptionHandler { - r.get(id) - } + withExceptionHandler(_.get(id)) def findLong(id: String): Fox[Option[Long]] = - withExceptionHandler { - r.get(id).map(s => s.toLong) - } + withExceptionHandler(_.get(id).map(s => s.toLong)) def removeAllConditional(pattern: String): Fox[Unit] = - withExceptionHandler { - val keysOpt: Option[List[Option[String]]] = r.keys(pattern) + withExceptionHandler { client => + val keysOpt: Option[List[Option[String]]] = client.keys(pattern) keysOpt.foreach { keys: Seq[Option[String]] => keys.flatMap { key: Option[String] => - key.flatMap(r.del(_)) + key.flatMap(client.del(_)) } } } def findAllConditional(pattern: String): Fox[Seq[String]] = - withExceptionHandler { - val keysOpt: Option[List[Option[String]]] = r.keys(pattern) + withExceptionHandler { client => + val keysOpt: Option[List[Option[String]]] = client.keys(pattern) keysOpt.map { keys: Seq[Option[String]] => keys.flatMap { key: Option[String] => - key.flatMap(r.get(_)) + key.flatMap(client.get(_)) } }.getOrElse(Seq()) } def keys(pattern: String): Fox[List[String]] = - withExceptionHandler { - r.keys(pattern).map(_.flatten).getOrElse(List()) - } + withExceptionHandler(_.keys(pattern).map(_.flatten).getOrElse(List())) def insertKey(id: String, expirationOpt: Option[FiniteDuration] = None): Fox[Unit] = insert(id, "", expirationOpt) def insert(id: String, value: String, expirationOpt: Option[FiniteDuration] = None): Fox[Unit] = - withExceptionHandler { + withExceptionHandler { client => expirationOpt - .map( - expiration => r.setex(id, expiration.toSeconds, value) - ) - .getOrElse( - r.set(id, value) - ) + .map(expiration => client.setex(id, expiration.toSeconds, value)) + .getOrElse(client.set(id, value)) } def insertLong(id: String, value: Long, expirationOpt: Option[FiniteDuration] = None): Fox[Unit] = - withExceptionHandler { + withExceptionHandler { client => expirationOpt - .map( - expiration => r.setex(id, expiration.toSeconds, value) - ) - .getOrElse( - r.set(id, value) - ) + .map(expiration => client.setex(id, expiration.toSeconds, value)) + .getOrElse(client.set(id, value)) } def contains(id: String): Fox[Boolean] = - withExceptionHandler { - r.exists(id) - } + withExceptionHandler(_.exists(id)) def remove(id: String): Fox[Unit] = - withExceptionHandler { - r.del(id) - } + withExceptionHandler(_.del(id)) def checkHealth(implicit ec: ExecutionContext): Fox[Unit] = - try { - val reply = r.ping + 
withExceptionHandler { client => + val reply = client.ping if (!reply.contains("PONG")) throw new Exception(reply.getOrElse("No Reply")) - Fox.successful(()) - } catch { - case e: Exception => - logger.error(s"Redis health check failed at $address:$port (reply: ${e.getMessage})") - Fox.failure(s"Redis health check failed") - } - - def withExceptionHandler[B](f: => B): Fox[B] = - try { - r.synchronized { - Fox.successful(f) - } - } catch { - case e: Exception => - val msg = "Redis access exception: " + e.getMessage - logger.error(msg) - Fox.failure(msg) + () } def insertIntoSet(id: String, value: String): Fox[Boolean] = - withExceptionHandler { - r.sadd(id, value).getOrElse(0L) > 0 - } + withExceptionHandler(_.sadd(id, value).getOrElse(0L) > 0) def isContainedInSet(id: String, value: String): Fox[Boolean] = - withExceptionHandler { - r.sismember(id, value) - } + withExceptionHandler(_.sismember(id, value)) def removeFromSet(id: String, value: String): Fox[Boolean] = - withExceptionHandler { - r.srem(id, value).getOrElse(0L) > 0 - } + withExceptionHandler(_.srem(id, value).getOrElse(0L) > 0) def findSet(id: String): Fox[Set[String]] = - withExceptionHandler { - r.smembers(id).map(_.flatten).getOrElse(Set.empty) + withExceptionHandler(_.smembers(id).map(_.flatten).getOrElse(Set.empty)) + + private def withExceptionHandler[B](f: RedisClient => B): Fox[B] = + try { + r.withClient { client => + Fox.successful(f(client)) + } + } catch { + case e: Exception => + val msg = "Redis access exception: " + e.getMessage + logger.error(msg) + Fox.failure(msg) } } From 2f4d28d5d9ac4f400911fa9d55c94bc1693e2d81 Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 31 Mar 2026 14:13:26 +0200 Subject: [PATCH 02/37] WIP refactor uploadService --- .../controllers/DataSourceController.scala | 6 +- .../datastore/models/UnfinishedUpload.scala | 10 +- .../uploading/UploadMetadataStore.scala | 145 +++++++++++++++ .../services/uploading/UploadService.scala | 168 +++++------------- 4 files changed, 200 insertions(+), 129 deletions(-) create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala index e317b20e62c..7fe80d1e4dc 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala @@ -114,7 +114,9 @@ class DataSourceController @Inject()( _ <- if (!isKnownUpload) { for { reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDataSourceUpload(request.body) ?~> "dataset.upload.validation.failed" - _ <- uploadService.reserveUpload(request.body, reserveUploadAdditionalInfo) + _ <- uploadService.reserveUpload(request.body, + reserveUploadAdditionalInfo.newDatasetId, + reserveUploadAdditionalInfo.directoryName) } yield () } else Fox.successful(()) } yield Ok @@ -127,7 +129,7 @@ class DataSourceController @Inject()( for { unfinishedUploads <- dsRemoteWebknossosClient.getUnfinishedUploadsForUser(organizationName) unfinishedUploadsWithUploadIds <- Fox.fromFuture( - uploadService.addUploadIdsToUnfinishedUploads(unfinishedUploads)) + uploadService.enrichUnfinishedUploadInfoWithUploadIds(unfinishedUploads)) unfinishedUploadsWithUploadIdsWithoutDataSourceId = 
unfinishedUploadsWithUploadIds.map(_.withoutDataSourceId) } yield Ok(Json.toJson(unfinishedUploadsWithUploadIdsWithoutDataSourceId)) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/UnfinishedUpload.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/UnfinishedUpload.scala index 15ddcbda3f9..dae7af30425 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/UnfinishedUpload.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/UnfinishedUpload.scala @@ -4,13 +4,13 @@ import com.scalableminds.util.time.Instant import com.scalableminds.webknossos.datastore.models.datasource.DataSourceId import play.api.libs.json.{Format, Json} -case class UnfinishedUpload(uploadId: String, +case class UnfinishedUpload(uploadId: String, // Dummy value on wk-side, then filled in by datastore via redis dataSourceId: DataSourceId, datasetName: String, folderId: String, created: Instant, - filePaths: Option[List[String]], - allowedTeams: List[String]) { + filePaths: Option[Seq[String]], + allowedTeams: Seq[String]) { def withoutDataSourceId: UnfinishedUploadWithoutDataSourceId = UnfinishedUploadWithoutDataSourceId(uploadId, datasetName, folderId, created, filePaths, allowedTeams) } @@ -23,8 +23,8 @@ case class UnfinishedUploadWithoutDataSourceId(uploadId: String, datasetName: String, folderId: String, created: Instant, - filePaths: Option[List[String]], - allowedTeams: List[String]) + filePaths: Option[Seq[String]], + allowedTeams: Seq[String]) object UnfinishedUploadWithoutDataSourceId { implicit val dataSourceIdFormat: Format[UnfinishedUploadWithoutDataSourceId] = diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala new file mode 100644 index 00000000000..a00dc9aaed7 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -0,0 +1,145 @@ +package com.scalableminds.webknossos.datastore.services.uploading + +import com.scalableminds.util.objectid.ObjectId +import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} +import com.scalableminds.webknossos.datastore.models.datasource.DataSourceId +import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore +import play.api.libs.json.{Json, Reads, Writes} + +import javax.inject.Inject +import scala.concurrent.ExecutionContext + +class UploadMetadataStore @Inject()(metadataRedisStore: DataStoreRedisStore) extends FoxImplicits { + // TODO parameterize this class by domain? (DS vs Mag vs Attachment?) + + /* + * Redis stores different information for each running upload, with different prefixes in the keys. + * Note that Redis synchronizes all db accesses, so we do not need to do it. 
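+   * For example, for an upload with id <uploadId>, the expected file count lives under the key
+   * "upload___<uploadId>___fileCount", and per-file chunk bookkeeping under keys such as
+   * "upload___<uploadId>___file___<fileName>___chunkCount" (compare the key builders below).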
+ */ + private def redisKeyForFileCount(uploadId: String): String = + s"upload___${uploadId}___fileCount" + + private def redisKeyForTotalFileSizeInBytes(uploadId: String): String = + s"upload___${uploadId}___totalFileSizeInBytes" + + private def redisKeyForFileNameSet(uploadId: String): String = + s"upload___${uploadId}___fileNameSet" + + private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = + s"upload___${uploadId}___linkedLayerIdentifier" + + private def redisKeyForFileChunkCount(uploadId: String, fileName: String): String = + s"upload___${uploadId}___file___${fileName}___chunkCount" + + private def redisKeyForFileChunkSet(uploadId: String, fileName: String): String = + s"upload___${uploadId}___file___${fileName}___chunkSet" + + private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = + s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" + + private def redisKeyForDataSourceId(uploadId: String): String = + s"upload___${uploadId}___dataSourceId" + + private def redisKeyForDatasetId(uploadId: String): String = + s"upload___${uploadId}___datasetId" + + private def redisKeyForFilePaths(uploadId: String): String = + s"upload___${uploadId}___filePaths" + + def isKnownUpload(uploadId: String): Fox[Boolean] = + metadataRedisStore.contains(redisKeyForFileCount(uploadId)) + + def insertTotalFileCount(uploadId: String, totalFileCount: Long): Fox[Unit] = + metadataRedisStore.insert(redisKeyForFileCount(uploadId), String.valueOf(totalFileCount)) + + def insertTotalFileSizeInBytes(uploadId: String, totalFileSizeInBytes: Option[Long])( + implicit ec: ExecutionContext): Fox[Option[Unit]] = + Fox.runOptional(totalFileSizeInBytes) { + metadataRedisStore.insertLong(redisKeyForTotalFileSizeInBytes(uploadId), _) + } + + def insertLinkedLayerIdentifiers(uploadId: String, linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]])( + implicit ec: ExecutionContext): Fox[_] = + insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) + + def getDataSourceId(uploadId: String)(implicit ec: ExecutionContext): Fox[DataSourceId] = + getParsed[DataSourceId](redisKeyForDataSourceId(uploadId)) + + def getDatasetId(uploadId: String)(implicit ec: ExecutionContext): Fox[ObjectId] = + getParsed[ObjectId](redisKeyForDatasetId(uploadId)) + + // TODO make this Fox[String]? 
+ def getUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = + metadataRedisStore.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) + + def getFilePaths(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[String]] = + getParsed[Seq[String]](redisKeyForFilePaths(uploadId)) + + def isFileKnown(uploadId: String, filePath: String): Fox[Boolean] = + metadataRedisStore.contains(redisKeyForFileChunkCount(uploadId, filePath)) + + def isFileChunkSetKnown(uploadId: String, filePath: String): Fox[Boolean] = + metadataRedisStore.contains(redisKeyForFileChunkSet(uploadId, filePath)) + + def isChunkPresent(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = + metadataRedisStore.isContainedInSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + + def insertFilePathIntoSet(uploadId: String, filePath: String): Fox[Boolean] = + metadataRedisStore.insertIntoSet(redisKeyForFileNameSet(uploadId), filePath) + + def insertFileChunkCount(uploadId: String, filePath: String, totalChunkCount: Long): Fox[Unit] = + metadataRedisStore.insert(redisKeyForFileChunkCount(uploadId, filePath), String.valueOf(totalChunkCount)) + + def insertFileChunkIntoSet(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = + metadataRedisStore.insertIntoSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + + def removeFileChunkFromSet(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = + metadataRedisStore.removeFromSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + + def insertDatasetId(uploadId: String, datasetId: ObjectId)(implicit ec: ExecutionContext): Fox[Unit] = + insertSerialized(redisKeyForDatasetId(uploadId), datasetId) + + def insertDataSourceId(uploadId: String, dataSourceId: DataSourceId)(implicit ec: ExecutionContext): Fox[Unit] = + insertSerialized(redisKeyForDataSourceId(uploadId), dataSourceId) + + // Only here the uploadId is not key but value. This is used to re-connect to unfinished uploads. 
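+  // The matching lookup is getUploadIdByDataSourceId above, which UploadService uses to
+  // re-attach upload ids when listing a user's unfinished uploads.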
+ def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String)( + implicit ec: ExecutionContext): Fox[Unit] = + insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) + + def insertFilePaths(uploadId: String, filePaths: Option[Seq[String]])(implicit ec: ExecutionContext): Fox[Unit] = + insertSerialized(redisKeyForFilePaths(uploadId), filePaths.getOrElse(Seq.empty)) + + def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = + for { + _ <- metadataRedisStore.remove(redisKeyForFileCount(uploadId)) + fileNames <- metadataRedisStore.findSet(redisKeyForFileNameSet(uploadId)) + _ <- Fox.serialCombined(fileNames.toList) { fileName => + for { + _ <- metadataRedisStore.remove(redisKeyForFileChunkCount(uploadId, fileName)) + _ <- metadataRedisStore.remove(redisKeyForFileChunkSet(uploadId, fileName)) + } yield () + } + _ <- metadataRedisStore.remove(redisKeyForFileNameSet(uploadId)) + _ <- metadataRedisStore.remove(redisKeyForTotalFileSizeInBytes(uploadId)) + dataSourceId <- getDataSourceId(uploadId) + _ <- metadataRedisStore.remove(redisKeyForDataSourceId(uploadId)) + _ <- metadataRedisStore.remove(redisKeyForDatasetId(uploadId)) + _ <- metadataRedisStore.remove(redisKeyForLinkedLayerIdentifier(uploadId)) + _ <- metadataRedisStore.remove(redisKeyForUploadIdByDataSourceId(dataSourceId)) + _ <- metadataRedisStore.remove(redisKeyForFilePaths(uploadId)) + } yield () + + private def getParsed[T: Reads](key: String)(implicit ec: ExecutionContext): Fox[T] = + for { + objectStringOption <- metadataRedisStore.find(key) + objectString <- objectStringOption.toFox + parsed <- JsonHelper.parseAs[T](objectString).toFox + } yield parsed + + private def insertSerialized[T: Writes](key: String, value: T)(implicit ec: ExecutionContext): Fox[Unit] = { + val serialized = Json.stringify(Json.toJson(value)) + metadataRedisStore.insert(key, serialized) + } + +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index cc3662dc5f2..6b0ebf8f6fe 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -38,10 +38,10 @@ case class ReserveUploadInformation( name: String, // dataset name organization: String, totalFileCount: Long, - filePaths: Option[List[String]], + filePaths: Option[Seq[String]], totalFileSizeInBytes: Option[Long], - layersToLink: Option[List[LinkedLayerIdentifier]], - initialTeams: List[ObjectId], // team ids + layersToLink: Option[Seq[LinkedLayerIdentifier]], + initialTeams: Seq[ObjectId], // team ids folderId: Option[ObjectId], requireUniqueName: Option[Boolean], isVirtual: Option[Boolean], // Only set (to false) for legacy manual uploads @@ -91,7 +91,7 @@ object CancelUploadInformation { } class UploadService @Inject()(dataSourceService: DataSourceService, - runningUploadMetadataStore: DataStoreRedisStore, + uploadMetadataStore: UploadMetadataStore, dataVaultService: DataVaultService, exploreLocalLayerService: ExploreLocalLayerService, dataStoreConfig: DataStoreConfig, @@ -103,39 +103,18 @@ class UploadService @Inject()(dataSourceService: DataSourceService, with WKWDataFormatHelper with LazyLogging { - /* - * Redis stores different information for each running upload, with different prefixes in the keys. 
- * Note that Redis synchronizes all db accesses, so we do not need to do it. - */ - private def redisKeyForFileCount(uploadId: String): String = - s"upload___${uploadId}___fileCount" - private def redisKeyForTotalFileSizeInBytes(uploadId: String): String = - s"upload___${uploadId}___totalFileSizeInBytes" - private def redisKeyForFileNameSet(uploadId: String): String = - s"upload___${uploadId}___fileNameSet" - private def redisKeyForDataSourceId(uploadId: String): String = - s"upload___${uploadId}___dataSourceId" - private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = - s"upload___${uploadId}___linkedLayerIdentifier" - private def redisKeyForFileChunkCount(uploadId: String, fileName: String): String = - s"upload___${uploadId}___file___${fileName}___chunkCount" - private def redisKeyForFileChunkSet(uploadId: String, fileName: String): String = - s"upload___${uploadId}___file___${fileName}___chunkSet" - private def redisKeyForUploadId(datasourceId: DataSourceId): String = - s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" - private def redisKeyForDatasetId(uploadId: String): String = - s"upload___${uploadId}___datasetId" - private def redisKeyForFilePaths(uploadId: String): String = - s"upload___${uploadId}___filePaths" - cleanUpOrphanUploads() override def dataBaseDir: Path = dataSourceService.dataBaseDir - def isKnownUploadByFileId(uploadFileId: String): Fox[Boolean] = isKnownUpload(extractDatasetUploadId(uploadFileId)) + def isKnownUploadByFileId(uploadFileId: String): Fox[Boolean] = + uploadMetadataStore.isKnownUpload(extractDatasetUploadId(uploadFileId)) def isKnownUpload(uploadId: String): Fox[Boolean] = - runningUploadMetadataStore.contains(redisKeyForFileCount(uploadId)) + uploadMetadataStore.isKnownUpload(uploadId) + + def getDatasetIdByUploadId(uploadId: String): Fox[ObjectId] = + uploadMetadataStore.getDatasetId(uploadId) def extractDatasetUploadId(uploadFileId: String): String = uploadFileId.split("/").headOption.getOrElse("") @@ -145,47 +124,27 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def uploadBackupDirectoryFor(organizationId: String, uploadId: String): Path = dataBaseDir.resolve(organizationId).resolve(trashDir).resolve(s"uploadBackup__$uploadId") - private def getDataSourceIdByUploadId(uploadId: String): Fox[DataSourceId] = - getObjectFromRedis[DataSourceId](redisKeyForDataSourceId(uploadId)) - - def getDatasetIdByUploadId(uploadId: String): Fox[ObjectId] = - getObjectFromRedis[ObjectId](redisKeyForDatasetId(uploadId)) - def reserveUpload(reserveUploadInfo: ReserveUploadInformation, - reserveUploadAdditionalInfo: ReserveAdditionalInformation): Fox[Unit] = + datasetId: ObjectId, + directoryName: String): Fox[Unit] = for { _ <- dataSourceService.assertDataDirWritable(reserveUploadInfo.organization) - newDataSourceId = DataSourceId(reserveUploadAdditionalInfo.directoryName, reserveUploadInfo.organization) - _ = logger.info( - f"Reserving ${uploadFullName(reserveUploadInfo.uploadId, reserveUploadAdditionalInfo.newDatasetId, newDataSourceId)}...") + uploadId = reserveUploadInfo.uploadId + newDataSourceId = DataSourceId(directoryName, reserveUploadInfo.organization) + _ = logger.info(f"Reserving ${uploadFullName(uploadId, datasetId, newDataSourceId)}...") _ <- Fox.fromBool( !reserveUploadInfo.needsConversion.getOrElse(false) || !reserveUploadInfo.layersToLink .exists(_.nonEmpty)) ?~> "Cannot use linked layers if the dataset needs conversion" - _ <- 
runningUploadMetadataStore.insert(redisKeyForFileCount(reserveUploadInfo.uploadId), - String.valueOf(reserveUploadInfo.totalFileCount)) - _ <- Fox.runOptional(reserveUploadInfo.totalFileSizeInBytes)( - runningUploadMetadataStore.insertLong(redisKeyForTotalFileSizeInBytes(reserveUploadInfo.uploadId), _)) - _ <- runningUploadMetadataStore.insert( - redisKeyForDataSourceId(reserveUploadInfo.uploadId), - Json.stringify(Json.toJson(newDataSourceId)) - ) - _ <- runningUploadMetadataStore.insert( - redisKeyForDatasetId(reserveUploadInfo.uploadId), - Json.stringify(Json.toJson(reserveUploadAdditionalInfo.newDatasetId)) - ) - _ <- runningUploadMetadataStore.insert( - redisKeyForUploadId(DataSourceId(reserveUploadAdditionalInfo.directoryName, reserveUploadInfo.organization)), - reserveUploadInfo.uploadId - ) - filePaths = Json.stringify(Json.toJson(reserveUploadInfo.filePaths.getOrElse(List.empty))) - _ <- runningUploadMetadataStore.insert(redisKeyForFilePaths(reserveUploadInfo.uploadId), filePaths) - _ <- runningUploadMetadataStore.insert( - redisKeyForLinkedLayerIdentifier(reserveUploadInfo.uploadId), - Json.stringify(Json.toJson(LinkedLayerIdentifiers(reserveUploadInfo.layersToLink))) - ) + _ <- uploadMetadataStore.insertDataSourceId(uploadId, newDataSourceId) + _ <- uploadMetadataStore.insertUploadIdByDataSourceId(newDataSourceId, uploadId) + _ <- uploadMetadataStore.insertDatasetId(uploadId, datasetId) + _ <- uploadMetadataStore.insertTotalFileCount(uploadId, reserveUploadInfo.totalFileCount) + _ <- uploadMetadataStore.insertTotalFileSizeInBytes(uploadId, reserveUploadInfo.totalFileSizeInBytes) + _ <- uploadMetadataStore.insertFilePaths(uploadId, reserveUploadInfo.filePaths) + _ <- uploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, reserveUploadInfo.layersToLink) } yield () - def addUploadIdsToUnfinishedUploads( + def enrichUnfinishedUploadInfoWithUploadIds( unfinishedUploadsWithoutIds: List[UnfinishedUpload]): Future[List[UnfinishedUpload]] = for { maybeUnfinishedUploads: List[Box[Option[UnfinishedUpload]]] <- Fox.sequence( @@ -193,15 +152,12 @@ class UploadService @Inject()(dataSourceService: DataSourceService, unfinishedUploadsWithoutIds.map( unfinishedUpload => { for { - uploadIdOpt <- runningUploadMetadataStore.find(redisKeyForUploadId(unfinishedUpload.dataSourceId)) + uploadIdOpt <- uploadMetadataStore.getUploadIdByDataSourceId(unfinishedUpload.dataSourceId) updatedUploadOpt = uploadIdOpt.map(uploadId => unfinishedUpload.copy(uploadId = uploadId)) updatedUploadWithFilePathsOpt <- Fox.runOptional(updatedUploadOpt)(updatedUpload => for { - filePathsStringOpt <- runningUploadMetadataStore.find(redisKeyForFilePaths(updatedUpload.uploadId)) - filePathsOpt <- filePathsStringOpt.map(JsonHelper.parseAs[List[String]]).toFox - uploadUpdatedWithFilePaths <- filePathsOpt - .map(filePaths => updatedUpload.copy(filePaths = Some(filePaths))) - .toFox + filePaths <- uploadMetadataStore.getFilePaths(updatedUpload.uploadId) + uploadUpdatedWithFilePaths = updatedUpload.copy(filePaths = Some(filePaths)) } yield uploadUpdatedWithFilePaths) } yield updatedUploadWithFilePathsOpt } @@ -212,10 +168,10 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def isOutsideUploadDir(uploadDir: Path, filePath: String): Boolean = uploadDir.relativize(uploadDir.resolve(filePath)).startsWith("../") - private def getFilePathAndDirOfUploadId(uploadFileId: String): Fox[(String, Path)] = { + private def getFilePathAndDirForUploadFileId(uploadFileId: String): Fox[(String, Path)] = { val uploadId 
= extractDatasetUploadId(uploadFileId) for { - dataSourceId <- getDataSourceIdByUploadId(uploadId) + dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) filePathRaw = uploadFileId.split("/").tail.mkString("/") filePath = if (filePathRaw.charAt(0) == '/') filePathRaw.drop(1) else filePathRaw @@ -226,13 +182,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService, def isChunkPresent(uploadFileId: String, currentChunkNumber: Long): Fox[Boolean] = { val uploadId = extractDatasetUploadId(uploadFileId) for { - (filePath, _) <- getFilePathAndDirOfUploadId(uploadFileId) - isFileKnown <- runningUploadMetadataStore.contains(redisKeyForFileChunkCount(uploadId, filePath)) - isFilesChunkSetKnown <- Fox.runIf(isFileKnown)( - runningUploadMetadataStore.contains(redisKeyForFileChunkSet(uploadId, filePath))) + (filePath, _) <- getFilePathAndDirForUploadFileId(uploadFileId) + isFileKnown <- uploadMetadataStore.isFileKnown(uploadId, filePath) + isFilesChunkSetKnown <- Fox.runIf(isFileKnown)(uploadMetadataStore.isFileChunkSetKnown(uploadId, filePath)) isChunkPresent <- Fox.runIf(isFileKnown)( - runningUploadMetadataStore.isContainedInSet(redisKeyForFileChunkSet(uploadId, filePath), - String.valueOf(currentChunkNumber))) + uploadMetadataStore.isChunkPresent(uploadId, filePath, currentChunkNumber)) } yield isFileKnown && isFilesChunkSetKnown.getOrElse(false) && isChunkPresent.getOrElse(false) } @@ -244,19 +198,17 @@ class UploadService @Inject()(dataSourceService: DataSourceService, chunkFile: File): Fox[Unit] = { val uploadId = extractDatasetUploadId(uploadFileId) for { - datasetId <- getDatasetIdByUploadId(uploadId) - dataSourceId <- getDataSourceIdByUploadId(uploadId) - (filePath, uploadDir) <- getFilePathAndDirOfUploadId(uploadFileId) - isFileKnown <- runningUploadMetadataStore.contains(redisKeyForFileChunkCount(uploadId, filePath)) + datasetId <- uploadMetadataStore.getDatasetId(uploadId) + dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) + (filePath, uploadDir) <- getFilePathAndDirForUploadFileId(uploadFileId) + isFileKnown <- uploadMetadataStore.isFileKnown(uploadId, filePath) _ <- Fox.runIf(!isFileKnown) { - runningUploadMetadataStore - .insertIntoSet(redisKeyForFileNameSet(uploadId), filePath) - .flatMap(_ => - runningUploadMetadataStore.insert(redisKeyForFileChunkCount(uploadId, filePath), - String.valueOf(totalChunkCount))) + for { + _ <- uploadMetadataStore.insertFilePathIntoSet(uploadId, filePath) + _ <- uploadMetadataStore.insertFileChunkCount(uploadId, filePath, totalChunkCount) + } yield () } - isNewChunk <- runningUploadMetadataStore.insertIntoSet(redisKeyForFileChunkSet(uploadId, filePath), - String.valueOf(currentChunkNumber)) + isNewChunk <- uploadMetadataStore.insertFileChunkIntoSet(uploadId, filePath, currentChunkNumber) _ <- Fox.runIf(isNewChunk) { try { val bytes = Files.readAllBytes(chunkFile.toPath) @@ -272,8 +224,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, Fox.successful(()) } catch { case e: Exception => - runningUploadMetadataStore.removeFromSet(redisKeyForFileChunkSet(uploadId, filePath), - String.valueOf(currentChunkNumber)) + uploadMetadataStore.removeFileChunkFromSet(uploadId, filePath, currentChunkNumber) val errorMsg = s"Error receiving chunk $currentChunkNumber for ${uploadFullName(uploadId, datasetId, dataSourceId)}: ${e.getMessage}" logger.warn(errorMsg) @@ -286,7 +237,7 @@ class UploadService @Inject()(dataSourceService: 
DataSourceService, def cancelUpload(cancelUploadInformation: CancelUploadInformation): Fox[Unit] = { val uploadId = cancelUploadInformation.uploadId for { - dataSourceId <- getDataSourceIdByUploadId(uploadId) + dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) datasetId <- getDatasetIdByUploadId(uploadId) knownUpload <- isKnownUpload(uploadId) } yield @@ -305,7 +256,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, val uploadId = uploadInformation.uploadId for { - dataSourceId <- getDataSourceIdByUploadId(uploadId) + dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") linkedLayerIdentifiers <- getObjectFromRedis[LinkedLayerIdentifiers](redisKeyForLinkedLayerIdentifier(uploadId)) needsConversion = uploadInformation.needsConversion.getOrElse(false) @@ -759,27 +710,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { _ <- Fox.successful(logger.info(s"Cleaning up uploaded dataset. Reason: $reason")) _ <- PathUtils.deleteDirectoryRecursively(uploadDir).toFox - _ <- removeFromRedis(uploadId) - } yield () - - private def removeFromRedis(uploadId: String): Fox[Unit] = - for { - _ <- runningUploadMetadataStore.remove(redisKeyForFileCount(uploadId)) - fileNames <- runningUploadMetadataStore.findSet(redisKeyForFileNameSet(uploadId)) - _ <- Fox.serialCombined(fileNames.toList) { fileName => - for { - _ <- runningUploadMetadataStore.remove(redisKeyForFileChunkCount(uploadId, fileName)) - _ <- runningUploadMetadataStore.remove(redisKeyForFileChunkSet(uploadId, fileName)) - } yield () - } - _ <- runningUploadMetadataStore.remove(redisKeyForFileNameSet(uploadId)) - _ <- runningUploadMetadataStore.remove(redisKeyForTotalFileSizeInBytes(uploadId)) - dataSourceId <- getDataSourceIdByUploadId(uploadId) - _ <- runningUploadMetadataStore.remove(redisKeyForDataSourceId(uploadId)) - _ <- runningUploadMetadataStore.remove(redisKeyForDatasetId(uploadId)) - _ <- runningUploadMetadataStore.remove(redisKeyForLinkedLayerIdentifier(uploadId)) - _ <- runningUploadMetadataStore.remove(redisKeyForUploadId(dataSourceId)) - _ <- runningUploadMetadataStore.remove(redisKeyForFilePaths(uploadId)) + _ <- uploadMetadataStore.cleanUp(uploadId) } yield () private def cleanUpOrphanUploads(): Fox[Unit] = @@ -796,7 +727,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { uploadDirs <- PathUtils.listDirectories(orgaUploadingDir, silent = false).toFox _ <- Fox.serialCombined(uploadDirs) { uploadDir => - isKnownUpload(uploadDir.getFileName.toString).map { + uploadMetadataStore.isKnownUpload(uploadDir.getFileName.toString).map { case false => val deleteResult = PathUtils.deleteDirectoryRecursively(uploadDir) if (deleteResult.isDefined) { @@ -811,13 +742,6 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } } - private def getObjectFromRedis[T: Reads](key: String): Fox[T] = - for { - objectStringOption <- runningUploadMetadataStore.find(key) - objectString <- objectStringOption.toFox - parsed <- JsonHelper.parseAs[T](objectString).toFox - } yield parsed - } object UploadedDataSourceType extends Enumeration { From 584f4c1ff08e862c8575632c8ea58cc9a8eccdcd Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 31 Mar 2026 14:42:52 +0200 Subject: [PATCH 03/37] complete the extraction of redis interaction --- .../uploading/UploadMetadataStore.scala | 112 +++++++++--------- .../services/uploading/UploadService.scala | 44 +++---- 
.../storage/RedisTemporaryStore.scala | 27 +++-- 3 files changed, 95 insertions(+), 88 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index a00dc9aaed7..ab08eeebf46 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -1,16 +1,16 @@ package com.scalableminds.webknossos.datastore.services.uploading import com.scalableminds.util.objectid.ObjectId -import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} +import com.scalableminds.util.tools.{Fox, FoxImplicits} import com.scalableminds.webknossos.datastore.models.datasource.DataSourceId import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore -import play.api.libs.json.{Json, Reads, Writes} +import play.api.libs.json.Json import javax.inject.Inject import scala.concurrent.ExecutionContext -class UploadMetadataStore @Inject()(metadataRedisStore: DataStoreRedisStore) extends FoxImplicits { - // TODO parameterize this class by domain? (DS vs Mag vs Attachment?) +class UploadMetadataStore @Inject()(store: DataStoreRedisStore) extends FoxImplicits { + // TODO parameterize this class by domain? (DS vs Mag vs Attachment?) or even make it trait with three implementations? /* * Redis stores different information for each running upload, with different prefixes in the keys. @@ -47,99 +47,105 @@ class UploadMetadataStore @Inject()(metadataRedisStore: DataStoreRedisStore) ext s"upload___${uploadId}___filePaths" def isKnownUpload(uploadId: String): Fox[Boolean] = - metadataRedisStore.contains(redisKeyForFileCount(uploadId)) + store.contains(redisKeyForFileCount(uploadId)) def insertTotalFileCount(uploadId: String, totalFileCount: Long): Fox[Unit] = - metadataRedisStore.insert(redisKeyForFileCount(uploadId), String.valueOf(totalFileCount)) + store.insert(redisKeyForFileCount(uploadId), String.valueOf(totalFileCount)) def insertTotalFileSizeInBytes(uploadId: String, totalFileSizeInBytes: Option[Long])( implicit ec: ExecutionContext): Fox[Option[Unit]] = Fox.runOptional(totalFileSizeInBytes) { - metadataRedisStore.insertLong(redisKeyForTotalFileSizeInBytes(uploadId), _) + store.insertLong(redisKeyForTotalFileSizeInBytes(uploadId), _) } - def insertLinkedLayerIdentifiers(uploadId: String, linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]])( - implicit ec: ExecutionContext): Fox[_] = - insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) + def insertLinkedLayerIdentifiers(uploadId: String, + linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]]): Fox[_] = + store.insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) def getDataSourceId(uploadId: String)(implicit ec: ExecutionContext): Fox[DataSourceId] = - getParsed[DataSourceId](redisKeyForDataSourceId(uploadId)) + store.findParsed[DataSourceId](redisKeyForDataSourceId(uploadId)) def getDatasetId(uploadId: String)(implicit ec: ExecutionContext): Fox[ObjectId] = - getParsed[ObjectId](redisKeyForDatasetId(uploadId)) + store.findParsed[ObjectId](redisKeyForDatasetId(uploadId)) // TODO make this Fox[String]? 
def getUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = - metadataRedisStore.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) + store.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) def getFilePaths(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[String]] = - getParsed[Seq[String]](redisKeyForFilePaths(uploadId)) + store.findParsed[Seq[String]](redisKeyForFilePaths(uploadId)) + + def getLinkedLayerIdentifiers(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[LinkedLayerIdentifier]] = + store.findParsed[Seq[LinkedLayerIdentifier]](redisKeyForLinkedLayerIdentifier(uploadId)) + + // TODO make this Fox[Long]? + def getTotalFileSizeInBytes(uploadId: String): Fox[Option[Long]] = + store.findLong(redisKeyForTotalFileSizeInBytes(uploadId)) + + def getFileCount(uploadId: String): Fox[Option[Long]] = + store.findLong(redisKeyForFileCount(uploadId)) + + def getFileNames(uploadId: String): Fox[Set[String]] = + store.findSet(redisKeyForFileNameSet(uploadId)) + + def getFileChunkCount(uploadId: String, filePath: String): Fox[Option[Long]] = + store.findLong(redisKeyForFileChunkCount(uploadId, filePath)) + + def getFileChunkSet(uploadId: String, filePath: String): Fox[Set[String]] = + store.findSet(redisKeyForFileChunkSet(uploadId, filePath)) def isFileKnown(uploadId: String, filePath: String): Fox[Boolean] = - metadataRedisStore.contains(redisKeyForFileChunkCount(uploadId, filePath)) + store.contains(redisKeyForFileChunkCount(uploadId, filePath)) def isFileChunkSetKnown(uploadId: String, filePath: String): Fox[Boolean] = - metadataRedisStore.contains(redisKeyForFileChunkSet(uploadId, filePath)) + store.contains(redisKeyForFileChunkSet(uploadId, filePath)) def isChunkPresent(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = - metadataRedisStore.isContainedInSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + store.isContainedInSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) def insertFilePathIntoSet(uploadId: String, filePath: String): Fox[Boolean] = - metadataRedisStore.insertIntoSet(redisKeyForFileNameSet(uploadId), filePath) + store.insertIntoSet(redisKeyForFileNameSet(uploadId), filePath) def insertFileChunkCount(uploadId: String, filePath: String, totalChunkCount: Long): Fox[Unit] = - metadataRedisStore.insert(redisKeyForFileChunkCount(uploadId, filePath), String.valueOf(totalChunkCount)) + store.insert(redisKeyForFileChunkCount(uploadId, filePath), String.valueOf(totalChunkCount)) def insertFileChunkIntoSet(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = - metadataRedisStore.insertIntoSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + store.insertIntoSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) def removeFileChunkFromSet(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = - metadataRedisStore.removeFromSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + store.removeFromSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) - def insertDatasetId(uploadId: String, datasetId: ObjectId)(implicit ec: ExecutionContext): Fox[Unit] = - insertSerialized(redisKeyForDatasetId(uploadId), datasetId) + def insertDatasetId(uploadId: String, datasetId: ObjectId): Fox[Unit] = + store.insertSerialized(redisKeyForDatasetId(uploadId), datasetId) - def insertDataSourceId(uploadId: String, dataSourceId: DataSourceId)(implicit ec: 
ExecutionContext): Fox[Unit] = - insertSerialized(redisKeyForDataSourceId(uploadId), dataSourceId) + def insertDataSourceId(uploadId: String, dataSourceId: DataSourceId): Fox[Unit] = + store.insertSerialized(redisKeyForDataSourceId(uploadId), dataSourceId) // Only here the uploadId is not key but value. This is used to re-connect to unfinished uploads. - def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String)( - implicit ec: ExecutionContext): Fox[Unit] = - insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) + def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String): Fox[Unit] = + store.insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) - def insertFilePaths(uploadId: String, filePaths: Option[Seq[String]])(implicit ec: ExecutionContext): Fox[Unit] = - insertSerialized(redisKeyForFilePaths(uploadId), filePaths.getOrElse(Seq.empty)) + def insertFilePaths(uploadId: String, filePaths: Option[Seq[String]]): Fox[Unit] = + store.insertSerialized(redisKeyForFilePaths(uploadId), filePaths.getOrElse(Seq.empty)) def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = for { - _ <- metadataRedisStore.remove(redisKeyForFileCount(uploadId)) - fileNames <- metadataRedisStore.findSet(redisKeyForFileNameSet(uploadId)) + _ <- store.remove(redisKeyForFileCount(uploadId)) + fileNames <- store.findSet(redisKeyForFileNameSet(uploadId)) _ <- Fox.serialCombined(fileNames.toList) { fileName => for { - _ <- metadataRedisStore.remove(redisKeyForFileChunkCount(uploadId, fileName)) - _ <- metadataRedisStore.remove(redisKeyForFileChunkSet(uploadId, fileName)) + _ <- store.remove(redisKeyForFileChunkCount(uploadId, fileName)) + _ <- store.remove(redisKeyForFileChunkSet(uploadId, fileName)) } yield () } - _ <- metadataRedisStore.remove(redisKeyForFileNameSet(uploadId)) - _ <- metadataRedisStore.remove(redisKeyForTotalFileSizeInBytes(uploadId)) + _ <- store.remove(redisKeyForFileNameSet(uploadId)) + _ <- store.remove(redisKeyForTotalFileSizeInBytes(uploadId)) dataSourceId <- getDataSourceId(uploadId) - _ <- metadataRedisStore.remove(redisKeyForDataSourceId(uploadId)) - _ <- metadataRedisStore.remove(redisKeyForDatasetId(uploadId)) - _ <- metadataRedisStore.remove(redisKeyForLinkedLayerIdentifier(uploadId)) - _ <- metadataRedisStore.remove(redisKeyForUploadIdByDataSourceId(dataSourceId)) - _ <- metadataRedisStore.remove(redisKeyForFilePaths(uploadId)) + _ <- store.remove(redisKeyForDataSourceId(uploadId)) + _ <- store.remove(redisKeyForDatasetId(uploadId)) + _ <- store.remove(redisKeyForLinkedLayerIdentifier(uploadId)) + _ <- store.remove(redisKeyForUploadIdByDataSourceId(dataSourceId)) + _ <- store.remove(redisKeyForFilePaths(uploadId)) } yield () - private def getParsed[T: Reads](key: String)(implicit ec: ExecutionContext): Fox[T] = - for { - objectStringOption <- metadataRedisStore.find(key) - objectString <- objectStringOption.toFox - parsed <- JsonHelper.parseAs[T](objectString).toFox - } yield parsed - - private def insertSerialized[T: Writes](key: String, value: T)(implicit ec: ExecutionContext): Fox[Unit] = { - val serialized = Json.stringify(Json.toJson(value)) - metadataRedisStore.insert(key, serialized) - } - } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 5fc7cdf395e..d7391bde3e4 100644 --- 
a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -21,10 +21,10 @@ import com.scalableminds.webknossos.datastore.models.UnfinishedUpload import com.scalableminds.webknossos.datastore.models.datasource.UsableDataSource.FILENAME_DATASOURCE_PROPERTIES_JSON import com.scalableminds.webknossos.datastore.models.datasource._ import com.scalableminds.webknossos.datastore.services.{DSRemoteWebknossosClient, DataSourceService, ManagedS3Service} -import com.scalableminds.webknossos.datastore.storage.{DataStoreRedisStore, DataVaultService} +import com.scalableminds.webknossos.datastore.storage.DataVaultService import com.typesafe.scalalogging.LazyLogging import org.apache.commons.io.FileUtils -import play.api.libs.json.{Json, OFormat, Reads} +import play.api.libs.json.{Json, OFormat} import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest import java.io.{File, RandomAccessFile} @@ -74,11 +74,6 @@ object LinkedLayerIdentifier { implicit val jsonFormat: OFormat[LinkedLayerIdentifier] = Json.format[LinkedLayerIdentifier] } -case class LinkedLayerIdentifiers(layersToLink: Option[List[LinkedLayerIdentifier]]) -object LinkedLayerIdentifiers { - implicit val jsonFormat: OFormat[LinkedLayerIdentifiers] = Json.format[LinkedLayerIdentifiers] -} - case class UploadInformation(uploadId: String, needsConversion: Option[Boolean]) object UploadInformation { @@ -258,7 +253,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") - linkedLayerIdentifiers <- getObjectFromRedis[LinkedLayerIdentifiers](redisKeyForLinkedLayerIdentifier(uploadId)) + linkedLayerIdentifiers <- uploadMetadataStore.getLinkedLayerIdentifiers(uploadId) needsConversion = uploadInformation.needsConversion.getOrElse(false) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox @@ -281,14 +276,13 @@ class UploadService @Inject()(dataSourceService: DataSourceService, label = s"processing dataset at $unpackToDir") datasetSizeBytes <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(new File(unpackToDir.toString)).longValue).toFox ?~> "dataset.upload.measureTotalSize.failed" dataSourceWithAbsolutePathsOpt <- moveUnpackedToTarget(unpackToDir, needsConversion, datasetId, dataSourceId) ?~> "dataset.upload.moveUnpackedToTarget.failed" - _ <- remoteWebknossosClient.reportUpload( datasetId, ReportDatasetUploadParameters( uploadInformation.needsConversion.getOrElse(false), datasetSizeBytes, dataSourceWithAbsolutePathsOpt, - linkedLayerIdentifiers.layersToLink.getOrElse(List.empty) + linkedLayerIdentifiers ) ) ?~> "dataset.upload.reportUpload.failed" } yield () @@ -296,11 +290,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def checkWithinRequestedFileSize(uploadDir: Path, uploadId: String, datasetId: ObjectId): Fox[Unit] = for { - totalFileSizeInBytesOpt <- runningUploadMetadataStore.find(redisKeyForTotalFileSizeInBytes(uploadId)) ?~> "Could not look up reserved total file size" + totalFileSizeInBytesOpt <- uploadMetadataStore.getTotalFileSizeInBytes(uploadId) ?~> "Could not look up reserved total file size" _ <- totalFileSizeInBytesOpt.map { 
reservedFileSize => for { actualFileSize <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(uploadDir.toFile).longValue).toFox ?~> "Could not measure actual file size" - _ <- if (actualFileSize > reservedFileSize.toLong) { + _ <- if (actualFileSize > reservedFileSize) { cleanUpDatasetExceedingSize(uploadDir, uploadId) Fox.failure( f"Uploaded dataset $datasetId exceeds the reserved size of $reservedFileSize bytes, got $actualFileSize bytes.") @@ -512,20 +506,18 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def checkAllChunksUploaded(uploadId: String): Fox[Unit] = for { - fileCountStringOpt <- runningUploadMetadataStore.find(redisKeyForFileCount(uploadId)) ?~> "Could not look up reserved file count" - fileCountString <- fileCountStringOpt.toFox ?~> "dataset.upload.noFiles" - fileCount <- tryo(fileCountString.toLong).toFox ?~> "Could not look up reserved file count (toLong)" - fileNames <- runningUploadMetadataStore.findSet(redisKeyForFileNameSet(uploadId)) ?~> "Could not look up reserved file names" - _ <- Fox.fromBool(fileCount == fileNames.size) - list <- Fox.serialCombined(fileNames.toList) { fileName => - val chunkCount = - runningUploadMetadataStore - .find(redisKeyForFileChunkCount(uploadId, fileName)) - .map(s => s.getOrElse("").toLong) - val chunks = runningUploadMetadataStore.findSet(redisKeyForFileChunkSet(uploadId, fileName)) - chunks.flatMap(set => chunkCount.map(_ == set.size)) - } ?~> "Could not look up reserved file sizes" - _ <- Fox.fromBool(list.forall(identity)) + fileCountOpt <- uploadMetadataStore.getFileCount(uploadId) ?~> "Could not look up reserved file count." + fileCount <- fileCountOpt.toFox ?~> "Could not look up reserved file count." + fileNames <- uploadMetadataStore.getFileNames(uploadId) ?~> "Could not look up reserved file names." + _ <- Fox.fromBool(fileCount == fileNames.size) ?~> "Reserved file count does not match file names length." + _ <- Fox.serialCombined(fileNames) { fileName => + for { + chunkCountOpt <- uploadMetadataStore.getFileChunkCount(uploadId, fileName) ?~> "Could not look up file chunk count." + chunkCount <- chunkCountOpt.toFox + chunkSet <- uploadMetadataStore.getFileChunkSet(uploadId, fileName) ?~> "Could not look up file chunk set." + _ <- Fox.fromBool(chunkCount == chunkSet.size) ?~> s"Chunks missing for uploaded file $fileName: expected $chunkCount, got ${chunkSet.size}." 
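+        // A single missing chunk for any file makes the whole check fail.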
+ } yield () + } } yield () private def unpackToDirFor(dataSourceId: DataSourceId): Path = diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala index ef6fa4dd269..a2afc42be79 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala @@ -1,13 +1,14 @@ package com.scalableminds.webknossos.datastore.storage import com.redis._ -import com.scalableminds.util.tools.Fox +import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} import com.typesafe.scalalogging.LazyLogging +import play.api.libs.json.{Json, Reads, Writes} import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration -trait RedisTemporaryStore extends LazyLogging { +trait RedisTemporaryStore extends LazyLogging with FoxImplicits { implicit def ec: ExecutionContext protected def address: String protected def port: Int @@ -48,16 +49,12 @@ trait RedisTemporaryStore extends LazyLogging { def insert(id: String, value: String, expirationOpt: Option[FiniteDuration] = None): Fox[Unit] = withExceptionHandler { client => - expirationOpt - .map(expiration => client.setex(id, expiration.toSeconds, value)) - .getOrElse(client.set(id, value)) + expirationOpt.map(expiration => client.setex(id, expiration.toSeconds, value)).getOrElse(client.set(id, value)) } def insertLong(id: String, value: Long, expirationOpt: Option[FiniteDuration] = None): Fox[Unit] = withExceptionHandler { client => - expirationOpt - .map(expiration => client.setex(id, expiration.toSeconds, value)) - .getOrElse(client.set(id, value)) + expirationOpt.map(expiration => client.setex(id, expiration.toSeconds, value)).getOrElse(client.set(id, value)) } def contains(id: String): Fox[Boolean] = @@ -66,7 +63,7 @@ trait RedisTemporaryStore extends LazyLogging { def remove(id: String): Fox[Unit] = withExceptionHandler(_.del(id)) - def checkHealth(implicit ec: ExecutionContext): Fox[Unit] = + def checkHealth: Fox[Unit] = withExceptionHandler { client => val reply = client.ping if (!reply.contains("PONG")) throw new Exception(reply.getOrElse("No Reply")) @@ -85,6 +82,18 @@ trait RedisTemporaryStore extends LazyLogging { def findSet(id: String): Fox[Set[String]] = withExceptionHandler(_.smembers(id).map(_.flatten).getOrElse(Set.empty)) + def findParsed[T: Reads](key: String)(implicit ec: ExecutionContext): Fox[T] = + for { + objectStringOption <- find(key) + objectString <- objectStringOption.toFox + parsed <- JsonHelper.parseAs[T](objectString).toFox + } yield parsed + + def insertSerialized[T: Writes](key: String, value: T): Fox[Unit] = { + val serialized = Json.stringify(Json.toJson(value)) + insert(key, serialized) + } + private def withExceptionHandler[B](f: RedisClient => B): Fox[B] = try { r.withClient { client => From 0a9cc6906f973087e9b72992942006247ada5ceb Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 31 Mar 2026 15:02:19 +0200 Subject: [PATCH 04/37] three metadata stores --- .../uploading/UploadMetadataStore.scala | 124 +++++++++++------- .../services/uploading/UploadService.scala | 30 ++--- 2 files changed, 89 insertions(+), 65 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index ab08eeebf46..bdc4ac400d7 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -9,89 +9,67 @@ import play.api.libs.json.Json import javax.inject.Inject import scala.concurrent.ExecutionContext -class UploadMetadataStore @Inject()(store: DataStoreRedisStore) extends FoxImplicits { - // TODO parameterize this class by domain? (DS vs Mag vs Attachment?) or even make it trait with three implementations? +trait UploadMetadataStore extends FoxImplicits { + + protected def domain: String + protected def store: DataStoreRedisStore + + protected def keyPrefix = s"upload___${domain}___" /* * Redis stores different information for each running upload, with different prefixes in the keys. * Note that Redis synchronizes all db accesses, so we do not need to do it. */ private def redisKeyForFileCount(uploadId: String): String = - s"upload___${uploadId}___fileCount" + s"$keyPrefix${uploadId}___fileCount" private def redisKeyForTotalFileSizeInBytes(uploadId: String): String = - s"upload___${uploadId}___totalFileSizeInBytes" + s"$keyPrefix${uploadId}___totalFileSizeInBytes" private def redisKeyForFileNameSet(uploadId: String): String = - s"upload___${uploadId}___fileNameSet" - - private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = - s"upload___${uploadId}___linkedLayerIdentifier" + s"$keyPrefix${uploadId}___fileNameSet" private def redisKeyForFileChunkCount(uploadId: String, fileName: String): String = - s"upload___${uploadId}___file___${fileName}___chunkCount" + s"$keyPrefix${uploadId}___file___${fileName}___chunkCount" private def redisKeyForFileChunkSet(uploadId: String, fileName: String): String = - s"upload___${uploadId}___file___${fileName}___chunkSet" - - private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = - s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" + s"$keyPrefix${uploadId}___file___${fileName}___chunkSet" private def redisKeyForDataSourceId(uploadId: String): String = - s"upload___${uploadId}___dataSourceId" + s"$keyPrefix${uploadId}___dataSourceId" private def redisKeyForDatasetId(uploadId: String): String = - s"upload___${uploadId}___datasetId" + s"$keyPrefix${uploadId}___datasetId" private def redisKeyForFilePaths(uploadId: String): String = - s"upload___${uploadId}___filePaths" + s"$keyPrefix${uploadId}___filePaths" def isKnownUpload(uploadId: String): Fox[Boolean] = store.contains(redisKeyForFileCount(uploadId)) - def insertTotalFileCount(uploadId: String, totalFileCount: Long): Fox[Unit] = - store.insert(redisKeyForFileCount(uploadId), String.valueOf(totalFileCount)) - - def insertTotalFileSizeInBytes(uploadId: String, totalFileSizeInBytes: Option[Long])( - implicit ec: ExecutionContext): Fox[Option[Unit]] = - Fox.runOptional(totalFileSizeInBytes) { - store.insertLong(redisKeyForTotalFileSizeInBytes(uploadId), _) - } - - def insertLinkedLayerIdentifiers(uploadId: String, - linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]]): Fox[_] = - store.insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) - - def getDataSourceId(uploadId: String)(implicit ec: ExecutionContext): Fox[DataSourceId] = + def findDataSourceId(uploadId: String)(implicit ec: 
ExecutionContext): Fox[DataSourceId] = store.findParsed[DataSourceId](redisKeyForDataSourceId(uploadId)) - def getDatasetId(uploadId: String)(implicit ec: ExecutionContext): Fox[ObjectId] = + def findDatasetId(uploadId: String)(implicit ec: ExecutionContext): Fox[ObjectId] = store.findParsed[ObjectId](redisKeyForDatasetId(uploadId)) - // TODO make this Fox[String]? - def getUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = - store.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) - - def getFilePaths(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[String]] = + def findFilePaths(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[String]] = store.findParsed[Seq[String]](redisKeyForFilePaths(uploadId)) - def getLinkedLayerIdentifiers(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[LinkedLayerIdentifier]] = - store.findParsed[Seq[LinkedLayerIdentifier]](redisKeyForLinkedLayerIdentifier(uploadId)) - // TODO make this Fox[Long]? - def getTotalFileSizeInBytes(uploadId: String): Fox[Option[Long]] = + def findTotalFileSizeInBytes(uploadId: String): Fox[Option[Long]] = store.findLong(redisKeyForTotalFileSizeInBytes(uploadId)) - def getFileCount(uploadId: String): Fox[Option[Long]] = + def findFileCount(uploadId: String): Fox[Option[Long]] = store.findLong(redisKeyForFileCount(uploadId)) - def getFileNames(uploadId: String): Fox[Set[String]] = + def findFileNames(uploadId: String): Fox[Set[String]] = store.findSet(redisKeyForFileNameSet(uploadId)) - def getFileChunkCount(uploadId: String, filePath: String): Fox[Option[Long]] = + def findFileChunkCount(uploadId: String, filePath: String): Fox[Option[Long]] = store.findLong(redisKeyForFileChunkCount(uploadId, filePath)) - def getFileChunkSet(uploadId: String, filePath: String): Fox[Set[String]] = + def findFileChunkSet(uploadId: String, filePath: String): Fox[Set[String]] = store.findSet(redisKeyForFileChunkSet(uploadId, filePath)) def isFileKnown(uploadId: String, filePath: String): Fox[Boolean] = @@ -103,6 +81,15 @@ class UploadMetadataStore @Inject()(store: DataStoreRedisStore) extends FoxImpli def isChunkPresent(uploadId: String, filePath: String, chunkNumber: Long): Fox[Boolean] = store.isContainedInSet(redisKeyForFileChunkSet(uploadId, filePath), String.valueOf(chunkNumber)) + def insertTotalFileCount(uploadId: String, totalFileCount: Long): Fox[Unit] = + store.insert(redisKeyForFileCount(uploadId), String.valueOf(totalFileCount)) + + def insertTotalFileSizeInBytes(uploadId: String, totalFileSizeInBytes: Option[Long])( + implicit ec: ExecutionContext): Fox[Option[Unit]] = + Fox.runOptional(totalFileSizeInBytes) { + store.insertLong(redisKeyForTotalFileSizeInBytes(uploadId), _) + } + def insertFilePathIntoSet(uploadId: String, filePath: String): Fox[Boolean] = store.insertIntoSet(redisKeyForFileNameSet(uploadId), filePath) @@ -121,10 +108,6 @@ class UploadMetadataStore @Inject()(store: DataStoreRedisStore) extends FoxImpli def insertDataSourceId(uploadId: String, dataSourceId: DataSourceId): Fox[Unit] = store.insertSerialized(redisKeyForDataSourceId(uploadId), dataSourceId) - // Only here the uploadId is not key but value. This is used to re-connect to unfinished uploads. 
- def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String): Fox[Unit] = - store.insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) - def insertFilePaths(uploadId: String, filePaths: Option[Seq[String]]): Fox[Unit] = store.insertSerialized(redisKeyForFilePaths(uploadId), filePaths.getOrElse(Seq.empty)) @@ -140,12 +123,53 @@ class UploadMetadataStore @Inject()(store: DataStoreRedisStore) extends FoxImpli } _ <- store.remove(redisKeyForFileNameSet(uploadId)) _ <- store.remove(redisKeyForTotalFileSizeInBytes(uploadId)) - dataSourceId <- getDataSourceId(uploadId) _ <- store.remove(redisKeyForDataSourceId(uploadId)) _ <- store.remove(redisKeyForDatasetId(uploadId)) + _ <- store.remove(redisKeyForFilePaths(uploadId)) + } yield () + +} + +class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { + protected val domain = "dataset" + + private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = + s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" + + private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = + s"upload___${uploadId}___linkedLayerIdentifier" + + // TODO make this Fox[String]? + def findUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = + store.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) + + def findLinkedLayerIdentifiers(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[LinkedLayerIdentifier]] = + store.findParsed[Seq[LinkedLayerIdentifier]](redisKeyForLinkedLayerIdentifier(uploadId)) + + // Only here the uploadId is not key but value. This is used to re-connect to unfinished uploads. + def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String): Fox[Unit] = + store.insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) + + def insertLinkedLayerIdentifiers(uploadId: String, + linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]]): Fox[_] = + store.insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) + + override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = + for { + dataSourceId <- findDataSourceId(uploadId) _ <- store.remove(redisKeyForLinkedLayerIdentifier(uploadId)) _ <- store.remove(redisKeyForUploadIdByDataSourceId(dataSourceId)) - _ <- store.remove(redisKeyForFilePaths(uploadId)) + _ <- super.cleanUp(uploadId) } yield () } + +class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { + protected val domain = "mag" + +} + +class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { + protected val domain = "attachment" + +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index d7391bde3e4..9821efe3a99 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -86,7 +86,7 @@ object CancelUploadInformation { } class UploadService @Inject()(dataSourceService: DataSourceService, - uploadMetadataStore: UploadMetadataStore, + uploadMetadataStore: DatasetUploadMetadataStore, dataVaultService: DataVaultService, 
exploreLocalLayerService: ExploreLocalLayerService, dataStoreConfig: DataStoreConfig, @@ -109,7 +109,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, uploadMetadataStore.isKnownUpload(uploadId) def getDatasetIdByUploadId(uploadId: String): Fox[ObjectId] = - uploadMetadataStore.getDatasetId(uploadId) + uploadMetadataStore.findDatasetId(uploadId) def extractDatasetUploadId(uploadFileId: String): String = uploadFileId.split("/").headOption.getOrElse("") @@ -147,11 +147,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService, unfinishedUploadsWithoutIds.map( unfinishedUpload => { for { - uploadIdOpt <- uploadMetadataStore.getUploadIdByDataSourceId(unfinishedUpload.dataSourceId) + uploadIdOpt <- uploadMetadataStore.findUploadIdByDataSourceId(unfinishedUpload.dataSourceId) updatedUploadOpt = uploadIdOpt.map(uploadId => unfinishedUpload.copy(uploadId = uploadId)) updatedUploadWithFilePathsOpt <- Fox.runOptional(updatedUploadOpt)(updatedUpload => for { - filePaths <- uploadMetadataStore.getFilePaths(updatedUpload.uploadId) + filePaths <- uploadMetadataStore.findFilePaths(updatedUpload.uploadId) uploadUpdatedWithFilePaths = updatedUpload.copy(filePaths = Some(filePaths)) } yield uploadUpdatedWithFilePaths) } yield updatedUploadWithFilePathsOpt @@ -166,7 +166,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def getFilePathAndDirForUploadFileId(uploadFileId: String): Fox[(String, Path)] = { val uploadId = extractDatasetUploadId(uploadFileId) for { - dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) + dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) filePathRaw = uploadFileId.split("/").tail.mkString("/") filePath = if (filePathRaw.charAt(0) == '/') filePathRaw.drop(1) else filePathRaw @@ -193,8 +193,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService, chunkFile: File): Fox[Unit] = { val uploadId = extractDatasetUploadId(uploadFileId) for { - datasetId <- uploadMetadataStore.getDatasetId(uploadId) - dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) + datasetId <- uploadMetadataStore.findDatasetId(uploadId) + dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) (filePath, uploadDir) <- getFilePathAndDirForUploadFileId(uploadFileId) isFileKnown <- uploadMetadataStore.isFileKnown(uploadId, filePath) _ <- Fox.runIf(!isFileKnown) { @@ -232,7 +232,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, def cancelUpload(cancelUploadInformation: CancelUploadInformation): Fox[Unit] = { val uploadId = cancelUploadInformation.uploadId for { - dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) + dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) datasetId <- getDatasetIdByUploadId(uploadId) knownUpload <- isKnownUpload(uploadId) } yield @@ -251,9 +251,9 @@ class UploadService @Inject()(dataSourceService: DataSourceService, val uploadId = uploadInformation.uploadId for { - dataSourceId <- uploadMetadataStore.getDataSourceId(uploadId) + dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") - linkedLayerIdentifiers <- uploadMetadataStore.getLinkedLayerIdentifiers(uploadId) + linkedLayerIdentifiers <- uploadMetadataStore.findLinkedLayerIdentifiers(uploadId) needsConversion = uploadInformation.needsConversion.getOrElse(false) uploadDir = 
uploadDirectoryFor(dataSourceId.organizationId, uploadId) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox @@ -290,7 +290,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def checkWithinRequestedFileSize(uploadDir: Path, uploadId: String, datasetId: ObjectId): Fox[Unit] = for { - totalFileSizeInBytesOpt <- uploadMetadataStore.getTotalFileSizeInBytes(uploadId) ?~> "Could not look up reserved total file size" + totalFileSizeInBytesOpt <- uploadMetadataStore.findTotalFileSizeInBytes(uploadId) ?~> "Could not look up reserved total file size" _ <- totalFileSizeInBytesOpt.map { reservedFileSize => for { actualFileSize <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(uploadDir.toFile).longValue).toFox ?~> "Could not measure actual file size" @@ -506,15 +506,15 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def checkAllChunksUploaded(uploadId: String): Fox[Unit] = for { - fileCountOpt <- uploadMetadataStore.getFileCount(uploadId) ?~> "Could not look up reserved file count." + fileCountOpt <- uploadMetadataStore.findFileCount(uploadId) ?~> "Could not look up reserved file count." fileCount <- fileCountOpt.toFox ?~> "Could not look up reserved file count." - fileNames <- uploadMetadataStore.getFileNames(uploadId) ?~> "Could not look up reserved file names." + fileNames <- uploadMetadataStore.findFileNames(uploadId) ?~> "Could not look up reserved file names." _ <- Fox.fromBool(fileCount == fileNames.size) ?~> "Reserved file count does not match file names length." _ <- Fox.serialCombined(fileNames) { fileName => for { - chunkCountOpt <- uploadMetadataStore.getFileChunkCount(uploadId, fileName) ?~> "Could not look up file chunk count." + chunkCountOpt <- uploadMetadataStore.findFileChunkCount(uploadId, fileName) ?~> "Could not look up file chunk count." chunkCount <- chunkCountOpt.toFox - chunkSet <- uploadMetadataStore.getFileChunkSet(uploadId, fileName) ?~> "Could not look up file chunk set." + chunkSet <- uploadMetadataStore.findFileChunkSet(uploadId, fileName) ?~> "Could not look up file chunk set." _ <- Fox.fromBool(chunkCount == chunkSet.size) ?~> s"Chunks missing for uploaded file $fileName: expected $chunkCount, got ${chunkSet.size}." 
} yield () } From 93d657955b423ee1c821bce5161082e73f7f9caa Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 1 Apr 2026 14:12:49 +0200 Subject: [PATCH 05/37] WIP uploadDomain --- .../controllers/DSLegacyApiController.scala | 12 +- .../controllers/DataSourceController.scala | 143 +------------- .../controllers/UploadController.scala | 180 ++++++++++++++++++ .../services/uploading/UploadDomain.scala | 8 + .../uploading/UploadMetadataStore.scala | 9 +- .../services/uploading/UploadService.scala | 106 +++++++---- .../conf/datastore.latest.routes | 14 +- 7 files changed, 283 insertions(+), 189 deletions(-) create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadDomain.scala diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala index d4bf8cfb4fd..f1057e6c996 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala @@ -12,7 +12,11 @@ import com.scalableminds.webknossos.datastore.models.{ } import com.scalableminds.webknossos.datastore.models.datasource.{UnusableDataSource, UsableDataSource} import com.scalableminds.webknossos.datastore.services.mesh.FullMeshRequest -import com.scalableminds.webknossos.datastore.services.uploading.{LinkedLayerIdentifier, ReserveUploadInformation} +import com.scalableminds.webknossos.datastore.services.uploading.{ + LinkedLayerIdentifier, + ReserveUploadInformation, + UploadDomain +} import com.scalableminds.webknossos.datastore.services.{ DSRemoteWebknossosClient, DataSourceService, @@ -81,7 +85,8 @@ class DSLegacyApiController @Inject()( meshController: DSMeshController, dataSourceController: DataSourceController, dataSourceService: DataSourceService, - datasetCache: DatasetCache + datasetCache: DatasetCache, + uploadController: UploadController )(implicit ec: ExecutionContext, bodyParsers: PlayBodyParsers) extends Controller with Zarr3OutputHelper @@ -110,7 +115,8 @@ class DSLegacyApiController @Inject()( isVirtual = None, needsConversion = None ) - result <- Fox.fromFuture(dataSourceController.reserveUpload()(request.withBody(adaptedRequestBody))) + result <- Fox.fromFuture( + uploadController.reserveUpload(UploadDomain.dataset.toString)(request.withBody(adaptedRequestBody))) } yield result } } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala index 7fe80d1e4dc..b94173e07a3 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala @@ -31,7 +31,6 @@ import com.scalableminds.webknossos.datastore.services.mesh.{ MeshMappingHelper } import com.scalableminds.webknossos.datastore.services.segmentindex.SegmentIndexFileService -import com.scalableminds.webknossos.datastore.services.uploading._ import com.scalableminds.webknossos.datastore.services.connectome.{ ByAgglomerateIdsRequest, BySynapseIdsRequest, @@ -39,13 +38,9 @@ import 
com.scalableminds.webknossos.datastore.services.connectome.{ } import com.scalableminds.webknossos.datastore.services.mapping.AgglomerateService import com.scalableminds.webknossos.datastore.storage.DataVaultService -import com.scalableminds.webknossos.datastore.slacknotification.DSSlackNotificationService -import play.api.data.Form -import play.api.data.Forms.{longNumber, nonEmptyText, number, tuple} -import play.api.i18n.Messages -import play.api.libs.Files + import play.api.libs.json.{Json, OFormat} -import play.api.mvc.{Action, AnyContent, MultipartFormData, PlayBodyParsers} +import play.api.mvc.{Action, AnyContent, PlayBodyParsers} import java.io.File import java.net.URI @@ -71,11 +66,9 @@ class DataSourceController @Inject()( segmentIndexFileService: SegmentIndexFileService, agglomerateService: AgglomerateService, storageUsageService: DSUsedStorageService, - slackNotificationService: DSSlackNotificationService, datasetErrorLoggingService: DSDatasetErrorLoggingService, exploreRemoteLayerService: ExploreRemoteLayerService, fullMeshService: DSFullMeshService, - uploadService: UploadService, managedS3Service: ManagedS3Service, meshFileService: MeshFileService, dataVaultService: DataVaultService, @@ -105,138 +98,6 @@ class DataSourceController @Inject()( } } - def reserveUpload(): Action[ReserveUploadInformation] = - Action.async(validateJson[ReserveUploadInformation]) { implicit request => - accessTokenService.validateAccessFromTokenContext( - UserAccessRequest.administrateDatasets(request.body.organization)) { - for { - isKnownUpload <- uploadService.isKnownUpload(request.body.uploadId) - _ <- if (!isKnownUpload) { - for { - reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDataSourceUpload(request.body) ?~> "dataset.upload.validation.failed" - _ <- uploadService.reserveUpload(request.body, - reserveUploadAdditionalInfo.newDatasetId, - reserveUploadAdditionalInfo.directoryName) - } yield () - } else Fox.successful(()) - } yield Ok - } - } - - def getUnfinishedUploads(organizationName: String): Action[AnyContent] = - Action.async { implicit request => - accessTokenService.validateAccessFromTokenContext(UserAccessRequest.administrateDatasets(organizationName)) { - for { - unfinishedUploads <- dsRemoteWebknossosClient.getUnfinishedUploadsForUser(organizationName) - unfinishedUploadsWithUploadIds <- Fox.fromFuture( - uploadService.enrichUnfinishedUploadInfoWithUploadIds(unfinishedUploads)) - unfinishedUploadsWithUploadIdsWithoutDataSourceId = unfinishedUploadsWithUploadIds.map(_.withoutDataSourceId) - } yield Ok(Json.toJson(unfinishedUploadsWithUploadIdsWithoutDataSourceId)) - } - } - - /* Upload a byte chunk for a new dataset - Expects: - - As file attachment: A raw byte chunk of the dataset - - As form parameter: - - name (string): dataset name - - owningOrganization (string): owning organization name - - resumableChunkNumber (int): chunk index - - resumableChunkSize (int): chunk size in bytes - - resumableTotalChunks (string): total chunk count of the upload - - totalFileCount (string): total file count of the upload - - resumableIdentifier (string): identifier of the resumable upload and file ("{uploadId}/{filepath}") - - As GET parameter: - - token (string): datastore token identifying the uploading user - */ - def uploadChunk(): Action[MultipartFormData[Files.TemporaryFile]] = - Action.async(parse.multipartFormData) { implicit request => - log(Some(slackNotificationService.noticeFailedUploadRequest)) { - val uploadForm = Form( - tuple( - "resumableChunkNumber" -> number, 
- "resumableChunkSize" -> number, - "resumableCurrentChunkSize" -> number, - "resumableTotalChunks" -> longNumber, - "resumableIdentifier" -> nonEmptyText - )).fill((-1, -1, -1, -1, "")) - - uploadForm - .bindFromRequest(request.body.dataParts) - .fold( - hasErrors = formWithErrors => Fox.successful(JsonBadRequest(formWithErrors.errors.head.message)), - success = { - case (chunkNumber, chunkSize, currentChunkSize, totalChunkCount, uploadFileId) => - for { - datasetId <- uploadService - .getDatasetIdByUploadId(uploadService.extractDatasetUploadId(uploadFileId)) ?~> "dataset.upload.validation.failed" - result <- accessTokenService - .validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { - for { - isKnownUpload <- uploadService.isKnownUploadByFileId(uploadFileId) - _ <- Fox.fromBool(isKnownUpload) ?~> "dataset.upload.validation.failed" - chunkFile <- request.body.file("file").toFox ?~> "zip.file.notFound" - _ <- uploadService.handleUploadChunk(uploadFileId, - chunkSize, - currentChunkSize, - totalChunkCount, - chunkNumber, - new File(chunkFile.ref.path.toString)) - } yield Ok - } - } yield result - } - ) - } - } - - def testChunk(resumableChunkNumber: Int, resumableIdentifier: String): Action[AnyContent] = - Action.async { implicit request => - for { - datasetId <- uploadService.getDatasetIdByUploadId(uploadService.extractDatasetUploadId(resumableIdentifier)) ?~> "dataset.upload.validation.failed" - result <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { - for { - isKnownUpload <- uploadService.isKnownUploadByFileId(resumableIdentifier) - _ <- Fox.fromBool(isKnownUpload) ?~> "dataset.upload.validation.failed" - isPresent <- uploadService.isChunkPresent(resumableIdentifier, resumableChunkNumber) - } yield if (isPresent) Ok else NoContent - } - } yield result - } - - def finishUpload(): Action[UploadInformation] = Action.async(validateJson[UploadInformation]) { implicit request => - log(Some(slackNotificationService.noticeFailedUploadRequest)) { - logTime(slackNotificationService.noticeSlowRequest) { - for { - datasetId <- uploadService - .getDatasetIdByUploadId(request.body.uploadId) ?~> s"Cannot find running upload with upload id ${request.body.uploadId}" - response <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { - for { - _ <- uploadService.finishUpload(request.body, datasetId) ?~> Messages("dataset.upload.finishFailed", - datasetId) - } yield Ok(Json.obj("newDatasetId" -> datasetId)) - } - } yield response - } - } - } - - def cancelUpload(): Action[CancelUploadInformation] = - Action.async(validateJson[CancelUploadInformation]) { implicit request => - val datasetIdFox = uploadService.isKnownUpload(request.body.uploadId).flatMap { - case false => Fox.failure("dataset.upload.validation.failed") - case true => uploadService.getDatasetIdByUploadId(request.body.uploadId) - } - datasetIdFox.flatMap { datasetId => - accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) { - for { - _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed" - _ <- uploadService.cancelUpload(request.body) ?~> "Could not cancel the upload." 
-          } yield Ok
-        }
-      }
-    }
-
   def listMappings(
       datasetId: ObjectId,
       dataLayerName: String
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
new file mode 100644
index 00000000000..f5b827df039
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
@@ -0,0 +1,180 @@
+package com.scalableminds.webknossos.datastore.controllers
+
+import com.scalableminds.util.tools.Fox
+import com.scalableminds.webknossos.datastore.services.{AccessTokenService, DSRemoteWebknossosClient, UserAccessRequest}
+import com.scalableminds.webknossos.datastore.services.uploading.{
+  CancelUploadInformation,
+  ReserveUploadInformation,
+  UploadDomain,
+  UploadInformation,
+  UploadService
+}
+import com.scalableminds.webknossos.datastore.slacknotification.DSSlackNotificationService
+import play.api.data.Form
+import play.api.data.Forms.tuple
+import play.api.i18n.Messages
+import play.api.libs.Files
+import play.api.libs.json.Json
+import play.api.mvc.{Action, AnyContent, MultipartFormData, PlayBodyParsers}
+import play.api.data.Forms.{longNumber, nonEmptyText, number}
+
+import java.io.File
+import javax.inject.Inject
+import scala.concurrent.ExecutionContext
+
+class UploadController @Inject()(
+    accessTokenService: AccessTokenService,
+    uploadService: UploadService,
+    dsRemoteWebknossosClient: DSRemoteWebknossosClient,
+    slackNotificationService: DSSlackNotificationService)(implicit bodyParsers: PlayBodyParsers, ec: ExecutionContext)
+    extends Controller {
+
+  def reserveUpload(uploadDomain: String): Action[ReserveUploadInformation] =
+    Action.async(validateJson[ReserveUploadInformation]) { implicit request =>
+      accessTokenService.validateAccessFromTokenContext(
+        UserAccessRequest.administrateDatasets(request.body.organization)) {
+        for {
+          uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
+          isKnownUpload <- uploadService.isKnownUpload(request.body.uploadId, uploadDomainValidated)
+          _ <- if (!isKnownUpload) {
+            for {
+              reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDataSourceUpload(request.body) ?~> "dataset.upload.validation.failed"
+              _ <- uploadService.reserveUpload(request.body,
+                                               reserveUploadAdditionalInfo.newDatasetId,
+                                               reserveUploadAdditionalInfo.directoryName,
+                                               uploadDomainValidated)
+            } yield ()
+          } else Fox.successful(())
+        } yield Ok
+      }
+    }
+
+  def getUnfinishedUploads(organizationName: String, uploadDomain: String): Action[AnyContent] =
+    Action.async { implicit request =>
+      accessTokenService.validateAccessFromTokenContext(UserAccessRequest.administrateDatasets(organizationName)) {
+        for {
+          uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
+          _ <- Fox.fromBool(uploadDomainValidated == UploadDomain.dataset) ?~> "Listing unfinished uploads is only supported for datasets."
+ unfinishedUploads <- dsRemoteWebknossosClient.getUnfinishedUploadsForUser(organizationName) + unfinishedUploadsWithUploadIds <- Fox.fromFuture( + uploadService.enrichUnfinishedUploadInfoWithUploadIds(unfinishedUploads)) + unfinishedUploadsWithUploadIdsWithoutDataSourceId = unfinishedUploadsWithUploadIds.map(_.withoutDataSourceId) + } yield Ok(Json.toJson(unfinishedUploadsWithUploadIdsWithoutDataSourceId)) + } + } + + /* Upload a byte chunk for a new dataset + Expects: + - As file attachment: A raw byte chunk of the dataset + - As form parameter: + - name (string): dataset name + - owningOrganization (string): owning organization name + - resumableChunkNumber (int): chunk index + - resumableChunkSize (int): chunk size in bytes + - resumableTotalChunks (string): total chunk count of the upload + - totalFileCount (string): total file count of the upload + - resumableIdentifier (string): identifier of the resumable upload and file ("{uploadId}/{filepath}") + - As GET parameter: + - token (string): datastore token identifying the uploading user + */ + def uploadChunk(uploadDomain: String): Action[MultipartFormData[Files.TemporaryFile]] = + Action.async(parse.multipartFormData) { implicit request => + log(Some(slackNotificationService.noticeFailedUploadRequest)) { + val uploadForm = Form( + tuple( + "resumableChunkNumber" -> number, + "resumableChunkSize" -> number, + "resumableCurrentChunkSize" -> number, + "resumableTotalChunks" -> longNumber, + "resumableIdentifier" -> nonEmptyText + )).fill((-1, -1, -1, -1, "")) + + uploadForm + .bindFromRequest(request.body.dataParts) + .fold( + hasErrors = formWithErrors => Fox.successful(JsonBadRequest(formWithErrors.errors.head.message)), + success = { + case (chunkNumber, chunkSize, currentChunkSize, totalChunkCount, uploadFileId) => + for { + uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox + datasetId <- uploadService.getDatasetIdByUploadId( + uploadService.extractDatasetUploadId(uploadFileId), + uploadDomainValidated) ?~> "dataset.upload.validation.failed" + result <- accessTokenService + .validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { + for { + isKnownUpload <- uploadService.isKnownUploadByFileId(uploadFileId, uploadDomainValidated) + _ <- Fox.fromBool(isKnownUpload) ?~> "dataset.upload.validation.failed" + chunkFile <- request.body.file("file").toFox ?~> "zip.file.notFound" + _ <- uploadService.handleUploadChunk(uploadFileId, + chunkSize, + currentChunkSize, + totalChunkCount, + chunkNumber, + new File(chunkFile.ref.path.toString), + uploadDomainValidated) + } yield Ok + } + } yield result + } + ) + } + } + + def testChunk(resumableChunkNumber: Int, resumableIdentifier: String, uploadDomain: String): Action[AnyContent] = + Action.async { implicit request => + for { + uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox + datasetId <- uploadService.getDatasetIdByUploadId(uploadService.extractDatasetUploadId(resumableIdentifier), + uploadDomainValidated) ?~> "dataset.upload.validation.failed" + result <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { + for { + isKnownUpload <- uploadService.isKnownUploadByFileId(resumableIdentifier, uploadDomainValidated) + _ <- Fox.fromBool(isKnownUpload) ?~> "dataset.upload.validation.failed" + isPresent <- uploadService.isChunkPresent(resumableIdentifier, resumableChunkNumber, uploadDomainValidated) + } yield if (isPresent) Ok else NoContent + } + } yield result + } + + def finishUpload(uploadDomain: 
String): Action[UploadInformation] = Action.async(validateJson[UploadInformation]) {
+    implicit request =>
+      log(Some(slackNotificationService.noticeFailedUploadRequest)) {
+        logTime(slackNotificationService.noticeSlowRequest) {
+          for {
+            uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
+            datasetId <- uploadService
+              .getDatasetIdByUploadId(request.body.uploadId, uploadDomainValidated) ?~> s"Cannot find running upload with upload id ${request.body.uploadId}"
+            response <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) {
+              for {
+                // TODO other domains
+                _ <- uploadService.finishDatasetUpload(request.body, datasetId) ?~> Messages(
+                  "dataset.upload.finishFailed",
+                  datasetId)
+              } yield Ok(Json.obj("newDatasetId" -> datasetId))
+            }
+          } yield response
+        }
+      }
+  }
+
+  def cancelUpload(uploadDomain: String): Action[CancelUploadInformation] =
+    Action.async(validateJson[CancelUploadInformation]) { implicit request =>
+      for {
+        uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
+        datasetIdFox = uploadService.isKnownUpload(request.body.uploadId, uploadDomainValidated).flatMap {
+          case false => Fox.failure("dataset.upload.validation.failed")
+          case true  => uploadService.getDatasetIdByUploadId(request.body.uploadId, uploadDomainValidated)
+        }
+        result <- datasetIdFox.flatMap { datasetId =>
+          accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) {
+            for {
+              _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed"
+              _ <- uploadService.cancelUpload(request.body, uploadDomainValidated) ?~> "Could not cancel the upload."
+            } yield Ok
+          }
+        }
+      } yield result
+    }
+
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadDomain.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadDomain.scala
new file mode 100644
index 00000000000..3a203f19541
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadDomain.scala
@@ -0,0 +1,8 @@
+package com.scalableminds.webknossos.datastore.services.uploading
+
+import com.scalableminds.util.enumeration.ExtendedEnumeration
+
+object UploadDomain extends ExtendedEnumeration {
+  type UploadDomain = Value
+  val dataset, mag, attachment = Value
+}
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
index bdc4ac400d7..d9b24e0a7e0 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
@@ -3,6 +3,7 @@ package com.scalableminds.webknossos.datastore.services.uploading
 import com.scalableminds.util.objectid.ObjectId
 import com.scalableminds.util.tools.{Fox, FoxImplicits}
 import com.scalableminds.webknossos.datastore.models.datasource.DataSourceId
+import com.scalableminds.webknossos.datastore.services.uploading.UploadDomain.UploadDomain
 import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore
 import play.api.libs.json.Json
 
@@ -11,7 +12,7 @@ import scala.concurrent.ExecutionContext
 
 trait UploadMetadataStore extends FoxImplicits {
 
-  protected def domain: String
+  protected def domain: UploadDomain
   protected 
def store: DataStoreRedisStore protected def keyPrefix = s"upload___${domain}___" @@ -131,7 +132,7 @@ trait UploadMetadataStore extends FoxImplicits { } class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { - protected val domain = "dataset" + protected val domain = UploadDomain.dataset private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" @@ -165,11 +166,11 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt } class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { - protected val domain = "mag" + protected val domain: UploadDomain = UploadDomain.mag } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { - protected val domain = "attachment" + protected val domain: UploadDomain = UploadDomain.attachment } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 9821efe3a99..57b5a4d34cd 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -20,6 +20,7 @@ import com.scalableminds.webknossos.datastore.helpers.{DatasetDeleter, Directory import com.scalableminds.webknossos.datastore.models.UnfinishedUpload import com.scalableminds.webknossos.datastore.models.datasource.UsableDataSource.FILENAME_DATASOURCE_PROPERTIES_JSON import com.scalableminds.webknossos.datastore.models.datasource._ +import com.scalableminds.webknossos.datastore.services.uploading.UploadDomain.UploadDomain import com.scalableminds.webknossos.datastore.services.{DSRemoteWebknossosClient, DataSourceService, ManagedS3Service} import com.scalableminds.webknossos.datastore.storage.DataVaultService import com.typesafe.scalalogging.LazyLogging @@ -86,7 +87,9 @@ object CancelUploadInformation { } class UploadService @Inject()(dataSourceService: DataSourceService, - uploadMetadataStore: DatasetUploadMetadataStore, + datasetUploadMetadataStore: DatasetUploadMetadataStore, + magUploadMetadataStore: MagUploadMetadataStore, + attachmentUploadMetadataStore: AttachmentUploadMetadataStore, dataVaultService: DataVaultService, exploreLocalLayerService: ExploreLocalLayerService, dataStoreConfig: DataStoreConfig, @@ -100,16 +103,22 @@ class UploadService @Inject()(dataSourceService: DataSourceService, cleanUpOrphanUploads() + private def selectUploadMetadataStore(uploadDomain: UploadDomain) = uploadDomain match { + case UploadDomain.dataset => datasetUploadMetadataStore + case UploadDomain.mag => magUploadMetadataStore + case UploadDomain.attachment => attachmentUploadMetadataStore + } + override def dataBaseDir: Path = dataSourceService.dataBaseDir - def isKnownUploadByFileId(uploadFileId: String): Fox[Boolean] = - uploadMetadataStore.isKnownUpload(extractDatasetUploadId(uploadFileId)) + def isKnownUploadByFileId(uploadFileId: String, uploadDomain: UploadDomain): Fox[Boolean] = + selectUploadMetadataStore(uploadDomain).isKnownUpload(extractDatasetUploadId(uploadFileId)) - def isKnownUpload(uploadId: String): Fox[Boolean] = - uploadMetadataStore.isKnownUpload(uploadId) + def 
isKnownUpload(uploadId: String, uploadDomain: UploadDomain): Fox[Boolean] = + selectUploadMetadataStore(uploadDomain).isKnownUpload(uploadId) - def getDatasetIdByUploadId(uploadId: String): Fox[ObjectId] = - uploadMetadataStore.findDatasetId(uploadId) + def getDatasetIdByUploadId(uploadId: String, uploadDomain: UploadDomain): Fox[ObjectId] = + selectUploadMetadataStore(uploadDomain).findDatasetId(uploadId) def extractDatasetUploadId(uploadFileId: String): String = uploadFileId.split("/").headOption.getOrElse("") @@ -121,7 +130,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService, def reserveUpload(reserveUploadInfo: ReserveUploadInformation, datasetId: ObjectId, - directoryName: String): Fox[Unit] = + directoryName: String, + uploadDomain: UploadDomain): Fox[Unit] = for { _ <- dataSourceService.assertDataDirWritable(reserveUploadInfo.organization) uploadId = reserveUploadInfo.uploadId @@ -130,13 +140,18 @@ class UploadService @Inject()(dataSourceService: DataSourceService, _ <- Fox.fromBool( !reserveUploadInfo.needsConversion.getOrElse(false) || !reserveUploadInfo.layersToLink .exists(_.nonEmpty)) ?~> "Cannot use linked layers if the dataset needs conversion" + uploadMetadataStore = selectUploadMetadataStore(uploadDomain) _ <- uploadMetadataStore.insertDataSourceId(uploadId, newDataSourceId) - _ <- uploadMetadataStore.insertUploadIdByDataSourceId(newDataSourceId, uploadId) _ <- uploadMetadataStore.insertDatasetId(uploadId, datasetId) _ <- uploadMetadataStore.insertTotalFileCount(uploadId, reserveUploadInfo.totalFileCount) _ <- uploadMetadataStore.insertTotalFileSizeInBytes(uploadId, reserveUploadInfo.totalFileSizeInBytes) _ <- uploadMetadataStore.insertFilePaths(uploadId, reserveUploadInfo.filePaths) - _ <- uploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, reserveUploadInfo.layersToLink) + _ <- Fox.runIf(uploadDomain == UploadDomain.dataset) { + datasetUploadMetadataStore.insertUploadIdByDataSourceId(newDataSourceId, uploadId) + } + _ <- Fox.runIf(uploadDomain == UploadDomain.dataset) { + datasetUploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, reserveUploadInfo.layersToLink) + } } yield () def enrichUnfinishedUploadInfoWithUploadIds( @@ -147,11 +162,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService, unfinishedUploadsWithoutIds.map( unfinishedUpload => { for { - uploadIdOpt <- uploadMetadataStore.findUploadIdByDataSourceId(unfinishedUpload.dataSourceId) + uploadIdOpt <- datasetUploadMetadataStore.findUploadIdByDataSourceId(unfinishedUpload.dataSourceId) updatedUploadOpt = uploadIdOpt.map(uploadId => unfinishedUpload.copy(uploadId = uploadId)) updatedUploadWithFilePathsOpt <- Fox.runOptional(updatedUploadOpt)(updatedUpload => for { - filePaths <- uploadMetadataStore.findFilePaths(updatedUpload.uploadId) + filePaths <- datasetUploadMetadataStore.findFilePaths(updatedUpload.uploadId) uploadUpdatedWithFilePaths = updatedUpload.copy(filePaths = Some(filePaths)) } yield uploadUpdatedWithFilePaths) } yield updatedUploadWithFilePathsOpt @@ -163,7 +178,9 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def isOutsideUploadDir(uploadDir: Path, filePath: String): Boolean = uploadDir.relativize(uploadDir.resolve(filePath)).startsWith("../") - private def getFilePathAndDirForUploadFileId(uploadFileId: String): Fox[(String, Path)] = { + private def getFilePathAndDirForUploadFileId(uploadFileId: String, + uploadDomain: UploadDomain): Fox[(String, Path)] = { + val uploadMetadataStore = 
selectUploadMetadataStore(uploadDomain) val uploadId = extractDatasetUploadId(uploadFileId) for { dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) @@ -174,10 +191,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield (filePath, uploadDir) } - def isChunkPresent(uploadFileId: String, currentChunkNumber: Long): Fox[Boolean] = { + def isChunkPresent(uploadFileId: String, currentChunkNumber: Long, uploadDomain: UploadDomain): Fox[Boolean] = { + val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) val uploadId = extractDatasetUploadId(uploadFileId) for { - (filePath, _) <- getFilePathAndDirForUploadFileId(uploadFileId) + (filePath, _) <- getFilePathAndDirForUploadFileId(uploadFileId, uploadDomain) isFileKnown <- uploadMetadataStore.isFileKnown(uploadId, filePath) isFilesChunkSetKnown <- Fox.runIf(isFileKnown)(uploadMetadataStore.isFileChunkSetKnown(uploadId, filePath)) isChunkPresent <- Fox.runIf(isFileKnown)( @@ -190,12 +208,14 @@ class UploadService @Inject()(dataSourceService: DataSourceService, currentChunkSize: Long, totalChunkCount: Long, currentChunkNumber: Long, - chunkFile: File): Fox[Unit] = { + chunkFile: File, + uploadDomain: UploadDomain): Fox[Unit] = { + val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) val uploadId = extractDatasetUploadId(uploadFileId) for { datasetId <- uploadMetadataStore.findDatasetId(uploadId) dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) - (filePath, uploadDir) <- getFilePathAndDirForUploadFileId(uploadFileId) + (filePath, uploadDir) <- getFilePathAndDirForUploadFileId(uploadFileId, uploadDomain) isFileKnown <- uploadMetadataStore.isFileKnown(uploadId, filePath) _ <- Fox.runIf(!isFileKnown) { for { @@ -229,40 +249,43 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - def cancelUpload(cancelUploadInformation: CancelUploadInformation): Fox[Unit] = { + def cancelUpload(cancelUploadInformation: CancelUploadInformation, uploadDomain: UploadDomain): Fox[Unit] = { + val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) val uploadId = cancelUploadInformation.uploadId for { dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) - datasetId <- getDatasetIdByUploadId(uploadId) - knownUpload <- isKnownUpload(uploadId) + datasetId <- uploadMetadataStore.findDatasetId(uploadId) + knownUpload <- uploadMetadataStore.isKnownUpload(uploadId) } yield if (knownUpload) { logger.info(f"Cancelling ${uploadFullName(uploadId, datasetId, dataSourceId)}...") cleanUpUploadedDataset(uploadDirectoryFor(dataSourceId.organizationId, uploadId), uploadId, - reason = "Cancelled by user") + reason = "Cancelled by user", + uploadDomain) } else Fox.failure(s"Unknown upload") } private def uploadFullName(uploadId: String, datasetId: ObjectId, dataSourceId: DataSourceId) = s"upload $uploadId of dataset $datasetId ($dataSourceId)" - def finishUpload(uploadInformation: UploadInformation, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = { + def finishDatasetUpload(uploadInformation: UploadInformation, datasetId: ObjectId)( + implicit tc: TokenContext): Fox[Unit] = { val uploadId = uploadInformation.uploadId for { - dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) + dataSourceId <- datasetUploadMetadataStore.findDataSourceId(uploadId) _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") - linkedLayerIdentifiers <- uploadMetadataStore.findLinkedLayerIdentifiers(uploadId) + linkedLayerIdentifiers <- 
datasetUploadMetadataStore.findLinkedLayerIdentifiers(uploadId) needsConversion = uploadInformation.needsConversion.getOrElse(false) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox - _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId) ?~> "dataset.upload.fileSizeCheck.failed" - _ <- checkAllChunksUploaded(uploadId) ?~> "dataset.upload.allChunksUploadedCheck.failed" + _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.dataset) ?~> "dataset.upload.fileSizeCheck.failed" + _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed" unpackToDir = unpackToDirFor(dataSourceId) _ <- PathUtils.ensureDirectoryBox(unpackToDir.getParent).toFox ?~> "dataset.import.fileAccessDenied" unpackResult <- unpackDataset(uploadDir, unpackToDir, datasetId).shiftBox - _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.") + _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset) _ <- cleanUpOnFailure(unpackResult, datasetId, dataSourceId, @@ -288,25 +311,31 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - private def checkWithinRequestedFileSize(uploadDir: Path, uploadId: String, datasetId: ObjectId): Fox[Unit] = + private def checkWithinRequestedFileSize(uploadDir: Path, + uploadId: String, + datasetId: ObjectId, + uploadDomain: UploadDomain): Fox[Unit] = { + val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) for { totalFileSizeInBytesOpt <- uploadMetadataStore.findTotalFileSizeInBytes(uploadId) ?~> "Could not look up reserved total file size" _ <- totalFileSizeInBytesOpt.map { reservedFileSize => for { actualFileSize <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(uploadDir.toFile).longValue).toFox ?~> "Could not measure actual file size" _ <- if (actualFileSize > reservedFileSize) { - cleanUpDatasetExceedingSize(uploadDir, uploadId) + cleanUpDatasetExceedingSize(uploadDir, uploadId, uploadDomain) Fox.failure( f"Uploaded dataset $datasetId exceeds the reserved size of $reservedFileSize bytes, got $actualFileSize bytes.") } else Fox.successful(()) } yield () }.getOrElse(Fox.successful(())) } yield () + } - private def cleanUpDatasetExceedingSize(uploadDir: Path, uploadId: String): Fox[Unit] = + // TODO adapt for mags/attachments + private def cleanUpDatasetExceedingSize(uploadDir: Path, uploadId: String, uploadDomain: UploadDomain): Fox[Unit] = for { - datasetId <- getDatasetIdByUploadId(uploadId) - _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Exceeded reserved fileSize") + datasetId <- getDatasetIdByUploadId(uploadId, uploadDomain) + _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Exceeded reserved fileSize", uploadDomain) _ <- remoteWebknossosClient.deleteDataset(datasetId) } yield () @@ -504,7 +533,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - private def checkAllChunksUploaded(uploadId: String): Fox[Unit] = + private def checkAllChunksUploaded(uploadId: String, uploadDomain: UploadDomain): Fox[Unit] = { + val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) for { fileCountOpt <- uploadMetadataStore.findFileCount(uploadId) ?~> "Could not look up reserved file count." fileCount <- fileCountOpt.toFox ?~> "Could not look up reserved file count." 
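(Aside: a minimal, self-contained Scala sketch of the domain-keyed naming scheme that the
selectUploadMetadataStore dispatch in the hunks above relies on. Only the UploadDomain values
and the upload___ prefix convention come from this patch series; the wrapper object and main
method are illustrative scaffolding.)

    object UploadKeyExample {
      object UploadDomain extends Enumeration {
        type UploadDomain = Value
        val dataset, mag, attachment = Value
      }

      // Each domain gets its own Redis key namespace, so the metadata of a mag upload
      // and of a dataset upload that happen to share an uploadId cannot collide.
      def keyPrefix(domain: UploadDomain.UploadDomain): String = s"upload___${domain}___"

      def main(args: Array[String]): Unit =
        // prints "upload___mag___someUploadId___fileCount"
        println(keyPrefix(UploadDomain.mag) + "someUploadId___fileCount")
    }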
@@ -519,6 +549,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } } yield () + } private def unpackToDirFor(dataSourceId: DataSourceId): Path = dataBaseDir @@ -701,10 +732,14 @@ class UploadService @Inject()(dataSourceService: DataSourceService, tryo(FileUtils.copyDirectory(uploadDir.toFile, backupDir.toFile)) } - private def cleanUpUploadedDataset(uploadDir: Path, uploadId: String, reason: String): Fox[Unit] = + private def cleanUpUploadedDataset(uploadDir: Path, + uploadId: String, + reason: String, + uploadDomain: UploadDomain): Fox[Unit] = for { _ <- Fox.successful(logger.info(s"Cleaning up uploaded dataset. Reason: $reason")) _ <- PathUtils.deleteDirectoryRecursively(uploadDir).toFox + uploadMetadataStore = selectUploadMetadataStore(uploadDomain) _ <- uploadMetadataStore.cleanUp(uploadId) } yield () @@ -714,6 +749,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, _ <- Fox.serialCombined(organizationDirs)(cleanUpOrphanUploadsForOrga) } yield () + // TODO should also handle attachments/mags private def cleanUpOrphanUploadsForOrga(organizationDir: Path): Fox[Unit] = { val orgaUploadingDir: Path = organizationDir.resolve(uploadingDir) if (!Files.exists(orgaUploadingDir)) @@ -722,7 +758,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { uploadDirs <- PathUtils.listDirectories(orgaUploadingDir, silent = false).toFox _ <- Fox.serialCombined(uploadDirs) { uploadDir => - uploadMetadataStore.isKnownUpload(uploadDir.getFileName.toString).map { + datasetUploadMetadataStore.isKnownUpload(uploadDir.getFileName.toString).map { case false => val deleteResult = PathUtils.deleteDirectoryRecursively(uploadDir) if (deleteResult.isDefined) { diff --git a/webknossos-datastore/conf/datastore.latest.routes b/webknossos-datastore/conf/datastore.latest.routes index b994a9d1d2b..92b593436ab 100644 --- a/webknossos-datastore/conf/datastore.latest.routes +++ b/webknossos-datastore/conf/datastore.latest.routes @@ -105,14 +105,16 @@ POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/volum POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/boundingBox @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentBoundingBox(datasetId: ObjectId, dataLayerName: String) POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/surfaceArea @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentSurfaceArea(datasetId: ObjectId, dataLayerName: String) +# Uploads: Datasets, mags, attachments +GET /datasets/upload/:uploadDomain @com.scalableminds.webknossos.datastore.controllers.UploadController.testChunk(resumableChunkNumber: Int, resumableIdentifier: String, uploadDomain: String) +POST /datasets/upload/:uploadDomain @com.scalableminds.webknossos.datastore.controllers.UploadController.uploadChunk(uploadDomain: String) +POST /datasets/upload/:uploadDomain/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveUpload(uploadDomain: String) +GET /datasets/upload/:uploadDomain/getUnfinishedUploads @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String) +POST /datasets/upload/:uploadDomain/finishUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String) +POST /datasets/upload/:uploadDomain/cancelUpload 
@com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String) + # DataSource management -GET /datasets @com.scalableminds.webknossos.datastore.controllers.DataSourceController.testChunk(resumableChunkNumber: Int, resumableIdentifier: String) -POST /datasets @com.scalableminds.webknossos.datastore.controllers.DataSourceController.uploadChunk() -GET /datasets/getUnfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getUnfinishedUploads(organizationName: String) GET /datasets/baseDirAbsolute @com.scalableminds.webknossos.datastore.controllers.DataSourceController.baseDirAbsolute -POST /datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DataSourceController.reserveUpload() -POST /datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DataSourceController.finishUpload() -POST /datasets/cancelUpload @com.scalableminds.webknossos.datastore.controllers.DataSourceController.cancelUpload() POST /datasets/measureUsedStorage/:organizationId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.measureUsedStorage(organizationId: String) PUT /datasets/:datasetId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.updateOnDisk(datasetId: ObjectId) DELETE /datasets/:datasetId/deleteOnDisk @com.scalableminds.webknossos.datastore.controllers.DataSourceController.deleteOnDisk(datasetId: ObjectId) From 3c95eb6a81503eb6bc8aa0f18ef9efa6fbd19148 Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 1 Apr 2026 14:24:44 +0200 Subject: [PATCH 06/37] inject concrete class, not trait --- .../datastore/controllers/UploadController.scala | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index f5b827df039..8c5138b1f14 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -1,7 +1,11 @@ package com.scalableminds.webknossos.datastore.controllers import com.scalableminds.util.tools.Fox -import com.scalableminds.webknossos.datastore.services.{AccessTokenService, DSRemoteWebknossosClient, UserAccessRequest} +import com.scalableminds.webknossos.datastore.services.{ + DSRemoteWebknossosClient, + DataStoreAccessTokenService, + UserAccessRequest +} import com.scalableminds.webknossos.datastore.services.uploading.{ CancelUploadInformation, ReserveUploadInformation, @@ -23,7 +27,7 @@ import javax.inject.Inject import scala.concurrent.ExecutionContext class UploadController @Inject()( - accessTokenService: AccessTokenService, + accessTokenService: DataStoreAccessTokenService, uploadService: UploadService, dsRemoteWebknossosClient: DSRemoteWebknossosClient, slackNotificationService: DSSlackNotificationService)(implicit bodyParsers: PlayBodyParsers, ec: ExecutionContext) From 9f96d4dc1d82463d851e71583260f2a861e3326c Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 2 Apr 2026 10:41:30 +0200 Subject: [PATCH 07/37] restructure upload info --- .../WKRemoteDataStoreController.scala | 25 +-- .../admin/dataset/dataset_upload_view.tsx | 15 +- frontend/javascripts/admin/rest_api.ts | 21 +- .../controllers/DSLegacyApiController.scala | 55 ++--- .../controllers/UploadController.scala | 33 +-- 
.../services/DSRemoteWebknossosClient.scala | 10 +- .../services/DataSourceService.scala | 6 +- .../services/uploading/UploadService.scala | 99 ++++++--- .../conf/datastore.latest.routes | 202 +++++++++--------- 9 files changed, 257 insertions(+), 209 deletions(-) diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 911656b6813..945f61bc523 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -15,8 +15,8 @@ import com.scalableminds.webknossos.datastore.models.datasource.{ import com.scalableminds.webknossos.datastore.services.{DataSourcePathInfo, DataStoreStatus} import com.scalableminds.webknossos.datastore.services.uploading.{ ReportDatasetUploadParameters, - ReserveAdditionalInformation, - ReserveUploadInformation + DatasetUploadAdditionalInfo, + DatasetUploadInfo } import com.typesafe.scalalogging.LazyLogging import models.dataset._ @@ -54,28 +54,29 @@ class WKRemoteDataStoreController @Inject()( val bearerTokenService: WebknossosBearerTokenAuthenticatorService = wkSilhouetteEnvironment.combinedAuthenticatorService.tokenAuthenticatorService - def reserveDatasetUpload(name: String, key: String, token: String): Action[ReserveUploadInformation] = - Action.async(validateJson[ReserveUploadInformation]) { implicit request => + def reserveDatasetUpload(name: String, key: String, token: String): Action[DatasetUploadInfo] = + Action.async(validateJson[DatasetUploadInfo]) { implicit request => dataStoreService.validateAccess(name, key) { dataStore => val uploadInfo = request.body for { user <- bearerTokenService.userForToken(token) ~> FORBIDDEN - organization <- organizationDAO.findOne(uploadInfo.organization)(GlobalAccessContext) ?~> Messages( + organization <- organizationDAO.findOne(uploadInfo.organizationId)(GlobalAccessContext) ?~> Messages( "organization.notFound", - uploadInfo.organization) ~> NOT_FOUND + uploadInfo.organizationId) ~> NOT_FOUND usedStorageBytes <- organizationDAO.getUsedStorage(organization._id) _ <- Fox.runOptional(organization.includedStorageBytes)(includedStorage => - Fox.fromBool(usedStorageBytes + uploadInfo.totalFileSizeInBytes.getOrElse(0L) <= includedStorage)) ?~> "dataset.upload.storageExceeded" ~> FORBIDDEN + Fox.fromBool(usedStorageBytes + uploadInfo.resumableUploadInfo.totalFileSizeInBytes + .getOrElse(0L) <= includedStorage)) ?~> "dataset.upload.storageExceeded" ~> FORBIDDEN _ <- Fox.fromBool(organization._id == user._organization) ?~> "notAllowed" ~> FORBIDDEN - _ <- datasetService.assertValidDatasetName(uploadInfo.name) + _ <- datasetService.assertValidDatasetName(uploadInfo.datasetName) _ <- Fox.fromBool(dataStore.onlyAllowedOrganization.forall(_ == organization._id)) ?~> "dataset.upload.Datastore.restricted" _ <- Fox.serialCombined(uploadInfo.layersToLink.getOrElse(List.empty))(l => layerToLinkService.validateLayerToLink(l, user)) ?~> "dataset.upload.invalidLinkedLayers" _ <- Fox.runIf(request.body.requireUniqueName.getOrElse(false))( - datasetService.assertNewDatasetNameUnique(request.body.name, organization._id)) + datasetService.assertNewDatasetNameUnique(request.body.datasetName, organization._id)) preliminaryDataSource = UnusableDataSource(DataSourceId("", ""), None, DataSourceStatus.notYetUploaded) dataset <- datasetService.createAndSetUpDataset( - uploadInfo.name, + uploadInfo.datasetName, dataStore, preliminaryDataSource, uploadInfo.folderId, @@ -83,8 +84,8 @@ class WKRemoteDataStoreController @Inject()( 
isVirtual = uploadInfo.isVirtual.getOrElse(true),
           creationType = DatasetCreationType.Upload
         ) ?~> "dataset.upload.creation.failed"
-        _ <- datasetService.addInitialTeams(dataset, uploadInfo.initialTeams, user)(AuthorizedAccessContext(user))
-        additionalInfo = ReserveAdditionalInformation(dataset._id, dataset.directoryName)
+        _ <- datasetService.addInitialTeams(dataset, uploadInfo.initialTeamIds, user)(AuthorizedAccessContext(user))
+        additionalInfo = DatasetUploadAdditionalInfo(dataset._id, dataset.directoryName)
       } yield Ok(Json.toJson(additionalInfo))
     }
   }
diff --git a/frontend/javascripts/admin/dataset/dataset_upload_view.tsx b/frontend/javascripts/admin/dataset/dataset_upload_view.tsx
index cf849ab27b6..1aad0f9336c 100644
--- a/frontend/javascripts/admin/dataset/dataset_upload_view.tsx
+++ b/frontend/javascripts/admin/dataset/dataset_upload_view.tsx
@@ -342,23 +342,24 @@ class DatasetUploadView extends React.Component {
       : `${dayjs(Date.now()).format("YYYY-MM-DD_HH-mm")}__${newDatasetName}__${getRandomString()}`;
     const filePaths = formValues.zipFile.map((file) => file.path || "");
     const totalFileSizeInBytes = getFileSize(formValues.zipFile);
-    const reserveUploadInformation = {
+    const resumableUploadInfo = {
       uploadId,
-      name: newDatasetName,
-      directoryName: "",
-      newDatasetId: "",
-      organization: activeUser.organization,
       totalFileCount: formValues.zipFile.length,
       filePaths: filePaths,
       totalFileSizeInBytes,
+    };
+    const datasetUploadInfo = {
+      resumableUploadInfo,
+      datasetName: newDatasetName,
+      organizationId: activeUser.organization,
       layersToLink: [],
-      initialTeams: formValues.initialTeams.map((team: APITeam) => team.id),
+      initialTeamIds: formValues.initialTeams.map((team: APITeam) => team.id),
       folderId: formValues.targetFolderId,
       needsConversion: this.state.needsConversion,
     };
     const datastoreUrl = formValues.datastoreUrl;
     await refreshToken();
-    await reserveDatasetUpload(datastoreUrl, reserveUploadInformation);
+    await reserveDatasetUpload(datastoreUrl, datasetUploadInfo);
     const resumableUpload = await createResumableUpload(datastoreUrl, uploadId);
     this.setState({
       uploadId,
diff --git a/frontend/javascripts/admin/rest_api.ts b/frontend/javascripts/admin/rest_api.ts
index 7559e46b581..197d63e63b9 100644
--- a/frontend/javascripts/admin/rest_api.ts
+++ b/frontend/javascripts/admin/rest_api.ts
@@ -1243,25 +1243,30 @@ export function createResumableUpload(
     return resumable;
   });
 }
-type ReserveUploadInformation = {
+
+type ResumableUploadInfo = {
   uploadId: string;
-  name: string;
-  directoryName: string;
-  newDatasetId: string;
-  organization: string;
   totalFileCount: number;
   filePaths: Array<string>;
-  initialTeams: Array<string>;
+  totalFileSizeInBytes: number;
+};
+type DatasetUploadInfo = {
+  resumableUploadInfo: ResumableUploadInfo;
+  datasetName: string;
+  organizationId: string;
+  layersToLink: Array<LinkedLayerIdentifier>; // Always set as empty by frontend, only used by libs
+  initialTeamIds: Array<string>;
   folderId: string | null;
+  needsConversion: boolean;
 };
 export function reserveDatasetUpload(
   datastoreHost: string,
-  reserveUploadInformation: ReserveUploadInformation,
+  datasetUploadInfo: DatasetUploadInfo,
 ): Promise<void> {
   return doWithToken((token) =>
     Request.sendJSONReceiveJSON(`/data/datasets/reserveUpload?token=${token}`, {
-      data: reserveUploadInformation,
+      data: datasetUploadInfo,
       host: datastoreHost,
     }),
   );
 }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala index f1057e6c996..09971e27807 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala @@ -13,9 +13,9 @@ import com.scalableminds.webknossos.datastore.models.{ import com.scalableminds.webknossos.datastore.models.datasource.{UnusableDataSource, UsableDataSource} import com.scalableminds.webknossos.datastore.services.mesh.FullMeshRequest import com.scalableminds.webknossos.datastore.services.uploading.{ + DatasetUploadInfo, LinkedLayerIdentifier, - ReserveUploadInformation, - UploadDomain + ResumableUploadInfo } import com.scalableminds.webknossos.datastore.services.{ DSRemoteWebknossosClient, @@ -32,7 +32,7 @@ import scala.concurrent.{ExecutionContext, Future} case class LegacyReserveManualUploadInformation( datasetName: String, organization: String, - initialTeamIds: List[ObjectId], + initialTeamIds: Seq[ObjectId], folderId: Option[ObjectId], requireUniqueName: Boolean = false, ) @@ -101,22 +101,23 @@ class DSLegacyApiController @Inject()( for { adaptedLayersToLink <- Fox.serialCombined(request.body.layersToLink.getOrElse(List.empty))(adaptLayerToLink) - adaptedRequestBody = ReserveUploadInformation( - uploadId = request.body.uploadId, - name = request.body.name, - organization = request.body.organization, - totalFileCount = request.body.totalFileCount, - filePaths = request.body.filePaths, - totalFileSizeInBytes = request.body.totalFileSizeInBytes, + adaptedRequestBody = DatasetUploadInfo( + resumableUploadInfo = ResumableUploadInfo( + uploadId = request.body.uploadId, + totalFileCount = request.body.totalFileCount, + filePaths = request.body.filePaths, + totalFileSizeInBytes = request.body.totalFileSizeInBytes, + ), + datasetName = request.body.name, + organizationId = request.body.organization, layersToLink = Some(adaptedLayersToLink), - initialTeams = request.body.initialTeams, + initialTeamIds = request.body.initialTeams, folderId = request.body.folderId, requireUniqueName = request.body.requireUniqueName, isVirtual = None, needsConversion = None ) - result <- Fox.fromFuture( - uploadController.reserveUpload(UploadDomain.dataset.toString)(request.withBody(adaptedRequestBody))) + result <- Fox.fromFuture(uploadController.reserveDatasetUpload()(request.withBody(adaptedRequestBody))) } yield result } } @@ -142,19 +143,21 @@ class DSLegacyApiController @Inject()( accessTokenService.validateAccessFromTokenContext( UserAccessRequest.administrateDatasets(request.body.organization)) { for { - reservedDatasetInfo <- remoteWebknossosClient.reserveDataSourceUpload( - ReserveUploadInformation( - "aManualUpload", - request.body.datasetName, - request.body.organization, - 0, - Some(List.empty), - None, - None, - request.body.initialTeamIds, - request.body.folderId, - Some(request.body.requireUniqueName), - Some(false), + reservedDatasetInfo <- remoteWebknossosClient.reserveDatasetUpload( + DatasetUploadInfo( + resumableUploadInfo = ResumableUploadInfo( + uploadId = "aManualUpload", + totalFileCount = 0, + filePaths = Some(List.empty), + totalFileSizeInBytes = None + ), + datasetName = request.body.datasetName, + organizationId = request.body.organization, + layersToLink = None, + initialTeamIds = request.body.initialTeamIds, + folderId = request.body.folderId, + requireUniqueName = Some(request.body.requireUniqueName), + 
isVirtual = Some(false),
+            needsConversion = None
+          )
+        ) ?~> "dataset.upload.validation.failed"
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
index 8c5138b1f14..58396bfbc8c 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
@@ -8,7 +8,8 @@ import com.scalableminds.webknossos.datastore.services.{
 }
 import com.scalableminds.webknossos.datastore.services.uploading.{
   CancelUploadInformation,
-  ReserveUploadInformation,
+  DatasetUploadInfo,
+  MagUploadInfo,
   UploadDomain,
   UploadInformation,
   UploadService
@@ -33,26 +34,32 @@ class UploadController @Inject()(
     slackNotificationService: DSSlackNotificationService)(implicit bodyParsers: PlayBodyParsers, ec: ExecutionContext)
     extends Controller {
 
-  def reserveUpload(uploadDomain: String): Action[ReserveUploadInformation] =
-    Action.async(validateJson[ReserveUploadInformation]) { implicit request =>
+  def reserveDatasetUpload(): Action[DatasetUploadInfo] =
+    Action.async(validateJson[DatasetUploadInfo]) { implicit request =>
       accessTokenService.validateAccessFromTokenContext(
-        UserAccessRequest.administrateDatasets(request.body.organization)) {
+        UserAccessRequest.administrateDatasets(request.body.organizationId)) {
         for {
-          uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
-          isKnownUpload <- uploadService.isKnownUpload(request.body.uploadId, uploadDomainValidated)
-          _ <- if (!isKnownUpload) {
+          isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId, UploadDomain.dataset)
+          _ <- Fox.runIf(!isKnownUpload) {
             for {
-              reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDataSourceUpload(request.body) ?~> "dataset.upload.validation.failed"
-              _ <- uploadService.reserveUpload(request.body,
-                                               reserveUploadAdditionalInfo.newDatasetId,
-                                               reserveUploadAdditionalInfo.directoryName,
-                                               uploadDomainValidated)
+              reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDatasetUpload(request.body) ?~> "dataset.upload.validation.failed"
+              _ <- uploadService.reserveDatasetUpload(request.body,
+                                                      reserveUploadAdditionalInfo.newDatasetId,
+                                                      reserveUploadAdditionalInfo.directoryName)
             } yield ()
-          } else Fox.successful(())
+          }
         } yield Ok
       }
     }
 
+  def reserveMagUpload(): Action[MagUploadInfo] = Action.async(validateJson[MagUploadInfo]) { implicit request =>
+    Fox.successful(Ok)
+  }
+
+  def reserveAttachmentUpload(): Action[MagUploadInfo] = Action.async(validateJson[MagUploadInfo]) { implicit request => // TODO use a dedicated AttachmentUploadInfo
+    Fox.successful(Ok)
+  }
+
   def getUnfinishedUploads(organizationName: String, uploadDomain: String): Action[AnyContent] =
     Action.async { implicit request =>
       accessTokenService.validateAccessFromTokenContext(UserAccessRequest.administrateDatasets(organizationName)) {
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
index 7c9301774dd..66176f27450 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
@@ -16,8 +16,8 @@ import
com.scalableminds.webknossos.datastore.models.datasource.{DataSource, Dat import com.scalableminds.webknossos.datastore.rpc.RPC import com.scalableminds.webknossos.datastore.services.uploading.{ ReportDatasetUploadParameters, - ReserveAdditionalInformation, - ReserveUploadInformation + DatasetUploadAdditionalInfo, + DatasetUploadInfo } import com.scalableminds.webknossos.datastore.storage.DataVaultCredential import com.typesafe.scalalogging.LazyLogging @@ -117,13 +117,13 @@ class DSRemoteWebknossosClient @Inject()( .silent .putJson(dataSourcePaths) - def reserveDataSourceUpload(info: ReserveUploadInformation)( - implicit tc: TokenContext): Fox[ReserveAdditionalInformation] = + def reserveDatasetUpload(info: DatasetUploadInfo)( + implicit tc: TokenContext): Fox[DatasetUploadAdditionalInfo] = for { reserveUploadInfo <- rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveUpload") .addQueryParam("key", dataStoreKey) .withTokenFromContext - .postJsonWithJsonResponse[ReserveUploadInformation, ReserveAdditionalInformation](info) + .postJsonWithJsonResponse[DatasetUploadInfo, DatasetUploadAdditionalInfo](info) } yield reserveUploadInfo def updateDataSource(dataSource: DataSource, datasetId: ObjectId)(implicit tc: TokenContext): Fox[_] = diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceService.scala index 223e50f5023..5deebbd01c7 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceService.scala @@ -56,10 +56,10 @@ class DataSourceService @Inject()( _ = if (inboxCheckVerboseCounter >= 10) inboxCheckVerboseCounter = 0 } yield () - def assertDataDirWritable(organizationId: String): Fox[Unit] = { - val orgaPath = dataBaseDir.resolve(organizationId) + def ensureDataDirWritable(dataSourceId: DataSourceId): Fox[Unit] = { + val orgaPath = dataBaseDir.resolve(dataSourceId.organizationId) if (orgaPath.toFile.exists()) { - Fox.fromBool(Files.isWritable(dataBaseDir.resolve(organizationId))) ?~> "Datastore cannot write to organization data directory." + Fox.fromBool(Files.isWritable(orgaPath)) ?~> "Datastore cannot write to organization data directory." 
} else { tryo { Files.createDirectory(orgaPath) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 57b5a4d34cd..1dfe91d380a 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -8,6 +8,7 @@ import com.scalableminds.util.time.Instant import com.scalableminds.util.tools.Box.tryo import com.scalableminds.util.tools._ import com.scalableminds.webknossos.datastore.DataStoreConfig +import com.scalableminds.webknossos.datastore.dataformats.MagLocator import com.scalableminds.webknossos.datastore.dataformats.wkw.WKWDataFormatHelper import com.scalableminds.webknossos.datastore.datareaders.n5.N5Header.FILENAME_ATTRIBUTES_JSON import com.scalableminds.webknossos.datastore.datareaders.n5.{N5Header, N5Metadata} @@ -34,28 +35,51 @@ import java.nio.file.{Files, Path} import scala.concurrent.{ExecutionContext, Future} import scala.jdk.FutureConverters._ -case class ReserveUploadInformation( +case class ResumableUploadInfo( uploadId: String, // upload id that was also used in chunk upload (this time without file paths) - name: String, // dataset name - organization: String, totalFileCount: Long, filePaths: Option[Seq[String]], totalFileSizeInBytes: Option[Long], +) +object ResumableUploadInfo { + implicit val jsonFormat: OFormat[ResumableUploadInfo] = Json.format[ResumableUploadInfo] +} + +// TODO build from legacy param set for LegacyApiController +case class DatasetUploadInfo( + resumableUploadInfo: ResumableUploadInfo, + datasetName: String, + organizationId: String, layersToLink: Option[Seq[LinkedLayerIdentifier]], - initialTeams: Seq[ObjectId], // team ids + initialTeamIds: Seq[ObjectId], // team ids folderId: Option[ObjectId], requireUniqueName: Option[Boolean], isVirtual: Option[Boolean], // Only set (to false) for legacy manual uploads needsConversion: Option[Boolean] // None means false ) -object ReserveUploadInformation { - implicit val jsonFormat: OFormat[ReserveUploadInformation] = Json.format[ReserveUploadInformation] +object DatasetUploadInfo { + implicit val jsonFormat: OFormat[DatasetUploadInfo] = Json.format[DatasetUploadInfo] +} + +case class MagUploadInfo( + resumableUploadInfo: ResumableUploadInfo, + datasetId: ObjectId, + mag: MagLocator +) +object MagUploadInfo { + implicit val jsonFormat: OFormat[MagUploadInfo] = Json.format[MagUploadInfo] +} + +case class DatasetUploadAdditionalInfo(newDatasetId: ObjectId, directoryName: String) +object DatasetUploadAdditionalInfo { + implicit val jsonFormat: OFormat[DatasetUploadAdditionalInfo] = + Json.format[DatasetUploadAdditionalInfo] } -case class ReserveAdditionalInformation(newDatasetId: ObjectId, directoryName: String) -object ReserveAdditionalInformation { - implicit val jsonFormat: OFormat[ReserveAdditionalInformation] = - Json.format[ReserveAdditionalInformation] +case class MagUploadAdditionalInfo(dataSourceId: DataSourceId) +object MagUploadAdditionalInfo { + implicit val jsonFormat: OFormat[MagUploadAdditionalInfo] = + Json.format[MagUploadAdditionalInfo] } case class ReportDatasetUploadParameters( @@ -122,36 +146,41 @@ class UploadService @Inject()(dataSourceService: DataSourceService, def extractDatasetUploadId(uploadFileId: String): String = 
uploadFileId.split("/").headOption.getOrElse("") - private def uploadDirectoryFor(organizationId: String, uploadId: String): Path = - dataBaseDir.resolve(organizationId).resolve(uploadingDir).resolve(uploadId) + private def uploadDirectoryFor(organizationId: String, uploadId: String, uploadDomain: UploadDomain): Path = + dataBaseDir.resolve(organizationId).resolve(uploadingDir).resolve(uploadDomain.toString).resolve(uploadId) private def uploadBackupDirectoryFor(organizationId: String, uploadId: String): Path = dataBaseDir.resolve(organizationId).resolve(trashDir).resolve(s"uploadBackup__$uploadId") - def reserveUpload(reserveUploadInfo: ReserveUploadInformation, - datasetId: ObjectId, - directoryName: String, - uploadDomain: UploadDomain): Fox[Unit] = + def reserveDatasetUpload(datasetUploadInfo: DatasetUploadInfo, + datasetId: ObjectId, + directoryName: String): Fox[Unit] = { + val dataSourceId = DataSourceId(directoryName, datasetUploadInfo.organizationId) for { - _ <- dataSourceService.assertDataDirWritable(reserveUploadInfo.organization) - uploadId = reserveUploadInfo.uploadId - newDataSourceId = DataSourceId(directoryName, reserveUploadInfo.organization) - _ = logger.info(f"Reserving ${uploadFullName(uploadId, datasetId, newDataSourceId)}...") _ <- Fox.fromBool( - !reserveUploadInfo.needsConversion.getOrElse(false) || !reserveUploadInfo.layersToLink + !datasetUploadInfo.needsConversion.getOrElse(false) || !datasetUploadInfo.layersToLink .exists(_.nonEmpty)) ?~> "Cannot use linked layers if the dataset needs conversion" + _ <- reserveResumableUpload(datasetUploadInfo.resumableUploadInfo, datasetId, dataSourceId, UploadDomain.dataset) + uploadId = datasetUploadInfo.resumableUploadInfo.uploadId + _ <- datasetUploadMetadataStore.insertUploadIdByDataSourceId(dataSourceId, uploadId) + _ <- datasetUploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, datasetUploadInfo.layersToLink) + } yield () + } + + private def reserveResumableUpload(resumableUploadInfo: ResumableUploadInfo, + datasetId: ObjectId, + dataSourceId: DataSourceId, + uploadDomain: UploadDomain): Fox[Unit] = + for { + _ <- dataSourceService.ensureDataDirWritable(dataSourceId) + uploadId = resumableUploadInfo.uploadId + _ = logger.info(f"Reserving $uploadDomain ${uploadFullName(uploadId, datasetId, dataSourceId)}...") uploadMetadataStore = selectUploadMetadataStore(uploadDomain) - _ <- uploadMetadataStore.insertDataSourceId(uploadId, newDataSourceId) + _ <- uploadMetadataStore.insertDataSourceId(uploadId, dataSourceId) _ <- uploadMetadataStore.insertDatasetId(uploadId, datasetId) - _ <- uploadMetadataStore.insertTotalFileCount(uploadId, reserveUploadInfo.totalFileCount) - _ <- uploadMetadataStore.insertTotalFileSizeInBytes(uploadId, reserveUploadInfo.totalFileSizeInBytes) - _ <- uploadMetadataStore.insertFilePaths(uploadId, reserveUploadInfo.filePaths) - _ <- Fox.runIf(uploadDomain == UploadDomain.dataset) { - datasetUploadMetadataStore.insertUploadIdByDataSourceId(newDataSourceId, uploadId) - } - _ <- Fox.runIf(uploadDomain == UploadDomain.dataset) { - datasetUploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, reserveUploadInfo.layersToLink) - } + _ <- uploadMetadataStore.insertTotalFileCount(uploadId, resumableUploadInfo.totalFileCount) + _ <- uploadMetadataStore.insertTotalFileSizeInBytes(uploadId, resumableUploadInfo.totalFileSizeInBytes) + _ <- uploadMetadataStore.insertFilePaths(uploadId, resumableUploadInfo.filePaths) } yield () def enrichUnfinishedUploadInfoWithUploadIds( @@ -184,7 +213,7 @@ class 
UploadService @Inject()(dataSourceService: DataSourceService, val uploadId = extractDatasetUploadId(uploadFileId) for { dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) - uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) + uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, uploadDomain) filePathRaw = uploadFileId.split("/").tail.mkString("/") filePath = if (filePathRaw.charAt(0) == '/') filePathRaw.drop(1) else filePathRaw _ <- Fox.fromBool(!isOutsideUploadDir(uploadDir, filePath)) ?~> s"Invalid file path: $filePath" @@ -259,7 +288,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield if (knownUpload) { logger.info(f"Cancelling ${uploadFullName(uploadId, datasetId, dataSourceId)}...") - cleanUpUploadedDataset(uploadDirectoryFor(dataSourceId.organizationId, uploadId), + cleanUpUploadedDataset(uploadDirectoryFor(dataSourceId.organizationId, uploadId, uploadDomain), uploadId, reason = "Cancelled by user", uploadDomain) @@ -267,7 +296,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } private def uploadFullName(uploadId: String, datasetId: ObjectId, dataSourceId: DataSourceId) = - s"upload $uploadId of dataset $datasetId ($dataSourceId)" + s"upload $uploadId for dataset $datasetId ($dataSourceId)" def finishDatasetUpload(uploadInformation: UploadInformation, datasetId: ObjectId)( implicit tc: TokenContext): Fox[Unit] = { @@ -278,7 +307,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") linkedLayerIdentifiers <- datasetUploadMetadataStore.findLinkedLayerIdentifiers(uploadId) needsConversion = uploadInformation.needsConversion.getOrElse(false) - uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId) + uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.dataset) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.dataset) ?~> "dataset.upload.fileSizeCheck.failed" _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed" diff --git a/webknossos-datastore/conf/datastore.latest.routes b/webknossos-datastore/conf/datastore.latest.routes index 92b593436ab..b114d3a0b55 100644 --- a/webknossos-datastore/conf/datastore.latest.routes +++ b/webknossos-datastore/conf/datastore.latest.routes @@ -1,135 +1,137 @@ # Defines latest version of datastore routes (Higher priority routes first) # Health endpoint -GET /health @com.scalableminds.webknossos.datastore.controllers.Application.health +GET /health @com.scalableminds.webknossos.datastore.controllers.Application.health # Read image data -POST /datasets/:datasetId/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaWebknossos(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/readData @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestRawCuboidPost(datasetId: ObjectId, dataLayerName: String) -GET /datasets/:datasetId/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestRawCuboid(datasetId: ObjectId, dataLayerName: String, x: Int, y: Int, z: Int, width: Int, height: Int, depth: Int, mag: String, halfByte: Boolean ?= false, 
mappingName: Option[String]) -GET /datasets/:datasetId/layers/:dataLayerName/thumbnail.jpg @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.thumbnailJpeg(datasetId: ObjectId, dataLayerName: String, x: Int, y: Int, z: Int, width: Int, height: Int, mag: String, mappingName: Option[String], intensityMin: Option[Double], intensityMax: Option[Double], color: Option[String], invertColor: Option[Boolean]) -GET /datasets/:datasetId/layers/:dataLayerName/findData @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.findData(datasetId: ObjectId, dataLayerName: String) -GET /datasets/:datasetId/layers/:dataLayerName/histogram @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.histogram(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaWebknossos(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/readData @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestRawCuboidPost(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestRawCuboid(datasetId: ObjectId, dataLayerName: String, x: Int, y: Int, z: Int, width: Int, height: Int, depth: Int, mag: String, halfByte: Boolean ?= false, mappingName: Option[String]) +GET /datasets/:datasetId/layers/:dataLayerName/thumbnail.jpg @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.thumbnailJpeg(datasetId: ObjectId, dataLayerName: String, x: Int, y: Int, z: Int, width: Int, height: Int, mag: String, mappingName: Option[String], intensityMin: Option[Double], intensityMax: Option[Double], color: Option[String], invertColor: Option[Boolean]) +GET /datasets/:datasetId/layers/:dataLayerName/findData @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.findData(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/histogram @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.histogram(datasetId: ObjectId, dataLayerName: String) # Read mag and attachment data via proxy -GET /datasets/:datasetId/proxy/layers/:dataLayerName/mags/:mag/*path @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyMag(datasetId: ObjectId, dataLayerName: String, mag: String, path: String) -GET /datasets/:datasetId/proxy/layers/:dataLayerName/attachments/:attachmentType/:attachmentName/*path @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyAttachment(datasetId: ObjectId, dataLayerName: String, attachmentType: String, attachmentName: String, path: String) -GET /datasets/:datasetId/proxy/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyDatasource(datasetId: ObjectId) +GET /datasets/:datasetId/proxy/layers/:dataLayerName/mags/:mag/*path @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyMag(datasetId: ObjectId, dataLayerName: String, mag: String, path: String) +GET /datasets/:datasetId/proxy/layers/:dataLayerName/attachments/:attachmentType/:attachmentName/*path @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyAttachment(datasetId: ObjectId, dataLayerName: String, attachmentType: String, attachmentName: String, path: String) +GET 
/datasets/:datasetId/proxy/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.DataProxyController.proxyDatasource(datasetId: ObjectId) # Knossos compatible routes -GET /datasets/:datasetId/layers/:dataLayerName/mag:mag/x:x/y:y/z:z/bucket.raw @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaKnossos(datasetId: ObjectId, dataLayerName: String, mag: Int, x: Int, y: Int, z: Int, cubeSize: Int) +GET /datasets/:datasetId/layers/:dataLayerName/mag:mag/x:x/y:y/z:z/bucket.raw @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaKnossos(datasetId: ObjectId, dataLayerName: String, mag: Int, x: Int, y: Int, z: Int, cubeSize: Int) # Zarr2 compatible routes -GET /zarr/:datasetId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 2) -GET /zarr/:datasetId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 2) -GET /zarr/:datasetId/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZGroup(datasetId: ObjectId, dataLayerName="") -GET /zarr/:datasetId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSource(datasetId: ObjectId, zarrVersion: Int = 2) -GET /zarr/:datasetId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 2) -GET /zarr/:datasetId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 2) -GET /zarr/:datasetId/:dataLayerName/.zattrs @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZAttrs(datasetId: ObjectId, dataLayerName: String) -GET /zarr/:datasetId/:dataLayerName/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZGroup(datasetId: ObjectId, dataLayerName: String) -GET /zarr/:datasetId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 2) -GET /zarr/:datasetId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 2) -GET /zarr/:datasetId/:dataLayerName/:mag/.zarray @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZArray(datasetId: ObjectId, dataLayerName: String, mag: String) -GET /zarr/:datasetId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestRawZarrCube(datasetId: ObjectId, dataLayerName: String, mag: String, coordinates: String) - -GET /annotations/zarr/:accessTokenOrId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) -GET 
/annotations/zarr/:accessTokenOrId/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zGroupPrivateLink(accessTokenOrId: String, dataLayerName="") -GET /annotations/zarr/:accessTokenOrId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceWithAnnotationPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/.zattrs @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zAttrsWithAnnotationPrivateLink(accessTokenOrId: String, dataLayerName: String) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zGroupPrivateLink(accessTokenOrId: String, dataLayerName: String) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 2) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/.zarray @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zArrayPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String) -GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.rawZarrCubePrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, coordinates: String) +GET /zarr/:datasetId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 2) +GET /zarr/:datasetId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 2) +GET /zarr/:datasetId/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZGroup(datasetId: ObjectId, dataLayerName="") +GET /zarr/:datasetId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSource(datasetId: ObjectId, zarrVersion: Int = 2) +GET /zarr/:datasetId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 2) +GET /zarr/:datasetId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 2) +GET /zarr/:datasetId/:dataLayerName/.zattrs 
@com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZAttrs(datasetId: ObjectId, dataLayerName: String) +GET /zarr/:datasetId/:dataLayerName/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZGroup(datasetId: ObjectId, dataLayerName: String) +GET /zarr/:datasetId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 2) +GET /zarr/:datasetId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 2) +GET /zarr/:datasetId/:dataLayerName/:mag/.zarray @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZArray(datasetId: ObjectId, dataLayerName: String, mag: String) +GET /zarr/:datasetId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestRawZarrCube(datasetId: ObjectId, dataLayerName: String, mag: String, coordinates: String) + +GET /annotations/zarr/:accessTokenOrId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zGroupPrivateLink(accessTokenOrId: String, dataLayerName="") +GET /annotations/zarr/:accessTokenOrId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceWithAnnotationPrivateLink(accessTokenOrId: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/.zattrs @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zAttrsWithAnnotationPrivateLink(accessTokenOrId: String, dataLayerName: String) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/.zgroup @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zGroupPrivateLink(accessTokenOrId: String, dataLayerName: String) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 2) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/.zarray 
@com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zArrayPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String) +GET /annotations/zarr/:accessTokenOrId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.rawZarrCubePrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, coordinates: String) # Zarr3 compatible routes -GET /zarr3_experimental/:datasetId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSource(datasetId: ObjectId, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/:dataLayerName/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZarrJson(datasetId: ObjectId, dataLayerName: String) -GET /zarr3_experimental/:datasetId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 3) -GET /zarr3_experimental/:datasetId/:dataLayerName/:mag/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZarrJsonForMag(datasetId: ObjectId, dataLayerName: String, mag: String) -GET /zarr3_experimental/:datasetId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestRawZarrCube(datasetId: ObjectId, dataLayerName: String, mag: String, coordinates: String) - -GET /annotations/zarr3_experimental/:accessTokenOrId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceWithAnnotationPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 
3) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zarrJsonWithAnnotationPrivateLink(accessTokenOrId: String, dataLayerName: String) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 3) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zarrJsonPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String) -GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.rawZarrCubePrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, coordinates: String) +GET /zarr3_experimental/:datasetId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSourceDirectoryContents(datasetId: ObjectId, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataSource(datasetId: ObjectId, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerDirectoryContents(datasetId: ObjectId, dataLayerName: String, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/:dataLayerName/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZarrJson(datasetId: ObjectId, dataLayerName: String) +GET /zarr3_experimental/:datasetId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestDataLayerMagDirectoryContents(datasetId: ObjectId, dataLayerName: String, mag: String, zarrVersion: Int = 3) +GET /zarr3_experimental/:datasetId/:dataLayerName/:mag/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestZarrJsonForMag(datasetId: ObjectId, dataLayerName: String, mag: String) +GET 
/zarr3_experimental/:datasetId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.requestRawZarrCube(datasetId: ObjectId, dataLayerName: String, mag: String, coordinates: String) + +GET /annotations/zarr3_experimental/:accessTokenOrId @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceDirectoryContentsPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/datasource-properties.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataSourceWithAnnotationPrivateLink(accessTokenOrId: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zarrJsonWithAnnotationPrivateLink(accessTokenOrId: String, dataLayerName: String) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/ @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.dataLayerMagDirectoryContentsPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, zarrVersion: Int = 3) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/zarr.json @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.zarrJsonPrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String) +GET /annotations/zarr3_experimental/:accessTokenOrId/:dataLayerName/:mag/:coordinates @com.scalableminds.webknossos.datastore.controllers.ZarrStreamingController.rawZarrCubePrivateLink(accessTokenOrId: String, dataLayerName: String, mag: String, coordinates: String) # Segmentation mappings -GET /datasets/:datasetId/layers/:dataLayerName/mappings/:mappingName @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.mappingJson(datasetId: ObjectId, dataLayerName: String, mappingName: String) -GET /datasets/:datasetId/layers/:dataLayerName/mappings @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listMappings(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/mappings/:mappingName @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.mappingJson(datasetId: ObjectId, dataLayerName: String, mappingName: String) +GET /datasets/:datasetId/layers/:dataLayerName/mappings @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listMappings(datasetId: ObjectId, dataLayerName: String) # 
Agglomerate files -GET /datasets/:datasetId/layers/:dataLayerName/agglomerates @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listAgglomerates(datasetId: ObjectId, dataLayerName: String) -GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/skeleton/:agglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.generateAgglomerateSkeleton(datasetId: ObjectId, dataLayerName: String, mappingName: String, agglomerateId: Long) -GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/agglomerateGraph/:agglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.agglomerateGraph(datasetId: ObjectId, dataLayerName: String, mappingName: String, agglomerateId: Long) -GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/largestAgglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.largestAgglomerateId(datasetId: ObjectId, dataLayerName: String, mappingName: String) -POST /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/agglomeratesForSegments @com.scalableminds.webknossos.datastore.controllers.DataSourceController.agglomerateIdsForSegmentIds(datasetId: ObjectId, dataLayerName: String, mappingName: String) -GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/positionForSegment @com.scalableminds.webknossos.datastore.controllers.DataSourceController.positionForSegmentViaAgglomerateFile(datasetId: ObjectId, dataLayerName: String, mappingName: String, segmentId: Long) +GET /datasets/:datasetId/layers/:dataLayerName/agglomerates @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listAgglomerates(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/skeleton/:agglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.generateAgglomerateSkeleton(datasetId: ObjectId, dataLayerName: String, mappingName: String, agglomerateId: Long) +GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/agglomerateGraph/:agglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.agglomerateGraph(datasetId: ObjectId, dataLayerName: String, mappingName: String, agglomerateId: Long) +GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/largestAgglomerateId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.largestAgglomerateId(datasetId: ObjectId, dataLayerName: String, mappingName: String) +POST /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/agglomeratesForSegments @com.scalableminds.webknossos.datastore.controllers.DataSourceController.agglomerateIdsForSegmentIds(datasetId: ObjectId, dataLayerName: String, mappingName: String) +GET /datasets/:datasetId/layers/:dataLayerName/agglomerates/:mappingName/positionForSegment @com.scalableminds.webknossos.datastore.controllers.DataSourceController.positionForSegmentViaAgglomerateFile(datasetId: ObjectId, dataLayerName: String, mappingName: String, segmentId: Long) # Mesh files -GET /datasets/:datasetId/layers/:dataLayerName/meshes @com.scalableminds.webknossos.datastore.controllers.DSMeshController.listMeshFiles(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/meshes/chunks @com.scalableminds.webknossos.datastore.controllers.DSMeshController.listMeshChunksForSegment(datasetId: ObjectId, dataLayerName: String, 
targetMappingName: Option[String], editableMappingTracingId: Option[String]) -POST /datasets/:datasetId/layers/:dataLayerName/meshes/chunks/data @com.scalableminds.webknossos.datastore.controllers.DSMeshController.readMeshChunk(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/meshes/fullMesh.stl @com.scalableminds.webknossos.datastore.controllers.DSMeshController.loadFullMeshStl(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/meshes @com.scalableminds.webknossos.datastore.controllers.DSMeshController.listMeshFiles(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/meshes/chunks @com.scalableminds.webknossos.datastore.controllers.DSMeshController.listMeshChunksForSegment(datasetId: ObjectId, dataLayerName: String, targetMappingName: Option[String], editableMappingTracingId: Option[String]) +POST /datasets/:datasetId/layers/:dataLayerName/meshes/chunks/data @com.scalableminds.webknossos.datastore.controllers.DSMeshController.readMeshChunk(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/meshes/fullMesh.stl @com.scalableminds.webknossos.datastore.controllers.DSMeshController.loadFullMeshStl(datasetId: ObjectId, dataLayerName: String) # Connectome files -GET /datasets/:datasetId/layers/:dataLayerName/connectomes @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listConnectomeFiles(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/positions @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapsePositions(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/types @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapseTypes(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/:direction @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapticPartnerForSynapses(datasetId: ObjectId, dataLayerName: String, direction: String) -POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapsesForAgglomerates(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/connectomes @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listConnectomeFiles(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/positions @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapsePositions(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/types @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapseTypes(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses/:direction @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapticPartnerForSynapses(datasetId: ObjectId, dataLayerName: String, direction: String) +POST /datasets/:datasetId/layers/:dataLayerName/connectomes/synapses @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSynapsesForAgglomerates(datasetId: ObjectId, dataLayerName: String) # Ad-Hoc Meshing -POST 
/datasets/:datasetId/layers/:dataLayerName/adHocMesh @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestAdHocMesh(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/adHocMesh @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestAdHocMesh(datasetId: ObjectId, dataLayerName: String) # Segment-Index files -GET /datasets/:datasetId/layers/:dataLayerName/hasSegmentIndex @com.scalableminds.webknossos.datastore.controllers.DataSourceController.checkSegmentIndexFile(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/segmentIndex @com.scalableminds.webknossos.datastore.controllers.DataSourceController.querySegmentIndex(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/segmentIndex/:segmentId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentIndex(datasetId: ObjectId, dataLayerName: String, segmentId: String) -POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/volume @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentVolume(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/boundingBox @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentBoundingBox(datasetId: ObjectId, dataLayerName: String) -POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/surfaceArea @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentSurfaceArea(datasetId: ObjectId, dataLayerName: String) +GET /datasets/:datasetId/layers/:dataLayerName/hasSegmentIndex @com.scalableminds.webknossos.datastore.controllers.DataSourceController.checkSegmentIndexFile(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/segmentIndex @com.scalableminds.webknossos.datastore.controllers.DataSourceController.querySegmentIndex(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/segmentIndex/:segmentId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentIndex(datasetId: ObjectId, dataLayerName: String, segmentId: String) +POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/volume @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentVolume(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/boundingBox @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentBoundingBox(datasetId: ObjectId, dataLayerName: String) +POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/surfaceArea @com.scalableminds.webknossos.datastore.controllers.DataSourceController.getSegmentSurfaceArea(datasetId: ObjectId, dataLayerName: String) # Uploads: Datasets, mags, attachments -GET /datasets/upload/:uploadDomain @com.scalableminds.webknossos.datastore.controllers.UploadController.testChunk(resumableChunkNumber: Int, resumableIdentifier: String, uploadDomain: String) -POST /datasets/upload/:uploadDomain @com.scalableminds.webknossos.datastore.controllers.UploadController.uploadChunk(uploadDomain: String) -POST /datasets/upload/:uploadDomain/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveUpload(uploadDomain: String) -GET /datasets/upload/:uploadDomain/getUnfinishedUploads 
@com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String)
-POST        /datasets/upload/:uploadDomain/finishUpload                  @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String)
-POST        /datasets/upload/:uploadDomain/cancelUpload                  @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String)
+GET           /datasets/upload/:uploadDomain                                                     @com.scalableminds.webknossos.datastore.controllers.UploadController.testChunk(resumableChunkNumber: Int, resumableIdentifier: String, uploadDomain: String)
+POST          /datasets/upload/:uploadDomain                                                     @com.scalableminds.webknossos.datastore.controllers.UploadController.uploadChunk(uploadDomain: String)
+POST          /datasets/upload/dataset/reserveUpload                                             @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveDatasetUpload()
+POST          /datasets/upload/mag/reserveUpload                                                 @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveMagUpload()
+POST          /datasets/upload/attachment/reserveUpload                                          @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveAttachmentUpload()
+GET           /datasets/upload/:uploadDomain/getUnfinishedUploads                                @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String)
+POST          /datasets/upload/:uploadDomain/finishUpload                                        @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String)
+POST          /datasets/upload/:uploadDomain/cancelUpload                                        @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String)
 
 # DataSource management
-GET         /datasets/baseDirAbsolute                                    @com.scalableminds.webknossos.datastore.controllers.DataSourceController.baseDirAbsolute
-POST        /datasets/measureUsedStorage/:organizationId                 @com.scalableminds.webknossos.datastore.controllers.DataSourceController.measureUsedStorage(organizationId: String)
-PUT         /datasets/:datasetId                                         @com.scalableminds.webknossos.datastore.controllers.DataSourceController.updateOnDisk(datasetId: ObjectId)
-DELETE      /datasets/:datasetId/deleteOnDisk                            @com.scalableminds.webknossos.datastore.controllers.DataSourceController.deleteOnDisk(datasetId: ObjectId)
-DELETE      /datasets/deletePaths                                        @com.scalableminds.webknossos.datastore.controllers.DataSourceController.deletePaths()
-POST        /datasets/exploreRemote                                      @com.scalableminds.webknossos.datastore.controllers.DataSourceController.exploreRemoteDataset()
-POST        /datasets/validatePaths                                      @com.scalableminds.webknossos.datastore.controllers.DataSourceController.validatePaths()
-DELETE      /datasets/:datasetId                                         @com.scalableminds.webknossos.datastore.controllers.DataSourceController.invalidateCache(datasetId: ObjectId)
+GET           /datasets/baseDirAbsolute                                                          @com.scalableminds.webknossos.datastore.controllers.DataSourceController.baseDirAbsolute
+POST          /datasets/measureUsedStorage/:organizationId                                       @com.scalableminds.webknossos.datastore.controllers.DataSourceController.measureUsedStorage(organizationId: String)
+PUT           /datasets/:datasetId                                                               @com.scalableminds.webknossos.datastore.controllers.DataSourceController.updateOnDisk(datasetId: ObjectId)
+DELETE        /datasets/:datasetId/deleteOnDisk                                                  @com.scalableminds.webknossos.datastore.controllers.DataSourceController.deleteOnDisk(datasetId: ObjectId)
+DELETE        /datasets/deletePaths                                                              @com.scalableminds.webknossos.datastore.controllers.DataSourceController.deletePaths()
+POST          /datasets/exploreRemote
@com.scalableminds.webknossos.datastore.controllers.DataSourceController.exploreRemoteDataset() +POST /datasets/validatePaths @com.scalableminds.webknossos.datastore.controllers.DataSourceController.validatePaths() +DELETE /datasets/:datasetId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.invalidateCache(datasetId: ObjectId) # Actions -POST /triggers/checkInboxBlocking @com.scalableminds.webknossos.datastore.controllers.DataSourceController.triggerInboxCheckBlocking(organizationId: Option[String]) -POST /triggers/createOrganizationDirectory @com.scalableminds.webknossos.datastore.controllers.DataSourceController.createOrganizationDirectory(organizationId: String) -POST /triggers/reload/:organizationId/:datasetId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.reload(organizationId: String, datasetId: ObjectId, layerName: Option[String]) +POST /triggers/checkInboxBlocking @com.scalableminds.webknossos.datastore.controllers.DataSourceController.triggerInboxCheckBlocking(organizationId: Option[String]) +POST /triggers/createOrganizationDirectory @com.scalableminds.webknossos.datastore.controllers.DataSourceController.createOrganizationDirectory(organizationId: String) +POST /triggers/reload/:organizationId/:datasetId @com.scalableminds.webknossos.datastore.controllers.DataSourceController.reload(organizationId: String, datasetId: ObjectId, layerName: Option[String]) # Exports -GET /exports/:jobId/download @com.scalableminds.webknossos.datastore.controllers.ExportsController.download(jobId: ObjectId) +GET /exports/:jobId/download @com.scalableminds.webknossos.datastore.controllers.ExportsController.download(jobId: ObjectId) # AI Models -POST /aiModels/effectiveVoxelSize @com.scalableminds.webknossos.datastore.controllers.DSAiModelController.effectiveVoxelSize +POST /aiModels/effectiveVoxelSize @com.scalableminds.webknossos.datastore.controllers.DSAiModelController.effectiveVoxelSize From bd71d24e540126658f19c46cc45acf63b99c206f Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 2 Apr 2026 13:26:45 +0200 Subject: [PATCH 08/37] reserve mag + attachment --- .../WKRemoteDataStoreController.scala | 35 ++++++++++------- conf/webknossos.latest.routes | 6 ++- schema/schema.sql | 4 +- .../controllers/UploadController.scala | 39 ++++++++++++++++--- .../datastore/dataformats/MagLocator.scala | 7 +++- .../datasource/DataLayerAttachments.scala | 2 + .../services/DSRemoteWebknossosClient.scala | 34 +++++++++++----- .../uploading/UploadMetadataStore.scala | 14 ++++++- .../services/uploading/UploadService.scala | 38 ++++++++++++++++++ 9 files changed, 141 insertions(+), 38 deletions(-) diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 945f61bc523..07a4177085e 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -6,18 +6,9 @@ import com.scalableminds.util.time.Instant import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.controllers.JobExportProperties import com.scalableminds.webknossos.datastore.models.UnfinishedUpload -import com.scalableminds.webknossos.datastore.models.datasource.{ - DataSource, - DataSourceId, - DataSourceStatus, - UnusableDataSource -} +import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId, DataSourceStatus, UnusableDataSource} import com.scalableminds.webknossos.datastore.services.{DataSourcePathInfo, DataStoreStatus} -import 
com.scalableminds.webknossos.datastore.services.uploading.{ - ReportDatasetUploadParameters, - DatasetUploadAdditionalInfo, - DatasetUploadInfo -} +import com.scalableminds.webknossos.datastore.services.uploading.{AttachmentUploadAdditionalInfo, AttachmentUploadInfo, DatasetUploadAdditionalInfo, DatasetUploadInfo, MagUploadAdditionalInfo, MagUploadInfo, ReportDatasetUploadParameters} import com.typesafe.scalalogging.LazyLogging import models.dataset._ import models.dataset.credential.CredentialDAO @@ -90,10 +81,24 @@ class WKRemoteDataStoreController @Inject()( } } - def getUnfinishedUploadsForUser(name: String, - key: String, - token: String, - organizationId: String): Action[AnyContent] = + def reserveMagUpload(name: String, key: String, token: String): Action[MagUploadInfo] = + Action.async(validateJson[MagUploadInfo]) { implicit request => + dataStoreService.validateAccess(name, key) { dataStore => + Fox.successful(Ok(Json.toJson(MagUploadAdditionalInfo(DataSourceId("", ""))))) + } + } + + def reserveAttachmentUpload(name: String, key: String, token: String): Action[AttachmentUploadInfo] = + Action.async(validateJson[AttachmentUploadInfo]) { implicit request => + dataStoreService.validateAccess(name, key) { dataStore => + Fox.successful(Ok(Json.toJson(AttachmentUploadAdditionalInfo(DataSourceId("", ""))))) + } + } + + def getUnfinishedDatasetUploadsForUser(name: String, + key: String, + token: String, + organizationId: String): Action[AnyContent] = Action.async { implicit request => dataStoreService.validateAccess(name, key) { _ => for { diff --git a/conf/webknossos.latest.routes b/conf/webknossos.latest.routes index 29a2e8b0e21..85a2f4a97aa 100644 --- a/conf/webknossos.latest.routes +++ b/conf/webknossos.latest.routes @@ -139,8 +139,10 @@ PUT /datastores/:name/datasources/realpaths GET /datastores/:name/datasources/:datasetId controllers.WKRemoteDataStoreController.getDataSource(name: String, key: String, datasetId: ObjectId) PUT /datastores/:name/datasources/:datasetId controllers.WKRemoteDataStoreController.updateDataSource(name: String, key: String, datasetId: ObjectId) PATCH /datastores/:name/status controllers.WKRemoteDataStoreController.statusUpdate(name: String, key: String) -POST /datastores/:name/reserveUpload controllers.WKRemoteDataStoreController.reserveDatasetUpload(name: String, key: String, token: String) -GET /datastores/:name/getUnfinishedUploadsForUser controllers.WKRemoteDataStoreController.getUnfinishedUploadsForUser(name: String, key: String, token: String, organizationName: String) +POST /datastores/:name/reserveDatasetUpload controllers.WKRemoteDataStoreController.reserveDatasetUpload(name: String, key: String, token: String) +POST /datastores/:name/reserveMagUpload controllers.WKRemoteDataStoreController.reserveMagUpload(name: String, key: String, token: String) +POST /datastores/:name/reserveAttachmentUpload controllers.WKRemoteDataStoreController.reserveAttachmentUpload(name: String, key: String, token: String) +GET /datastores/:name/getUnfinishedDatasetUploadsForUser controllers.WKRemoteDataStoreController.getUnfinishedDatasetUploadsForUser(name: String, key: String, token: String, organizationName: String) POST /datastores/:name/reportDatasetUpload controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, token: String, datasetId: ObjectId) POST /datastores/:name/deleteDataset controllers.WKRemoteDataStoreController.deleteDataset(name: String, key: String) GET /datastores/:name/findDatasetId 
controllers.WKRemoteDataStoreController.findDatasetId(name: String, key: String, datasetDirectoryName: String, organizationId: String)
diff --git a/schema/schema.sql b/schema/schema.sql
index 5afab77a9fc..3482703ea07 100644
--- a/schema/schema.sql
+++ b/schema/schema.sql
@@ -21,7 +21,7 @@ CREATE TABLE webknossos.releaseInformation (
   schemaVersion BIGINT NOT NULL
 );
 
-INSERT INTO webknossos.releaseInformation(schemaVersion) values(159);
+INSERT INTO webknossos.releaseInformation(schemaVersion) values(160);
 
 COMMIT TRANSACTION;
 
@@ -177,6 +177,7 @@ CREATE TABLE webknossos.dataset_layer_attachments(
   type webknossos.LAYER_ATTACHMENT_TYPE NOT NULL,
   dataFormat webknossos.LAYER_ATTACHMENT_DATAFORMAT NOT NULL,
   uploadToPathIsPending BOOLEAN NOT NULL DEFAULT FALSE,
+  uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE,
   PRIMARY KEY(_dataset, layerName, name, type)
 );
 
@@ -197,6 +198,7 @@ CREATE TABLE webknossos.dataset_mags(
   channelIndex INT,
   credentialId TEXT,
   uploadToPathIsPending BOOLEAN NOT NULL DEFAULT FALSE,
+  uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE,
   PRIMARY KEY (_dataset, dataLayerName, mag)
 );
 
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
index 58396bfbc8c..b053f9d518b 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
@@ -7,6 +7,7 @@ import com.scalableminds.webknossos.datastore.services.{
   UserAccessRequest
 }
 import com.scalableminds.webknossos.datastore.services.uploading.{
+  AttachmentUploadInfo,
   CancelUploadInformation,
   DatasetUploadInfo,
   MagUploadInfo,
@@ -52,13 +53,36 @@ class UploadController @Inject()(
     }
   }
 
-  def reserveMagUpload(): Action[MagUploadInfo] = Action.async(validateJson[MagUploadInfo]) { implicit request =>
-    Fox.successful(Ok)
-  }
+  def reserveMagUpload(): Action[MagUploadInfo] =
+    Action.async(validateJson[MagUploadInfo]) { implicit request =>
+      accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(request.body.datasetId)) {
+        for {
+          isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId, UploadDomain.mag)
+          _ <- Fox.runIf(!isKnownUpload) {
+            for {
+              reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveMagUpload(request.body) ?~> "dataset.upload.validation.failed"
+              _ <- uploadService.reserveMagUpload(request.body, reserveUploadAdditionalInfo.dataSourceId)
+            } yield ()
+          }
+        } yield Ok
+      }
+    }
 
-  def reserveAttachmentUpload(): Action[MagUploadInfo] = Action.async(validateJson[MagUploadInfo]) { implicit request =>
-    Fox.successful(Ok)
-  }
+  def reserveAttachmentUpload(): Action[AttachmentUploadInfo] =
+    Action.async(validateJson[AttachmentUploadInfo]) { implicit request =>
+      accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(request.body.datasetId)) {
+        for {
+          isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId,
+                                                       UploadDomain.attachment)
+          _ <- Fox.runIf(!isKnownUpload) 
{ + for { + reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveAttachmentUpload(request.body) ?~> "dataset.upload.validation.failed" + _ <- uploadService.reserveAttachmentUpload(request.body, reserveUploadAdditionalInfo.dataSourceId) + } yield () + } + } yield Ok + } + } def getUnfinishedUploads(organizationName: String, uploadDomain: String): Action[AnyContent] = Action.async { implicit request => @@ -169,6 +194,7 @@ class UploadController @Inject()( } } + // TODO uploadId as GET param? def cancelUpload(uploadDomain: String): Action[CancelUploadInformation] = Action.async(validateJson[CancelUploadInformation]) { implicit request => for { @@ -180,6 +206,7 @@ class UploadController @Inject()( result <- datasetIdFox.flatMap { datasetId => accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) { for { + // TODO adapt also to other domains _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed" _ <- uploadService.cancelUpload(request.body, uploadDomainValidated) ?~> "Could not cancel the upload." } yield Ok diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala index 0db10442e2b..78663b89836 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala @@ -3,7 +3,7 @@ package com.scalableminds.webknossos.datastore.dataformats import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.webknossos.datastore.datareaders.AxisOrder import com.scalableminds.webknossos.datastore.helpers.UPath -import com.scalableminds.webknossos.datastore.models.datasource.MagFormatHelper +import com.scalableminds.webknossos.datastore.models.datasource.{LayerAttachment, MagFormatHelper} import com.scalableminds.webknossos.datastore.storage.LegacyDataVaultCredential import play.api.libs.json.{Json, OFormat} @@ -12,7 +12,10 @@ case class MagLocator(mag: Vec3Int, credentials: Option[LegacyDataVaultCredential] = None, axisOrder: Option[AxisOrder] = None, channelIndex: Option[Int] = None, - credentialId: Option[String] = None) + credentialId: Option[String] = None) { + + def withoutCredentials: MagLocator = this.copy(credentials = None, credentialId = None) +} object MagLocator extends MagFormatHelper { implicit val jsonFormat: OFormat[MagLocator] = Json.format[MagLocator] diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala index 8f44b048fb5..496fedb4d3e 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala @@ -158,6 +158,8 @@ case class LayerAttachment(name: String, def relativizedIn(dataSourcePath: UPath): LayerAttachment = this.copy(path = this.path.relativizedIn(dataSourcePath)) + + def withoutCredential: LayerAttachment = this.copy(credentialId = None) } object LayerAttachment { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
index 66176f27450..e22d97dea03 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
@@ -15,9 +15,13 @@ import com.scalableminds.webknossos.datastore.models.annotation.AnnotationSource
 import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId}
 import com.scalableminds.webknossos.datastore.rpc.RPC
 import com.scalableminds.webknossos.datastore.services.uploading.{
-  ReportDatasetUploadParameters,
+  AttachmentUploadAdditionalInfo,
+  AttachmentUploadInfo,
   DatasetUploadAdditionalInfo,
-  DatasetUploadInfo
+  DatasetUploadInfo,
+  MagUploadAdditionalInfo,
+  MagUploadInfo,
+  ReportDatasetUploadParameters
 }
 import com.scalableminds.webknossos.datastore.storage.DataVaultCredential
 import com.typesafe.scalalogging.LazyLogging
@@ -117,14 +121,24 @@ class DSRemoteWebknossosClient @Inject()(
       .silent
       .putJson(dataSourcePaths)
 
-  def reserveDatasetUpload(info: DatasetUploadInfo)(
-      implicit tc: TokenContext): Fox[DatasetUploadAdditionalInfo] =
-    for {
-      reserveUploadInfo <- rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveUpload")
-        .addQueryParam("key", dataStoreKey)
-        .withTokenFromContext
-        .postJsonWithJsonResponse[DatasetUploadInfo, DatasetUploadAdditionalInfo](info)
-    } yield reserveUploadInfo
+  def reserveDatasetUpload(info: DatasetUploadInfo)(implicit tc: TokenContext): Fox[DatasetUploadAdditionalInfo] =
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveDatasetUpload")
+      .addQueryParam("key", dataStoreKey)
+      .withTokenFromContext
+      .postJsonWithJsonResponse[DatasetUploadInfo, DatasetUploadAdditionalInfo](info)
+
+  def reserveMagUpload(info: MagUploadInfo)(implicit tc: TokenContext): Fox[MagUploadAdditionalInfo] =
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveMagUpload")
+      .addQueryParam("key", dataStoreKey)
+      .withTokenFromContext
+      .postJsonWithJsonResponse[MagUploadInfo, MagUploadAdditionalInfo](info)
+
+  def reserveAttachmentUpload(info: AttachmentUploadInfo)(
+      implicit tc: TokenContext): Fox[AttachmentUploadAdditionalInfo] =
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveAttachmentUpload")
+      .addQueryParam("key", dataStoreKey)
+      .withTokenFromContext
+      .postJsonWithJsonResponse[AttachmentUploadInfo, AttachmentUploadAdditionalInfo](info)
 
   def updateDataSource(dataSource: DataSource, datasetId: ObjectId)(implicit tc: TokenContext): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/datasources/${datasetId.toString}")
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
index d9b24e0a7e0..2fb1c176545 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
@@ -2,7 +2,9 @@ package com.scalableminds.webknossos.datastore.services.uploading
 
 import com.scalableminds.util.objectid.ObjectId
 import com.scalableminds.util.tools.{Fox, FoxImplicits}
-import com.scalableminds.webknossos.datastore.models.datasource.DataSourceId
+import com.scalableminds.webknossos.datastore.dataformats.MagLocator
+import 
com.scalableminds.webknossos.datastore.models.datasource.LayerAttachmentType.LayerAttachmentType +import com.scalableminds.webknossos.datastore.models.datasource.{DataSourceId, LayerAttachment} import com.scalableminds.webknossos.datastore.services.uploading.UploadDomain.UploadDomain import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore import play.api.libs.json.Json @@ -132,7 +134,7 @@ trait UploadMetadataStore extends FoxImplicits { } class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { - protected val domain = UploadDomain.dataset + protected val domain: UploadDomain = UploadDomain.dataset private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" @@ -168,9 +170,17 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { protected val domain: UploadDomain = UploadDomain.mag + def insertMag(uploadId: String, mag: MagLocator): Fox[Unit] = + store.insertSerialized[MagLocator](uploadId, mag) } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { protected val domain: UploadDomain = UploadDomain.attachment + def insertAttachment(uploadId: String, attachment: LayerAttachment): Fox[Unit] = + store.insertSerialized[LayerAttachment](uploadId, attachment) + + def insertAttachmentType(uploadId: String, attachmentType: LayerAttachmentType): Fox[Unit] = + store.insertSerialized[LayerAttachmentType](uploadId, attachmentType) + } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 1dfe91d380a..495195027e8 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -19,6 +19,7 @@ import com.scalableminds.webknossos.datastore.datareaders.zarr3.Zarr3ArrayHeader import com.scalableminds.webknossos.datastore.explore.ExploreLocalLayerService import com.scalableminds.webknossos.datastore.helpers.{DatasetDeleter, DirectoryConstants, UPath} import com.scalableminds.webknossos.datastore.models.UnfinishedUpload +import com.scalableminds.webknossos.datastore.models.datasource.LayerAttachmentType.LayerAttachmentType import com.scalableminds.webknossos.datastore.models.datasource.UsableDataSource.FILENAME_DATASOURCE_PROPERTIES_JSON import com.scalableminds.webknossos.datastore.models.datasource._ import com.scalableminds.webknossos.datastore.services.uploading.UploadDomain.UploadDomain @@ -70,6 +71,16 @@ object MagUploadInfo { implicit val jsonFormat: OFormat[MagUploadInfo] = Json.format[MagUploadInfo] } +case class AttachmentUploadInfo( + resumableUploadInfo: ResumableUploadInfo, + datasetId: ObjectId, + attachmentType: LayerAttachmentType, + attachment: LayerAttachment +) +object AttachmentUploadInfo { + implicit val jsonFormat: OFormat[AttachmentUploadInfo] = Json.format[AttachmentUploadInfo] +} + case class DatasetUploadAdditionalInfo(newDatasetId: ObjectId, directoryName: String) object DatasetUploadAdditionalInfo { implicit val jsonFormat: OFormat[DatasetUploadAdditionalInfo] = @@ -82,6 +93,12 @@ 
object MagUploadAdditionalInfo { Json.format[MagUploadAdditionalInfo] } +case class AttachmentUploadAdditionalInfo(dataSourceId: DataSourceId) +object AttachmentUploadAdditionalInfo { + implicit val jsonFormat: OFormat[AttachmentUploadAdditionalInfo] = + Json.format[AttachmentUploadAdditionalInfo] +} + case class ReportDatasetUploadParameters( needsConversion: Boolean, datasetSizeBytes: Long, @@ -167,6 +184,27 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } + def reserveMagUpload(magUploadInfo: MagUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = + for { + _ <- reserveResumableUpload(magUploadInfo.resumableUploadInfo, + magUploadInfo.datasetId, + dataSourceId, + UploadDomain.mag) + uploadId = magUploadInfo.resumableUploadInfo.uploadId + _ <- magUploadMetadataStore.insertMag(uploadId, magUploadInfo.mag.withoutCredentials) + } yield () + + def reserveAttachmentUpload(attachmentUploadInfo: AttachmentUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = + for { + _ <- reserveResumableUpload(attachmentUploadInfo.resumableUploadInfo, + attachmentUploadInfo.datasetId, + dataSourceId, + UploadDomain.attachment) + uploadId = attachmentUploadInfo.resumableUploadInfo.uploadId + _ <- attachmentUploadMetadataStore.insertAttachment(uploadId, attachmentUploadInfo.attachment.withoutCredential) + _ <- attachmentUploadMetadataStore.insertAttachmentType(uploadId, attachmentUploadInfo.attachmentType) + } yield () + private def reserveResumableUpload(resumableUploadInfo: ResumableUploadInfo, datasetId: ObjectId, dataSourceId: DataSourceId, From b1ae1261e8c6b276f2c3ca8260c3651257d39b0b Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 2 Apr 2026 13:39:09 +0200 Subject: [PATCH 09/37] insert mag + attachment details into redis --- .../WKRemoteDataStoreController.scala | 28 +++++++++++++++-- .../uploading/UploadMetadataStore.scala | 30 +++++++++++++++---- .../services/uploading/UploadService.scala | 4 +++ 3 files changed, 54 insertions(+), 8 deletions(-) diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 07a4177085e..b7932de7893 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -6,9 +6,22 @@ import com.scalableminds.util.time.Instant import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.controllers.JobExportProperties import com.scalableminds.webknossos.datastore.models.UnfinishedUpload -import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId, DataSourceStatus, UnusableDataSource} +import com.scalableminds.webknossos.datastore.models.datasource.{ + DataSource, + DataSourceId, + DataSourceStatus, + UnusableDataSource +} import com.scalableminds.webknossos.datastore.services.{DataSourcePathInfo, DataStoreStatus} -import com.scalableminds.webknossos.datastore.services.uploading.{AttachmentUploadAdditionalInfo, AttachmentUploadInfo, DatasetUploadAdditionalInfo, DatasetUploadInfo, MagUploadAdditionalInfo, MagUploadInfo, ReportDatasetUploadParameters} +import com.scalableminds.webknossos.datastore.services.uploading.{ + AttachmentUploadAdditionalInfo, + AttachmentUploadInfo, + DatasetUploadAdditionalInfo, + DatasetUploadInfo, + MagUploadAdditionalInfo, + MagUploadInfo, + ReportDatasetUploadParameters +} import com.typesafe.scalalogging.LazyLogging import models.dataset._ import models.dataset.credential.CredentialDAO @@ -84,7 +97,16 @@ class WKRemoteDataStoreController 
@Inject()( def reserveMagUpload(name: String, key: String, token: String): Action[MagUploadInfo] = Action.async(validateJson[MagUploadInfo]) { implicit request => dataStoreService.validateAccess(name, key) { dataStore => - Fox.successful(Ok(Json.toJson(MagUploadAdditionalInfo(DataSourceId("", ""))))) + // DS write access was asserted already at this point. + implicit val ctx: DBAccessContext = GlobalAccessContext + for { + dataset <- datasetDAO.findOne(request.body.datasetId) + (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) + _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." + } yield Ok(Json.toJson(MagUploadAdditionalInfo(DataSourceId("", "")))) + // DS must not have the mag + // insert the mag + // return existing datasource id } } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index 2fb1c176545..1d17fdfa111 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -137,10 +137,10 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt protected val domain: UploadDomain = UploadDomain.dataset private def redisKeyForUploadIdByDataSourceId(datasourceId: DataSourceId): String = - s"upload___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" + s"${keyPrefix}___${Json.stringify(Json.toJson(datasourceId))}___datasourceId" private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = - s"upload___${uploadId}___linkedLayerIdentifier" + s"$keyPrefix${uploadId}___linkedLayerIdentifier" // TODO make this Fox[String]? 
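+  // Resulting Redis key shapes (sketch only; the concrete keyPrefix value is an assumption,
+  // it is expected to be supplied by the UploadMetadataStore trait and to encode the domain):
+  //   <keyPrefix>___<dataSourceId as json>___datasourceId   -> uploadId
+  //   <keyPrefix><uploadId>___linkedLayerIdentifier         -> serialized LinkedLayerIdentifier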
def findUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = @@ -170,17 +170,37 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { protected val domain: UploadDomain = UploadDomain.mag + private def redisKeyForMag(uploadId: String): String = + s"$keyPrefix${uploadId}___mag" + + private def redisKeyForLayerName(uploadId: String): String = + s"$keyPrefix${uploadId}___layerName" + def insertMag(uploadId: String, mag: MagLocator): Fox[Unit] = - store.insertSerialized[MagLocator](uploadId, mag) + store.insertSerialized[MagLocator](redisKeyForMag(uploadId), mag) + + def insertLayerName(uploadId: String, layerName: String): Fox[Unit] = + store.insert(redisKeyForLayerName(uploadId), layerName) } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { protected val domain: UploadDomain = UploadDomain.attachment + private def redisKeyForAttachment(uploadId: String): String = + s"$keyPrefix${uploadId}___attachment" + + private def redisKeyForAttachmentType(uploadId: String): String = + s"$keyPrefix${uploadId}___attachmentType" + + private def redisKeyForLayerName(uploadId: String): String = + s"$keyPrefix${uploadId}___layerName" + def insertAttachment(uploadId: String, attachment: LayerAttachment): Fox[Unit] = - store.insertSerialized[LayerAttachment](uploadId, attachment) + store.insertSerialized[LayerAttachment](redisKeyForAttachment(uploadId), attachment) def insertAttachmentType(uploadId: String, attachmentType: LayerAttachmentType): Fox[Unit] = - store.insertSerialized[LayerAttachmentType](uploadId, attachmentType) + store.insertSerialized[LayerAttachmentType](redisKeyForAttachmentType(uploadId), attachmentType) + def insertLayerName(uploadId: String, layerName: String): Fox[Unit] = + store.insert(redisKeyForLayerName(uploadId), layerName) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 495195027e8..b14812dbd8a 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -65,6 +65,7 @@ object DatasetUploadInfo { case class MagUploadInfo( resumableUploadInfo: ResumableUploadInfo, datasetId: ObjectId, + layerName: String, mag: MagLocator ) object MagUploadInfo { @@ -74,6 +75,7 @@ object MagUploadInfo { case class AttachmentUploadInfo( resumableUploadInfo: ResumableUploadInfo, datasetId: ObjectId, + layerName: String, attachmentType: LayerAttachmentType, attachment: LayerAttachment ) @@ -192,6 +194,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, UploadDomain.mag) uploadId = magUploadInfo.resumableUploadInfo.uploadId _ <- magUploadMetadataStore.insertMag(uploadId, magUploadInfo.mag.withoutCredentials) + _ <- magUploadMetadataStore.insertLayerName(uploadId, magUploadInfo.layerName) } yield () def reserveAttachmentUpload(attachmentUploadInfo: AttachmentUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = @@ -203,6 +206,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, uploadId = attachmentUploadInfo.resumableUploadInfo.uploadId _ <- attachmentUploadMetadataStore.insertAttachment(uploadId, 
attachmentUploadInfo.attachment.withoutCredential)
       _ <- attachmentUploadMetadataStore.insertAttachmentType(uploadId, attachmentUploadInfo.attachmentType)
+      _ <- attachmentUploadMetadataStore.insertLayerName(uploadId, attachmentUploadInfo.layerName)
     } yield ()
 
   private def reserveResumableUpload(resumableUploadInfo: ResumableUploadInfo,

From c90b1c9d37443c941ebb2d4016a0b7f753e3cc6d Mon Sep 17 00:00:00 2001
From: Florian M
Date: Thu, 2 Apr 2026 14:36:25 +0200
Subject: [PATCH 10/37] rename dao

---
 app/controllers/DatasetController.scala       | 2 +-
 app/models/dataset/Dataset.scala              | 4 ++--
 app/models/dataset/DatasetService.scala       | 2 +-
 app/models/dataset/UploadToPathsService.scala | 2 +-
 app/models/storage/UsedStorageService.scala   | 4 ++--
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala
index 5b6e0ffdb61..713de647719 100755
--- a/app/controllers/DatasetController.scala
+++ b/app/controllers/DatasetController.scala
@@ -173,7 +173,7 @@ class DatasetController @Inject()(userService: UserService,
                                   thumbnailCachingService: ThumbnailCachingService,
                                   usedStorageService: UsedStorageService,
                                   conf: WkConf,
-                                  datasetMagsDAO: DatasetMagsDAO,
+                                  datasetMagsDAO: DatasetMagDAO,
                                   slackNotificationService: SlackNotificationService,
                                   authenticationService: AccessibleBySwitchingService,
                                   analyticsService: AnalyticsService,
diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala
index 9dfc460145e..5d20d4895ae 100755
--- a/app/models/dataset/Dataset.scala
+++ b/app/models/dataset/Dataset.scala
@@ -792,7 +792,7 @@ case class DataSourceMagRow(_dataset: ObjectId,
                             _organization: String,
                             directoryName: String)
 
-class DatasetMagsDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext)
+class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext)
     extends SQLDAO[MagWithPaths, DatasetMagsRow, DatasetMags](sqlClient) {
   protected val collection = DatasetMags
 
@@ -1018,7 +1018,7 @@ class DatasetMagsDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionConte
 }
 
 class DatasetLayerDAO @Inject()(sqlClient: SqlClient,
-                                datasetMagsDAO: DatasetMagsDAO,
+                                datasetMagsDAO: DatasetMagDAO,
                                 datasetCoordinateTransformationsDAO: DatasetCoordinateTransformationsDAO,
                                 datasetLayerAdditionalAxesDAO: DatasetLayerAdditionalAxesDAO,
                                 datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO)(implicit ec: ExecutionContext)
diff --git a/app/models/dataset/DatasetService.scala b/app/models/dataset/DatasetService.scala
index c0656d8c663..c2536dbba63 100644
--- a/app/models/dataset/DatasetService.scala
+++ b/app/models/dataset/DatasetService.scala
@@ -49,7 +49,7 @@ class DatasetService @Inject()(organizationDAO: OrganizationDAO,
                                dataStoreDAO: DataStoreDAO,
                                datasetLastUsedTimesDAO: DatasetLastUsedTimesDAO,
                                datasetDataLayerDAO: DatasetLayerDAO,
-                               datasetMagsDAO: DatasetMagsDAO,
+                               datasetMagsDAO: DatasetMagDAO,
                                datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO,
                                teamDAO: TeamDAO,
                                folderDAO: FolderDAO,
diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala
index 8a0290c203c..8497f5892f7 100644
--- a/app/models/dataset/UploadToPathsService.scala
+++ b/app/models/dataset/UploadToPathsService.scala
@@ -45,7 +45,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService,
                                      datasetDAO: DatasetDAO,
                                      dataStoreDAO: DataStoreDAO,
                                      layerToLinkService: LayerToLinkService,
                                      datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO,
-                                     datasetMagsDAO: DatasetMagsDAO,
+                                     datasetMagsDAO: DatasetMagDAO,
                                      pathDeletionService: PathDeletionService,
                                      folderDAO: FolderDAO,
                                      conf: WkConf)
diff --git a/app/models/storage/UsedStorageService.scala b/app/models/storage/UsedStorageService.scala
index d4a4f9db436..d584d9a6c0e 100644
--- a/app/models/storage/UsedStorageService.scala
+++ b/app/models/storage/UsedStorageService.scala
@@ -15,7 +15,7 @@ import models.dataset.{
   DataStoreDAO,
   Dataset,
   DatasetLayerAttachmentsDAO,
-  DatasetMagsDAO,
+  DatasetMagDAO,
   StorageRelevantDataLayerAttachment,
   WKRemoteDataStoreClient
 }
@@ -34,7 +34,7 @@ class UsedStorageService @Inject()(val actorSystem: ActorSystem,
                                    val lifecycle: ApplicationLifecycle,
                                    organizationDAO: OrganizationDAO,
                                    dataStoreDAO: DataStoreDAO,
-                                   datasetMagDAO: DatasetMagsDAO,
+                                   datasetMagDAO: DatasetMagDAO,
                                    datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO,
                                    rpc: RPC,
                                    config: WkConf)(implicit val ec: ExecutionContext)

From 14181c48b6469aae9c634643c3f3e85b20affaf2 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Tue, 7 Apr 2026 09:26:24 +0200
Subject: [PATCH 11/37] evolution

---
 schema/evolutions/160-upload-mags-attachments.sql  | 10 ++++++++++
 .../reversions/160-upload-mags-attachments.sql     | 10 ++++++++++
 2 files changed, 20 insertions(+)
 create mode 100644 schema/evolutions/160-upload-mags-attachments.sql
 create mode 100644 schema/evolutions/reversions/160-upload-mags-attachments.sql

diff --git a/schema/evolutions/160-upload-mags-attachments.sql b/schema/evolutions/160-upload-mags-attachments.sql
new file mode 100644
index 00000000000..31bc14c0449
--- /dev/null
+++ b/schema/evolutions/160-upload-mags-attachments.sql
@@ -0,0 +1,10 @@
+START TRANSACTION;
+
+do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 159 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql;
+
+ALTER TABLE webknossos.dataset_layer_attachments ADD COLUMN uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE webknossos.dataset_mags ADD COLUMN uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE;
+
+UPDATE webknossos.releaseInformation SET schemaVersion = 160;
+
+COMMIT TRANSACTION;
diff --git a/schema/evolutions/reversions/160-upload-mags-attachments.sql b/schema/evolutions/reversions/160-upload-mags-attachments.sql
new file mode 100644
index 00000000000..03e493d2dd2
--- /dev/null
+++ b/schema/evolutions/reversions/160-upload-mags-attachments.sql
@@ -0,0 +1,10 @@
+START TRANSACTION;
+
+do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 160 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql;
+
+ALTER TABLE webknossos.dataset_layer_attachments DROP COLUMN uploadIsPending;
+ALTER TABLE webknossos.dataset_mags DROP COLUMN uploadIsPending;
+
+UPDATE webknossos.releaseInformation SET schemaVersion = 159;
+
+COMMIT TRANSACTION;

From 638cdbd882d3db9d416119cd86022ca11992413d Mon Sep 17 00:00:00 2001
From: Florian M
Date: Tue, 7 Apr 2026 09:27:27 +0200
Subject: [PATCH 12/37] changelog

---
 unreleased_changes/9402.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 unreleased_changes/9402.md

diff --git a/unreleased_changes/9402.md b/unreleased_changes/9402.md
new file mode 100644
index 00000000000..fbcc74313d5
--- /dev/null
+++ b/unreleased_changes/9402.md
@@ -0,0 +1,5 @@
+### Added
+- Added routes for uploading attachments and mags to existing datasets via the Python libs client.
+
+### Postgres Evolutions
+- [160-upload-mags-attachments.sql](schema/evolutions/160-upload-mags-attachments.sql)

From de4c2a326a5b84ab21d114eab81a79c64a152c0b Mon Sep 17 00:00:00 2001
From: Florian M
Date: Tue, 7 Apr 2026 09:41:55 +0200
Subject: [PATCH 13/37] respect uploadIsPending bool in DAOs

---
 app/controllers/DatasetController.scala       |   4 +-
 app/models/dataset/Dataset.scala              | 106 +++++++++++-------
 app/models/dataset/UploadToPathsService.scala |  18 +--
 3 files changed, 78 insertions(+), 50 deletions(-)

diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala
index 713de647719..8442d52f886 100755
--- a/app/controllers/DatasetController.scala
+++ b/app/controllers/DatasetController.scala
@@ -726,7 +726,7 @@ class DatasetController @Inject()(userService: UserService,
     for {
       dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
       _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN
-      _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag)
+      _ <- datasetMagsDAO.finishUploadOrUploadToPath(datasetId, request.body.layerName, request.body.mag)
       dataStoreClient <- datasetService.clientFor(dataset)
       _ <- Fox.runIf(!dataset.isVirtual) {
         for {
@@ -753,7 +753,7 @@ class DatasetController @Inject()(userService: UserService,
     for {
       dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
       _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN
-      _ <- datasetLayerAttachmentsDAO.finishUploadToPath(datasetId,
+      _ <- datasetLayerAttachmentsDAO.finishUploadOrUploadToPath(datasetId,
                                                          request.body.layerName,
                                                          request.body.attachmentName,
                                                          request.body.attachmentType)
diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala
index c03972f5ca2..5e45808898c 100755
--- a/app/models/dataset/Dataset.scala
+++ b/app/models/dataset/Dataset.scala
@@ -801,8 +801,8 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex
   def findMagLocatorsForLayer(datasetId: ObjectId, dataLayerName: String): Fox[List[MagLocator]] =
     for {
       rows <- run(
-        q"""SELECT _dataset, dataLayerName, mag, path, realPath, hasLocalData, axisOrder, channelIndex, credentialId, uploadToPathIsPending
-            FROM webknossos.dataset_mags WHERE _dataset = $datasetId AND dataLayerName = $dataLayerName AND NOT uploadToPathIsPending"""
+        q"""SELECT _dataset, dataLayerName, mag, path, realPath, hasLocalData, axisOrder, channelIndex, credentialId, uploadToPathIsPending, uploadIsPending
+            FROM webknossos.dataset_mags WHERE _dataset = $datasetId AND dataLayerName = $dataLayerName AND NOT uploadToPathIsPending AND NOT uploadIsPending"""
          .as[DatasetMagsRow])
       magLocators <- Fox.combined(rows.map(parseMagLocator))
     } yield magLocators
@@ -841,11 +841,11 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex
   def updateMags(datasetId: ObjectId, dataLayers: List[StaticLayer]): Fox[Unit] = {
     val clearQuery =
-      q"DELETE FROM webknossos.dataset_mags WHERE _dataset = $datasetId AND NOT uploadToPathIsPending".asUpdate
+      q"DELETE FROM webknossos.dataset_mags WHERE _dataset = $datasetId AND NOT uploadToPathIsPending AND NOT uploadIsPending".asUpdate
     val insertQueries = dataLayers.flatMap { layer: StaticLayer =>
       layer.mags.map { mag =>
-        q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, credentialId, uploadToPathIsPending)
-              VALUES($datasetId, ${layer.name}, ${mag.mag}, ${mag.path}, ${mag.axisOrder.map(Json.toJson(_))}, ${mag.channelIndex}, ${mag.credentialId}, ${false})
+        q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, credentialId, uploadToPathIsPending, uploadIsPending)
+              VALUES($datasetId, ${layer.name}, ${mag.mag}, ${mag.path}, ${mag.axisOrder.map(Json.toJson(_))}, ${mag.channelIndex}, ${mag.credentialId}, ${false}, ${false})
          """.asUpdate
       }
     }
@@ -953,32 +953,45 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex
         row.credentialid
       )
 
-  def insertPending(datasetId: ObjectId,
-                    layerName: String,
-                    mag: Vec3Int,
-                    axisOrder: Option[AxisOrder],
-                    channelIndex: Option[Int],
-                    path: UPath): Fox[Unit] =
+  def insertWithUploadToPathPending(datasetId: ObjectId,
+                                    layerName: String,
+                                    mag: Vec3Int,
+                                    axisOrder: Option[AxisOrder],
+                                    channelIndex: Option[Int],
+                                    path: UPath): Fox[Unit] =
     for {
       _ <- run(
-        q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, uploadToPathIsPending)
-            VALUES($datasetId, $layerName, $mag, $path, ${axisOrder.map(Json.toJson(_))}, $channelIndex, ${true})
-         """.asUpdate)
+        q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, uploadToPathIsPending, uploadIsPending)
+            VALUES($datasetId, $layerName, $mag, $path, ${axisOrder.map(Json.toJson(_))}, $channelIndex, ${true}, ${false})
+         """.asUpdate)
+    } yield ()
+
+  def insertWithUploadPending(datasetId: ObjectId,
+                              layerName: String,
+                              mag: Vec3Int,
+                              axisOrder: Option[AxisOrder],
+                              channelIndex: Option[Int],
+                              path: UPath): Fox[Unit] =
+    for {
+      _ <- run(
+        q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, uploadToPathIsPending, uploadIsPending)
+            VALUES($datasetId, $layerName, $mag, $path, ${axisOrder
+          .map(Json.toJson(_))}, $channelIndex, ${false}, ${true})""".asUpdate)
     } yield ()
 
-  def finishUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
+  def finishUploadOrUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
     for {
       _ <- run(
         q"""UPDATE webknossos.dataset_mags
-            SET uploadToPathIsPending = ${false}
+            SET uploadToPathIsPending = ${false},
+                uploadIsPending = ${false}
             WHERE _dataset = $datasetId
              AND dataLayerName = $layerName
-             AND mag = $mag::webknossos.VECTOR3
-             AND uploadToPathIsPending""".asUpdate
+             AND mag = $mag::webknossos.VECTOR3""".asUpdate
       )
     } yield ()
 
-  def findPendingMagLocatorPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[UPath] =
+  def findMagLocatorPathWithPendingUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[UPath] =
     for {
       rows <- run(q"""SELECT path
                       FROM webknossos.dataset_mags
@@ -992,7 +1005,7 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex
       firstAsUpath <- UPath.fromString(first).toFox
     } yield firstAsUpath
 
-  def deletePendingMagLocator(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
+  def deleteMagLocatorWithUploadToPathPending(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
     for {
       _ <- run(q"""DELETE FROM webknossos.dataset_mags
                    WHERE _dataset = $datasetId
@@ -1219,25 +1232,26 @@ class DatasetLayerAttachmentsDAO @Inject()(sqlClient: SqlClient)(implicit ec: Ex
   def findAllForDatasetAndDataLayerName(datasetId: ObjectId, layerName: String): Fox[AttachmentWrapper] =
     for {
       rows <- run(
-        q"""SELECT _dataset, 
layerName, name, path, realpath, hasLocalData, type, dataFormat, uploadToPathIsPending + q"""SELECT _dataset, layerName, name, path, realpath, hasLocalData, type, dataFormat, uploadToPathIsPending, uploadIsPending FROM webknossos.dataset_layer_attachments WHERE _dataset = $datasetId AND layerName = $layerName - AND NOT uploadToPathIsPending""".as[DatasetLayerAttachmentsRow]) + AND NOT uploadToPathIsPending + AND NOT uploadIsPending""".as[DatasetLayerAttachmentsRow]) attachments <- parseAttachments(rows.toList) ?~> "Could not parse attachments" } yield attachments def updateAttachments(datasetId: ObjectId, dataLayers: List[StaticLayer]): Fox[Unit] = { def insertQuery(attachment: LayerAttachment, layerName: String, attachmentType: LayerAttachmentType.Value) = { val query = - q"""INSERT INTO webknossos.dataset_layer_attachments(_dataset, layerName, name, path, type, dataFormat, uploadToPathIsPending) + q"""INSERT INTO webknossos.dataset_layer_attachments(_dataset, layerName, name, path, type, dataFormat, uploadToPathIsPending, uploadIsPending) VALUES($datasetId, $layerName, ${attachment.name}, ${attachment.path}, $attachmentType::webknossos.LAYER_ATTACHMENT_TYPE, - ${attachment.dataFormat}::webknossos.LAYER_ATTACHMENT_DATAFORMAT, ${false})""" + ${attachment.dataFormat}::webknossos.LAYER_ATTACHMENT_DATAFORMAT, ${false}, ${false})""" query.asUpdate } val clearQuery = - q"DELETE FROM webknossos.dataset_layer_attachments WHERE _dataset = $datasetId AND NOT uploadToPathIsPending".asUpdate + q"DELETE FROM webknossos.dataset_layer_attachments WHERE _dataset = $datasetId AND NOT uploadToPathIsPending AND NOT uploadIsPending".asUpdate val insertQueries = dataLayers.flatMap { layer: StaticLayer => layer.attachments match { case Some(attachments) => @@ -1277,17 +1291,30 @@ class DatasetLayerAttachmentsDAO @Inject()(sqlClient: SqlClient)(implicit ec: Ex ) } yield () - def insertPending(datasetId: ObjectId, - layerName: String, - attachmentName: String, - attachmentType: LayerAttachmentType.Value, - attachmentDataformat: LayerAttachmentDataformat.Value, - attachmentPath: UPath): Fox[Unit] = + def insertWithUploadToPathPending(datasetId: ObjectId, + layerName: String, + attachmentName: String, + attachmentType: LayerAttachmentType.Value, + attachmentDataformat: LayerAttachmentDataformat.Value, + attachmentPath: UPath): Fox[Unit] = for { _ <- run( - q"""INSERT INTO webknossos.dataset_layer_attachments(_dataset, layerName, name, path, type, dataFormat, uploadToPathIsPending) - VALUES($datasetId, $layerName, $attachmentName, $attachmentPath, $attachmentType, $attachmentDataformat, ${true}) - """.asUpdate) + q"""INSERT INTO webknossos.dataset_layer_attachments(_dataset, layerName, name, path, type, dataFormat, uploadToPathIsPending, uploadIsPending) + VALUES($datasetId, $layerName, $attachmentName, $attachmentPath, $attachmentType, $attachmentDataformat, ${true}, ${false}) + """.asUpdate) + } yield () + + def insertWithUploadPending(datasetId: ObjectId, + layerName: String, + attachmentName: String, + attachmentType: LayerAttachmentType.Value, + attachmentDataformat: LayerAttachmentDataformat.Value, + attachmentPath: UPath): Fox[Unit] = + for { + _ <- run( + q"""INSERT INTO webknossos.dataset_layer_attachments(_dataset, layerName, name, path, type, dataFormat, uploadToPathIsPending, uploadIsPending) + VALUES($datasetId, $layerName, $attachmentName, $attachmentPath, $attachmentType, $attachmentDataformat, ${false}, ${true}) + """.asUpdate) } yield () def countAttachmentsIncludingPending(datasetId: ObjectId, @@ 
-1307,13 +1334,14 @@ class DatasetLayerAttachmentsDAO @Inject()(sqlClient: SqlClient)(implicit ec: Ex } yield first } - def finishUploadToPath(datasetId: ObjectId, - layerName: String, - attachmentName: String, - attachmentType: LayerAttachmentType.Value): Fox[Unit] = + def finishUploadOrUploadToPath(datasetId: ObjectId, + layerName: String, + attachmentName: String, + attachmentType: LayerAttachmentType.Value): Fox[Unit] = for { _ <- run(q"""UPDATE webknossos.dataset_layer_attachments - SET uploadToPathIsPending = ${false} + SET uploadToPathIsPending = ${false}, + uploadIsPending = ${false} WHERE _dataset = $datasetId AND layerName = $layerName AND name = $attachmentName diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala index 8497f5892f7..b78d60adcc5 100644 --- a/app/models/dataset/UploadToPathsService.scala +++ b/app/models/dataset/UploadToPathsService.scala @@ -264,7 +264,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, parameters.attachmentDataformat, parameters.attachmentType, datasetPath / parameters.layerName) - _ <- datasetLayerAttachmentsDAO.insertPending(dataset._id, + _ <- datasetLayerAttachmentsDAO.insertWithUploadToPathPending(dataset._id, parameters.layerName, parameters.attachmentName, parameters.attachmentType, @@ -281,12 +281,12 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, datasetParent <- selectPathPrefixDatasetParent(parameters.pathPrefix, dataset._organization) datasetPath = datasetParent / dataset.directoryName magPath = generateMagPath(parameters.mag, datasetPath / parameters.layerName) - _ <- datasetMagsDAO.insertPending(dataset._id, - parameters.layerName, - parameters.mag, - parameters.axisOrder, - parameters.channelIndex, - magPath) + _ <- datasetMagsDAO.insertWithUploadToPathPending(dataset._id, + parameters.layerName, + parameters.mag, + parameters.axisOrder, + parameters.channelIndex, + magPath) } yield magPath private def handleExistingPendingMagIfExists(dataset: Dataset, @@ -294,14 +294,14 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, mag: Vec3Int, overwritePending: Boolean)(implicit ec: ExecutionContext): Fox[Unit] = for { - existingMagLocatorPathBox <- datasetMagsDAO.findPendingMagLocatorPath(dataset._id, layerName, mag).shiftBox + existingMagLocatorPathBox <- datasetMagsDAO.findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox _ <- existingMagLocatorPathBox match { case Full(existingMagLocatorPath) => if (overwritePending) { for { client <- datasetService.clientFor(dataset)(GlobalAccessContext) _ <- pathDeletionService.deletePaths(client, Seq(existingMagLocatorPath)) - _ <- datasetMagsDAO.deletePendingMagLocator(dataset._id, layerName, mag) + _ <- datasetMagsDAO.deleteMagLocatorWithUploadToPathPending(dataset._id, layerName, mag) } yield () } else Fox.failure("dataset.reserveMagUploadToPath.exists") case Empty => Fox.successful(()) From ded3c82d9d41ddcd653f0cc84815930cb3b1887b Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 10:18:03 +0200 Subject: [PATCH 14/37] wip reserve routes --- app/controllers/DatasetController.scala | 2 +- .../WKRemoteDataStoreController.scala | 40 +++++++++++++++---- app/models/dataset/Dataset.scala | 4 +- app/models/dataset/DatasetService.scala | 2 +- app/models/dataset/UploadToPathsService.scala | 2 +- app/models/storage/UsedStorageService.scala | 4 +- .../services/DSRemoteWebknossosClient.scala | 2 +- .../services/uploading/UploadService.scala | 3 +- 8 
files changed, 43 insertions(+), 16 deletions(-) diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala index 8442d52f886..36d08c705c7 100755 --- a/app/controllers/DatasetController.scala +++ b/app/controllers/DatasetController.scala @@ -166,7 +166,7 @@ class DatasetController @Inject()(userService: UserService, wKRemoteSegmentAnythingClient: WKRemoteSegmentAnythingClient, teamService: TeamService, datasetDAO: DatasetDAO, - datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO, + datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO, datasetUploadToPathsService: UploadToPathsService, folderService: FolderService, thumbnailService: ThumbnailService, diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index b7932de7893..8d91f33646c 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -50,6 +50,8 @@ class WKRemoteDataStoreController @Inject()( userDAO: UserDAO, teamDAO: TeamDAO, jobDAO: JobDAO, + datasetMagDAO: DatasetMagDAO, + datasetAttachmentDAO: DatasetLayerAttachmentDAO, credentialDAO: CredentialDAO, wkSilhouetteEnvironment: WkSilhouetteEnvironment)(implicit ec: ExecutionContext, bodyParsers: PlayBodyParsers) extends Controller @@ -98,22 +100,46 @@ class WKRemoteDataStoreController @Inject()( Action.async(validateJson[MagUploadInfo]) { implicit request => dataStoreService.validateAccess(name, key) { dataStore => // DS write access was asserted already at this point. - implicit val ctx: DBAccessContext = GlobalAccessContext for { - dataset <- datasetDAO.findOne(request.body.datasetId) + user <- bearerTokenService.userForToken(token) + dataset <- datasetDAO.findOne(request.body.datasetId)(AuthorizedAccessContext(user)) + _ <- Fox.fromBool(dataset.isVirtual) ?~> "dataset.reserveMagUpload.notVirtual" (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) + _ <- Fox.fromBool(!dataLayer.mags.exists(_.mag.maxDim == request.body.mag.mag.maxDim)) ?~> s"New mag ${request.body.mag.mag} conflicts with existing mag of the layer." _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." - } yield Ok(Json.toJson(MagUploadAdditionalInfo(DataSourceId("", "")))) - // DS must not have the mag - // insert the mag - // return existing datasource id + path <- request.body.mag.path.toFox ?~> "dataset.reserveMagUpload.pathNotSet" // TODO ensure caller sets path + _ <- datasetMagDAO.insertWithUploadPending(request.body.datasetId, + request.body.layerName, + request.body.mag.mag, + request.body.mag.axisOrder, + request.body.mag.channelIndex, + path) + } yield Ok(Json.toJson(MagUploadAdditionalInfo(dataSource.id))) } } def reserveAttachmentUpload(name: String, key: String, token: String): Action[AttachmentUploadInfo] = Action.async(validateJson[AttachmentUploadInfo]) { implicit request => dataStoreService.validateAccess(name, key) { dataStore => - Fox.successful(Ok(Json.toJson(AttachmentUploadAdditionalInfo(DataSourceId("", ""))))) + // DS write access was asserted already at this point. 
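+      // Example request body (sketch only; concrete values are assumptions, the shape follows AttachmentUploadInfo):
+      // {
+      //   "resumableUploadInfo": { "uploadId": "...", ... },
+      //   "datasetId": "<dataset ObjectId>",
+      //   "layerName": "segmentation",
+      //   "attachmentType": "mesh",
+      //   "attachment": { "name": "meshfile_0", "path": "...", "dataFormat": "zarr3" }
+      // }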
+      for {
+        user <- bearerTokenService.userForToken(token)
+        dataset <- datasetDAO.findOne(request.body.datasetId)(AuthorizedAccessContext(user))
+        _ <- Fox.fromBool(dataset.isVirtual) ?~> "dataset.reserveAttachmentUpload.notVirtual"
+        (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName)
+        existingAttachmentOpt = dataLayer.attachments.flatMap(
+          _.getByTypeAndName(request.body.attachmentType, request.body.attachment.name))
+        _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}"
+        _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload attachment to existing dataset via different datastore."
+        _ <- datasetAttachmentDAO.insertWithUploadPending(
+          request.body.datasetId,
+          request.body.layerName,
+          request.body.attachment.name,
+          request.body.attachmentType,
+          request.body.attachment.dataFormat,
+          request.body.attachment.path // TODO ensure caller sets correct path
+        )
+      } yield Ok(Json.toJson(AttachmentUploadAdditionalInfo(dataSource.id)))
     }
   }
 
diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala
index 5e45808898c..c82e01dbb27 100755
--- a/app/models/dataset/Dataset.scala
+++ b/app/models/dataset/Dataset.scala
@@ -1020,7 +1020,7 @@ class DatasetLayerDAO @Inject()(sqlClient: SqlClient,
                                 datasetMagsDAO: DatasetMagDAO,
                                 datasetCoordinateTransformationsDAO: DatasetCoordinateTransformationsDAO,
                                 datasetLayerAdditionalAxesDAO: DatasetLayerAdditionalAxesDAO,
-                                datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO)(implicit ec: ExecutionContext)
+                                datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO)(implicit ec: ExecutionContext)
     extends SimpleSQLDAO(sqlClient) {
 
   private def parseRow(row: DatasetLayersRow, datasetId: ObjectId): Fox[StaticLayer] = {
@@ -1201,7 +1201,7 @@ case class StorageRelevantDataLayerAttachment(
     datasetDirectoryName: String,
 )
 
-class DatasetLayerAttachmentsDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext)
+class DatasetLayerAttachmentDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext)
     extends SimpleSQLDAO(sqlClient) {
 
   private def parseRow(row: DatasetLayerAttachmentsRow): Fox[LayerAttachment] =
diff --git a/app/models/dataset/DatasetService.scala b/app/models/dataset/DatasetService.scala
index c2536dbba63..7c1f872aa01 100644
--- a/app/models/dataset/DatasetService.scala
+++ b/app/models/dataset/DatasetService.scala
@@ -50,7 +50,7 @@ class DatasetService @Inject()(organizationDAO: OrganizationDAO,
                                datasetLastUsedTimesDAO: DatasetLastUsedTimesDAO,
                                datasetDataLayerDAO: DatasetLayerDAO,
                                datasetMagsDAO: DatasetMagDAO,
-                               datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO,
+                               datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO,
                                teamDAO: TeamDAO,
                                folderDAO: FolderDAO,
                                multiUserDAO: MultiUserDAO,
diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala
index b78d60adcc5..726adce72e8 100644
--- a/app/models/dataset/UploadToPathsService.scala
+++ b/app/models/dataset/UploadToPathsService.scala
@@ -44,7 +44,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService,
                                      datasetDAO: DatasetDAO,
                                      dataStoreDAO: DataStoreDAO,
                                      layerToLinkService: LayerToLinkService,
-                                     datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO,
+                                     datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO,
                                      datasetMagsDAO: DatasetMagDAO,
                                      pathDeletionService: PathDeletionService,
                                      folderDAO: FolderDAO,
diff --git a/app/models/storage/UsedStorageService.scala 
b/app/models/storage/UsedStorageService.scala index d584d9a6c0e..372a0b5012a 100644 --- a/app/models/storage/UsedStorageService.scala +++ b/app/models/storage/UsedStorageService.scala @@ -14,7 +14,7 @@ import models.dataset.{ DataStore, DataStoreDAO, Dataset, - DatasetLayerAttachmentsDAO, + DatasetLayerAttachmentDAO, DatasetMagDAO, StorageRelevantDataLayerAttachment, WKRemoteDataStoreClient @@ -35,7 +35,7 @@ class UsedStorageService @Inject()(val actorSystem: ActorSystem, organizationDAO: OrganizationDAO, dataStoreDAO: DataStoreDAO, datasetMagDAO: DatasetMagDAO, - datasetLayerAttachmentsDAO: DatasetLayerAttachmentsDAO, + datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO, rpc: RPC, config: WkConf)(implicit val ec: ExecutionContext) extends LazyLogging diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala index e22d97dea03..90dc18df596 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala @@ -101,7 +101,7 @@ class DSRemoteWebknossosClient @Inject()( .getWithJsonResponse[List[UnfinishedUpload]] } yield unfinishedUploads - def reportUpload(datasetId: ObjectId, parameters: ReportDatasetUploadParameters)(implicit tc: TokenContext): Fox[_] = + def reportDatasetUpload(datasetId: ObjectId, parameters: ReportDatasetUploadParameters)(implicit tc: TokenContext): Fox[_] = rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportDatasetUpload") .addQueryParam("key", dataStoreKey) .addQueryParam("datasetId", datasetId) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index b14812dbd8a..85a7b335d6d 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -118,6 +118,7 @@ object LinkedLayerIdentifier { implicit val jsonFormat: OFormat[LinkedLayerIdentifier] = Json.format[LinkedLayerIdentifier] } +// TODO move needsConversion to Redis, skip it here. Remove this entirely (except for legacy) and just take the uploadId as GET param? 
case class UploadInformation(uploadId: String, needsConversion: Option[Boolean]) object UploadInformation { @@ -370,7 +371,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, label = s"processing dataset at $unpackToDir") datasetSizeBytes <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(new File(unpackToDir.toString)).longValue).toFox ?~> "dataset.upload.measureTotalSize.failed" dataSourceWithAbsolutePathsOpt <- moveUnpackedToTarget(unpackToDir, needsConversion, datasetId, dataSourceId) ?~> "dataset.upload.moveUnpackedToTarget.failed" - _ <- remoteWebknossosClient.reportUpload( + _ <- remoteWebknossosClient.reportDatasetUpload( datasetId, ReportDatasetUploadParameters( uploadInformation.needsConversion.getOrElse(false), From b008682c08577da8ea445475d312fe2b25bba5ef Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 11:29:51 +0200 Subject: [PATCH 15/37] wip report upload from ds to wk --- .../controllers/UploadController.scala | 79 ++++++++++--------- .../services/DSRemoteWebknossosClient.scala | 2 + .../uploading/UploadMetadataStore.scala | 17 ++++ .../services/uploading/UploadService.scala | 59 ++++++++------ .../conf/datastore.latest.routes | 4 +- 5 files changed, 96 insertions(+), 65 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index b053f9d518b..95e10dbfe7e 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -8,11 +8,11 @@ import com.scalableminds.webknossos.datastore.services.{ } import com.scalableminds.webknossos.datastore.services.uploading.{ AttachmentUploadInfo, - CancelUploadInformation, + LegacyCancelUploadInformation, DatasetUploadInfo, MagUploadInfo, UploadDomain, - UploadInformation, + LegacyUploadInformation, UploadService } import com.scalableminds.webknossos.datastore.slacknotification.DSSlackNotificationService @@ -173,46 +173,47 @@ class UploadController @Inject()( } yield result } - def finishUpload(uploadDomain: String): Action[UploadInformation] = Action.async(validateJson[UploadInformation]) { - implicit request => - log(Some(slackNotificationService.noticeFailedUploadRequest)) { - logTime(slackNotificationService.noticeSlowRequest) { - for { - uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox - datasetId <- uploadService - .getDatasetIdByUploadId(request.body.uploadId, uploadDomainValidated) ?~> s"Cannot find running upload with upload id ${request.body.uploadId}" - response <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { - for { - // TODO other domains - _ <- uploadService.finishDatasetUpload(request.body, datasetId) ?~> Messages( - "dataset.upload.finishFailed", - datasetId) - } yield Ok(Json.obj("newDatasetId" -> datasetId)) - } - } yield response - } + // TODO legacy: still needs uploadId as body + def finishUpload(uploadDomain: String, uploadId: String): Action[AnyContent] = Action.async { implicit request => + log(Some(slackNotificationService.noticeFailedUploadRequest)) { + logTime(slackNotificationService.noticeSlowRequest) { + for { + uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox + datasetId <- uploadService + .getDatasetIdByUploadId(uploadId, uploadDomainValidated) ?~> s"Cannot find running 
upload with upload id $uploadId"
+          response <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) {
+            for {
+              // TODO other domains
+              _ <- (uploadDomainValidated match {
+                case UploadDomain.dataset    => uploadService.finishDatasetUpload(uploadId, datasetId)
+                case UploadDomain.mag        => uploadService.finishMagUpload(uploadId, datasetId)
+                case UploadDomain.attachment => uploadService.finishAttachmentUpload(uploadId, datasetId)
+              }) ?~> Messages("dataset.upload.finishFailed", datasetId)
+            } yield Ok(Json.obj("datasetId" -> datasetId)) // TODO legacy needs to return this as "newDatasetId"
+          }
+      } yield response
     }
+  }
   }

-  // TODO uploadId as GET param?
-  def cancelUpload(uploadDomain: String): Action[CancelUploadInformation] =
-    Action.async(validateJson[CancelUploadInformation]) { implicit request =>
-      for {
-        uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
-        datasetIdFox = uploadService.isKnownUpload(request.body.uploadId, uploadDomainValidated).flatMap {
-          case false => Fox.failure("dataset.upload.validation.failed")
-          case true  => uploadService.getDatasetIdByUploadId(request.body.uploadId, uploadDomainValidated)
-        }
-        result <- datasetIdFox.flatMap { datasetId =>
-          accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) {
-            for {
-              // TODO adapt also to other domains
-              _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed"
-              _ <- uploadService.cancelUpload(request.body, uploadDomainValidated) ?~> "Could not cancel the upload."
-            } yield Ok
-          }
+  // TODO legacy route needs to take uploadId as body
+  def cancelUpload(uploadDomain: String, uploadId: String): Action[AnyContent] = Action.async { implicit request =>
+    for {
+      uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox
+      datasetIdFox = uploadService.isKnownUpload(uploadId, uploadDomainValidated).flatMap {
+        case false => Fox.failure("dataset.upload.validation.failed")
+        case true  => uploadService.getDatasetIdByUploadId(uploadId, uploadDomainValidated)
+      }
+      result <- datasetIdFox.flatMap { datasetId =>
+        accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) {
+          for {
+            // TODO adapt also to other domains
+            _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed"
+            _ <- uploadService.cancelUpload(uploadDomainValidated, uploadId) ?~> "Could not cancel the upload."
+ } yield Ok } - } yield result - } + } + } yield result + } } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala index 90dc18df596..982b15b5503 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala @@ -108,6 +108,8 @@ class DSRemoteWebknossosClient @Inject()( .withTokenFromContext .postJson[ReportDatasetUploadParameters](parameters) + def reportMagUpload(datasetId: ObjectId, parameters: ReportMagUploadParameters) + def reportDataSources(dataSources: List[DataSource], organizationId: Option[String]): Fox[_] = rpc(s"$webknossosUri/api/datastores/$dataStoreName/datasources") .addQueryParam("key", dataStoreKey) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index 1d17fdfa111..d48345386ad 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -10,6 +10,7 @@ import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore import play.api.libs.json.Json import javax.inject.Inject +import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext trait UploadMetadataStore extends FoxImplicits { @@ -142,6 +143,9 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt private def redisKeyForLinkedLayerIdentifier(uploadId: String): String = s"$keyPrefix${uploadId}___linkedLayerIdentifier" + private def redisKeyForNeedsConversion(uploadId: String): String = + s"$keyPrefix${uploadId}___needsConversion" + // TODO make this Fox[String]? def findUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = store.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) @@ -149,6 +153,9 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt def findLinkedLayerIdentifiers(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[LinkedLayerIdentifier]] = store.findParsed[Seq[LinkedLayerIdentifier]](redisKeyForLinkedLayerIdentifier(uploadId)) + def findNeedsConversion(uploadId: String)(implicit ec: ExecutionContext): Fox[Boolean] = + store.findParsed[Boolean](redisKeyForNeedsConversion(uploadId)) + // Only here the uploadId is not key but value. This is used to re-connect to unfinished uploads. 
def insertUploadIdByDataSourceId(dataSourceId: DataSourceId, uploadId: String): Fox[Unit] = store.insertSerialized(redisKeyForUploadIdByDataSourceId(dataSourceId), uploadId) @@ -157,10 +164,14 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt linkedLayerIdentifiers: Option[Seq[LinkedLayerIdentifier]]): Fox[_] = store.insertSerialized(redisKeyForLinkedLayerIdentifier(uploadId), linkedLayerIdentifiers.getOrElse(Seq.empty)) + def insertNeedsConversion(uploadId: String, needsConversion: Boolean): Fox[_] = + store.insertSerialized(redisKeyForNeedsConversion(uploadId), needsConversion) + override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = for { dataSourceId <- findDataSourceId(uploadId) _ <- store.remove(redisKeyForLinkedLayerIdentifier(uploadId)) + _ <- store.remove(redisKeyForNeedsConversion(uploadId)) _ <- store.remove(redisKeyForUploadIdByDataSourceId(dataSourceId)) _ <- super.cleanUp(uploadId) } yield () @@ -181,6 +192,12 @@ class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) def insertLayerName(uploadId: String, layerName: String): Fox[Unit] = store.insert(redisKeyForLayerName(uploadId), layerName) + + def findMag(uploadId: String)(implicit ec: ExecutionContext): Fox[MagLocator] = + store.findParsed[MagLocator](redisKeyForMag(uploadId)) + + def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] = + store.findParsed(redisKeyForLayerName(uploadId)) } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 85a7b335d6d..11be4b4ce1c 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -118,16 +118,16 @@ object LinkedLayerIdentifier { implicit val jsonFormat: OFormat[LinkedLayerIdentifier] = Json.format[LinkedLayerIdentifier] } -// TODO move needsConversion to Redis, skip it here. Remove this entirely (except for legacy) and just take the uploadId as GET param? 
-case class UploadInformation(uploadId: String, needsConversion: Option[Boolean]) +// TODO move to Legacy finishUpload, unpack uploadId +case class LegacyUploadInformation(uploadId: String) -object UploadInformation { - implicit val jsonFormat: OFormat[UploadInformation] = Json.format[UploadInformation] +object LegacyUploadInformation { + implicit val jsonFormat: OFormat[LegacyUploadInformation] = Json.format[LegacyUploadInformation] } -case class CancelUploadInformation(uploadId: String) -object CancelUploadInformation { - implicit val jsonFormat: OFormat[CancelUploadInformation] = Json.format[CancelUploadInformation] +case class LegacyCancelUploadInformation(uploadId: String) +object LegacyCancelUploadInformation { + implicit val jsonFormat: OFormat[LegacyCancelUploadInformation] = Json.format[LegacyCancelUploadInformation] } class UploadService @Inject()(dataSourceService: DataSourceService, @@ -176,14 +176,14 @@ class UploadService @Inject()(dataSourceService: DataSourceService, datasetId: ObjectId, directoryName: String): Fox[Unit] = { val dataSourceId = DataSourceId(directoryName, datasetUploadInfo.organizationId) + val needsConversion = datasetUploadInfo.needsConversion.getOrElse(false) for { - _ <- Fox.fromBool( - !datasetUploadInfo.needsConversion.getOrElse(false) || !datasetUploadInfo.layersToLink - .exists(_.nonEmpty)) ?~> "Cannot use linked layers if the dataset needs conversion" + _ <- Fox.fromBool(!needsConversion || !datasetUploadInfo.layersToLink.exists(_.nonEmpty)) ?~> "Cannot use linked layers if the dataset needs conversion" _ <- reserveResumableUpload(datasetUploadInfo.resumableUploadInfo, datasetId, dataSourceId, UploadDomain.dataset) uploadId = datasetUploadInfo.resumableUploadInfo.uploadId _ <- datasetUploadMetadataStore.insertUploadIdByDataSourceId(dataSourceId, uploadId) _ <- datasetUploadMetadataStore.insertLinkedLayerIdentifiers(uploadId, datasetUploadInfo.layersToLink) + _ <- datasetUploadMetadataStore.insertNeedsConversion(uploadId, needsConversion) } yield () } @@ -321,9 +321,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - def cancelUpload(cancelUploadInformation: CancelUploadInformation, uploadDomain: UploadDomain): Fox[Unit] = { + def cancelUpload(uploadDomain: UploadDomain, uploadId: String): Fox[Unit] = { val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) - val uploadId = cancelUploadInformation.uploadId for { dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId) datasetId <- uploadMetadataStore.findDatasetId(uploadId) @@ -341,22 +340,19 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def uploadFullName(uploadId: String, datasetId: ObjectId, dataSourceId: DataSourceId) = s"upload $uploadId for dataset $datasetId ($dataSourceId)" - def finishDatasetUpload(uploadInformation: UploadInformation, datasetId: ObjectId)( - implicit tc: TokenContext): Fox[Unit] = { - val uploadId = uploadInformation.uploadId - + def finishDatasetUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = for { dataSourceId <- datasetUploadMetadataStore.findDataSourceId(uploadId) + needsConversion <- datasetUploadMetadataStore.findNeedsConversion(uploadId) _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") linkedLayerIdentifiers <- datasetUploadMetadataStore.findLinkedLayerIdentifiers(uploadId) - needsConversion = uploadInformation.needsConversion.getOrElse(false) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, 
uploadId, UploadDomain.dataset) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.dataset) ?~> "dataset.upload.fileSizeCheck.failed" _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed" unpackToDir = unpackToDirFor(dataSourceId) _ <- PathUtils.ensureDirectoryBox(unpackToDir.getParent).toFox ?~> "dataset.import.fileAccessDenied" - unpackResult <- unpackDataset(uploadDir, unpackToDir, datasetId).shiftBox + unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId).shiftBox _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset) _ <- cleanUpOnFailure(unpackResult, datasetId, @@ -369,19 +365,34 @@ class UploadService @Inject()(dataSourceService: DataSourceService, dataSourceId, needsConversion, label = s"processing dataset at $unpackToDir") - datasetSizeBytes <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(new File(unpackToDir.toString)).longValue).toFox ?~> "dataset.upload.measureTotalSize.failed" + datasetSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed" dataSourceWithAbsolutePathsOpt <- moveUnpackedToTarget(unpackToDir, needsConversion, datasetId, dataSourceId) ?~> "dataset.upload.moveUnpackedToTarget.failed" _ <- remoteWebknossosClient.reportDatasetUpload( datasetId, ReportDatasetUploadParameters( - uploadInformation.needsConversion.getOrElse(false), + needsConversion, datasetSizeBytes, dataSourceWithAbsolutePathsOpt, linkedLayerIdentifiers ) ) ?~> "dataset.upload.reportUpload.failed" } yield () - } + + private def measureDirectorySizeBytes(path: Path): Fox[Long] = + tryo(FileUtils.sizeOfDirectoryAsBigInteger(path.toFile).longValue).toFox + + def finishMagUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = for { + dataSourceId <- magUploadMetadataStore.findDataSourceId(uploadId) + mag <- magUploadMetadataStore.findMag(uploadId) + layerName <- magUploadMetadataStore.findLayerName(uploadId) + uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.mag) + unpackToDir = unpackToDirFor(dataSourceId).resolve(mag.mag.toMagLiteral(allowScalar = true)) + magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed" + _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag) + _ <- remoteWebknossosClient.reportMagUpload(datasetId, layerName, mag.mag, magSizeBytes) + } yield () + + def finishAttachmentUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = ??? 
// TODO

   private def checkWithinRequestedFileSize(uploadDir: Path,
                                            uploadId: String,
@@ -761,7 +772,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,

   private def getPathDepth(path: Path) = path.toString.count(_ == '/')

-  private def unpackDataset(uploadDir: Path, unpackToDir: Path, datasetId: ObjectId): Fox[Unit] =
+  private def unpackOrMoveUploaded(uploadDir: Path, unpackToDir: Path, datasetId: ObjectId, uploadDomain: UploadDomain): Fox[Unit] =
     for {
       shallowFileList <- PathUtils.listFiles(uploadDir, silent = false).toFox
       excludeFromPrefix = LayerCategory.values.map(_.toString).toList
                    _.toString.toLowerCase.endsWith(".zip"))) {
         for {
           zipFile <- firstFile.toFox
-          _ = logger.info(s"finishUpload for $datasetId: Unzipping dataset to $unpackToDir...")
+          _ = logger.info(s"finishUpload for $datasetId: Unzipping $uploadDomain to $unpackToDir...")
           _ <- ZipIO
             .unzipToDirectory(
               zipFile.toFile,
@@ -789,7 +800,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- Fox.fromBool(deepFileList.nonEmpty) ?~> "dataset.upload.noFiles"
       commonPrefixPreliminary = PathUtils.commonPrefix(deepFileList)
       _ = logger.info(
-        s"Detected dataset root during upload of $datasetId from ${deepFileList.length} files in $uploadDir with commonPrefixPreliminary=$commonPrefixPreliminary")
+        s"Detected $uploadDomain root during finishUpload of $datasetId from ${deepFileList.length} files in $uploadDir with commonPrefixPreliminary=$commonPrefixPreliminary")
       strippedPrefix = PathUtils.cutOffPathAtLastOccurrenceOf(commonPrefixPreliminary, excludeFromPrefix)
       commonPrefix = PathUtils.removeSingleFileNameFromPrefix(strippedPrefix,
                                                               deepFileList.map(_.getFileName.toString))
diff --git a/webknossos-datastore/conf/datastore.latest.routes b/webknossos-datastore/conf/datastore.latest.routes
index b114d3a0b55..8d92d64c411 100644
--- a/webknossos-datastore/conf/datastore.latest.routes
+++ b/webknossos-datastore/conf/datastore.latest.routes
@@ -112,8 +112,8 @@ POST /datasets/upload/dataset/reserveUpload
POST /datasets/upload/mag/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveMagUpload()
POST /datasets/upload/attachment/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveAttachmentUpload()
GET /datasets/upload/:uploadDomain/getUnfinishedUploads @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String)
-POST /datasets/upload/:uploadDomain/finishUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String)
-POST /datasets/upload/:uploadDomain/cancelUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String)
+POST /datasets/upload/:uploadDomain/finishUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String, uploadId: String)
+POST /datasets/upload/:uploadDomain/cancelUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String, uploadId: String)

# DataSource management
GET /datasets/baseDirAbsolute @com.scalableminds.webknossos.datastore.controllers.DataSourceController.baseDirAbsolute
From 4ef05404e991b327e0c2b8c1c4d7d515724e6b84 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Tue, 7 Apr 2026 12:18:39 +0200
Subject: [PATCH 16/37] adapt frontend ds upload, wip
finish checks --- .../admin/dataset/dataset_upload_view.tsx | 18 ++--- frontend/javascripts/admin/rest_api.ts | 45 ++++++----- .../controllers/UploadController.scala | 3 - .../services/DSRemoteWebknossosClient.scala | 22 +++--- .../uploading/UploadMetadataStore.scala | 11 ++- .../services/uploading/UploadService.scala | 74 +++++++++++++++---- 6 files changed, 112 insertions(+), 61 deletions(-) diff --git a/frontend/javascripts/admin/dataset/dataset_upload_view.tsx b/frontend/javascripts/admin/dataset/dataset_upload_view.tsx index 1aad0f9336c..becaa1901a5 100644 --- a/frontend/javascripts/admin/dataset/dataset_upload_view.tsx +++ b/frontend/javascripts/admin/dataset/dataset_upload_view.tsx @@ -347,7 +347,7 @@ class DatasetUploadView extends React.Component { totalFileCount: formValues.zipFile.length, filePaths: filePaths, totalFileSizeInBytes, - } + }; const datasetUploadInfo = { resumableUploadInfo, datasetName: newDatasetName, @@ -384,15 +384,11 @@ class DatasetUploadView extends React.Component { throw new Error("Form couldn't be initialized."); } - const uploadInfo = { - uploadId, - needsConversion: this.state.needsConversion, - }; this.setState({ isFinishing: true, }); - finishDatasetUpload(datastoreUrl, uploadInfo).then( - async ({ newDatasetId }) => { + finishDatasetUpload(datastoreUrl, uploadId).then( + async ({ datasetId }) => { let maybeError; if (this.state.needsConversion) { @@ -404,7 +400,7 @@ class DatasetUploadView extends React.Component { } await startConvertToWkwJob( - newDatasetId, + datasetId, formValues.voxelSizeFactor, formValues.voxelSizeUnit, ); @@ -430,7 +426,7 @@ class DatasetUploadView extends React.Component { name: "", zipFile: [], }); - this.props.onUploaded(newDatasetId, newDatasetName, this.state.needsConversion); + this.props.onUploaded(datasetId, newDatasetName, this.state.needsConversion); } }, (error) => { @@ -498,9 +494,7 @@ class DatasetUploadView extends React.Component { resumableUpload.cancel(); if (uploadId) { - await cancelDatasetUpload(datastoreUrl, { - uploadId, - }); + await cancelDatasetUpload(datastoreUrl, uploadId); } this.setState({ isUploading: false, diff --git a/frontend/javascripts/admin/rest_api.ts b/frontend/javascripts/admin/rest_api.ts index 197d63e63b9..1e8be6f2136 100644 --- a/frontend/javascripts/admin/rest_api.ts +++ b/frontend/javascripts/admin/rest_api.ts @@ -1196,7 +1196,7 @@ export function createResumableUpload( const resumable = new ResumableUpload({ testChunks: true, - target: `${datastoreUrl}/data/datasets`, + target: `${datastoreUrl}/data/datasets/upload/dataset`, query: function () { return { token: activeToken, @@ -1249,9 +1249,9 @@ type ResumableUploadInfo = { totalFileCount: number; filePaths: Array; totalFileSizeInBytes: number; -} +}; type DatasetUploadInfo = { - resumableUploadInfo: ResumableUploadInfo, + resumableUploadInfo: ResumableUploadInfo; datasetName: string; organizationId: string; layersToLink: Array; // Always set as empty by frontend, only used by libs @@ -1265,7 +1265,7 @@ export function reserveDatasetUpload( datasetUploadInfo: DatasetUploadInfo, ): Promise { return doWithToken((token) => - Request.sendJSONReceiveJSON(`/data/datasets/reserveUpload?token=${token}`, { + Request.sendJSONReceiveJSON(`/data/datasets/upload/dataset/reserveUpload?token=${token}`, { data: datasetUploadInfo, host: datastoreHost, }), @@ -1300,29 +1300,34 @@ type NewDatasetReply = { newDatasetId: string; }; +type FinishUploadReply = { + datasetId: string; +}; + export function finishDatasetUpload( datastoreHost: string, - 
uploadInformation: ArbitraryObject, -): Promise { + uploadId: string, +): Promise { return doWithToken((token) => - Request.sendJSONReceiveJSON(`/data/datasets/finishUpload?token=${token}`, { - data: uploadInformation, - host: datastoreHost, - }), + Request.receiveJSON( + `/data/datasets/upload/dataset/finishUpload?uploadId=${uploadId}&token=${token}`, + { + host: datastoreHost, + method: "POST", + }, + ), ); } -export function cancelDatasetUpload( - datastoreHost: string, - cancelUploadInformation: { - uploadId: string; - }, -): Promise { +export function cancelDatasetUpload(datastoreHost: string, uploadId: string): Promise { return doWithToken((token) => - Request.sendJSONReceiveJSON(`/data/datasets/cancelUpload?token=${token}`, { - data: cancelUploadInformation, - host: datastoreHost, - }), + Request.receiveJSON( + `/data/datasets/upload/dataset/cancelUpload?uploadId=${uploadId}&token=${token}`, + { + host: datastoreHost, + method: "POST", + }, + ), ); } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index 95e10dbfe7e..27ad3effe3c 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -8,15 +8,12 @@ import com.scalableminds.webknossos.datastore.services.{ } import com.scalableminds.webknossos.datastore.services.uploading.{ AttachmentUploadInfo, - LegacyCancelUploadInformation, DatasetUploadInfo, MagUploadInfo, UploadDomain, - LegacyUploadInformation, UploadService } import com.scalableminds.webknossos.datastore.slacknotification.DSSlackNotificationService -import org.apache.pekko.http.scaladsl.model.HttpHeader.ParsingResult.Ok import play.api.data.Form import play.api.data.Forms.tuple import play.api.i18n.Messages diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala index 982b15b5503..97b7d261bd6 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala @@ -14,15 +14,7 @@ import com.scalableminds.webknossos.datastore.models.UnfinishedUpload import com.scalableminds.webknossos.datastore.models.annotation.AnnotationSource import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId} import com.scalableminds.webknossos.datastore.rpc.RPC -import com.scalableminds.webknossos.datastore.services.uploading.{ - AttachmentUploadAdditionalInfo, - AttachmentUploadInfo, - DatasetUploadAdditionalInfo, - DatasetUploadInfo, - MagUploadAdditionalInfo, - MagUploadInfo, - ReportDatasetUploadParameters -} +import com.scalableminds.webknossos.datastore.services.uploading.{AttachmentUploadAdditionalInfo, AttachmentUploadInfo, DatasetUploadAdditionalInfo, DatasetUploadInfo, MagUploadAdditionalInfo, MagUploadInfo, ReportAttachmentUploadParameters, ReportDatasetUploadParameters, ReportMagUploadParameters} import com.scalableminds.webknossos.datastore.storage.DataVaultCredential import com.typesafe.scalalogging.LazyLogging import play.api.inject.ApplicationLifecycle @@ -108,7 +100,17 @@ class DSRemoteWebknossosClient 
@Inject()(
     .withTokenFromContext
     .postJson[ReportDatasetUploadParameters](parameters)

-  def reportMagUpload(datasetId: ObjectId, parameters: ReportMagUploadParameters)
+  def reportMagUpload(parameters: ReportMagUploadParameters)(implicit tc: TokenContext): Fox[_] =
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportMagUpload")
+      .addQueryParam("key", dataStoreKey)
+      .withTokenFromContext
+      .postJson[ReportMagUploadParameters](parameters)
+
+  def reportAttachmentUpload(parameters: ReportAttachmentUploadParameters)(implicit tc: TokenContext): Fox[_] =
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportAttachmentUpload")
+      .addQueryParam("key", dataStoreKey)
+      .withTokenFromContext
+      .postJson[ReportAttachmentUploadParameters](parameters)

   def reportDataSources(dataSources: List[DataSource], organizationId: Option[String]): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/datasources")
       .addQueryParam("key", dataStoreKey)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
index d48345386ad..90e6b8d8bb3 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
@@ -197,7 +197,7 @@ class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore)
     store.findParsed[MagLocator](redisKeyForMag(uploadId))

   def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] =
-    store.findParsed(redisKeyForLayerName(uploadId))
+    store.find(redisKeyForLayerName(uploadId)).map(_.toFox).flatten
 }

 class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore {
@@ -220,4 +220,13 @@ class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedi

   def insertLayerName(uploadId: String, layerName: String): Fox[Unit] =
     store.insert(redisKeyForLayerName(uploadId), layerName)
+
+  def findAttachment(uploadId: String)(implicit ec: ExecutionContext): Fox[LayerAttachment] =
+    store.findParsed[LayerAttachment](redisKeyForAttachment(uploadId))
+
+  def findAttachmentType(uploadId: String)(implicit ec: ExecutionContext): Fox[LayerAttachmentType] =
+    store.findParsed[LayerAttachmentType](redisKeyForAttachmentType(uploadId))
+
+  def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] =
+    store.find(redisKeyForLayerName(uploadId)).map(_.toFox).flatten
 }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 11be4b4ce1c..2a5af6d907a 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -111,6 +111,25 @@ object ReportDatasetUploadParameters {
   implicit val jsonFormat: OFormat[ReportDatasetUploadParameters] = Json.format[ReportDatasetUploadParameters]
 }

+case class ReportMagUploadParameters(
+    datasetId: ObjectId,
+    layerName: String,
+    mag: MagLocator,
+    magSizeBytes: Long
+)
+object ReportMagUploadParameters {
+  implicit val jsonFormat: OFormat[ReportMagUploadParameters] = Json.format[ReportMagUploadParameters]
+}
+case class ReportAttachmentUploadParameters(
+    datasetId: ObjectId,
+    layerName: String,
+    attachmentType: LayerAttachmentType,
+    attachment: LayerAttachment,
+    attachmentSizeBytes: Long
+)
+object ReportAttachmentUploadParameters {
+  implicit val jsonFormat: OFormat[ReportAttachmentUploadParameters] = Json.format[ReportAttachmentUploadParameters]
+}

 case class LinkedLayerIdentifier(datasetId: ObjectId, layerName: String, newLayerName: Option[String] = None)
@@ -351,8 +370,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.dataset) ?~> "dataset.upload.fileSizeCheck.failed"
       _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed"
       unpackToDir = unpackToDirFor(dataSourceId)
-      _ <- PathUtils.ensureDirectoryBox(unpackToDir.getParent).toFox ?~> "dataset.import.fileAccessDenied"
-      unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId).shiftBox
+      unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.dataset).shiftBox
       _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset)
       _ <- cleanUpOnFailure(unpackResult,
                             datasetId,
@@ -381,18 +399,40 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
   private def measureDirectorySizeBytes(path: Path): Fox[Long] =
     tryo(FileUtils.sizeOfDirectoryAsBigInteger(path.toFile).longValue).toFox

-  def finishMagUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = for {
-    dataSourceId <- magUploadMetadataStore.findDataSourceId(uploadId)
-    mag <- magUploadMetadataStore.findMag(uploadId)
-    layerName <- magUploadMetadataStore.findLayerName(uploadId)
-    uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.mag)
-    unpackToDir = unpackToDirFor(dataSourceId).resolve(mag.mag.toMagLiteral(allowScalar = true))
-    magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
-    _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag)
-    _ <- remoteWebknossosClient.reportMagUpload(datasetId, layerName, mag.mag, magSizeBytes)
-  } yield ()
+  def finishMagUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] =
+    for {
+      dataSourceId <- magUploadMetadataStore.findDataSourceId(uploadId)
+      mag <- magUploadMetadataStore.findMag(uploadId)
+      layerName <- magUploadMetadataStore.findLayerName(uploadId)
+      uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.mag)
+      unpackToDir = unpackToDirFor(dataSourceId).resolve(mag.mag.toMagLiteral(allowScalar = true))
+      _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed"
+      _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed"
+      // TODO clean up on failure, clean up on success
+      _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag)
+      magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
+      magAdapted = mag.copy(path = Some(UPath.fromLocalPath(unpackToDir)))
+      _ <- remoteWebknossosClient.reportMagUpload(
+        ReportMagUploadParameters(datasetId, layerName, magAdapted, magSizeBytes))
+    } yield ()

-  def finishAttachmentUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = ??? // TODO
+  def finishAttachmentUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] =
+    for {
+      dataSourceId <- attachmentUploadMetadataStore.findDataSourceId(uploadId)
+      attachment <- attachmentUploadMetadataStore.findAttachment(uploadId)
+      attachmentType <- attachmentUploadMetadataStore.findAttachmentType(uploadId)
+      layerName <- attachmentUploadMetadataStore.findLayerName(uploadId)
+      uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.attachment)
+      unpackToDir = unpackToDirFor(dataSourceId).resolve(attachment.dataFormat.toString).resolve(attachment.name)
+      _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.attachment) ?~> "dataset.upload.fileSizeCheck.failed"
+      _ <- checkAllChunksUploaded(uploadId, UploadDomain.attachment) ?~> "dataset.upload.allChunksUploadedCheck.failed"
+      // TODO clean up on failure, clean up on success
+      _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.attachment)
+      attachmentSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
+      attachmentAdapted = attachment.copy(path = UPath.fromLocalPath(unpackToDir))
+      _ <- remoteWebknossosClient.reportAttachmentUpload(
+        ReportAttachmentUploadParameters(datasetId, layerName, attachmentType, attachmentAdapted, attachmentSizeBytes))
+    } yield ()

   private def checkWithinRequestedFileSize(uploadDir: Path,
                                            uploadId: String,
@@ -407,7 +447,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- if (actualFileSize > reservedFileSize) {
             cleanUpDatasetExceedingSize(uploadDir, uploadId, uploadDomain)
             Fox.failure(
-              f"Uploaded dataset $datasetId exceeds the reserved size of $reservedFileSize bytes, got $actualFileSize bytes.")
+              f"Uploaded $uploadDomain $datasetId exceeds the reserved size of $reservedFileSize bytes, got $actualFileSize bytes.")
           } else Fox.successful(())
     } yield ()
   }.getOrElse(Fox.successful(()))
@@ -772,8 +812,12 @@ class UploadService @Inject()(dataSourceService: DataSourceService,

   private def getPathDepth(path: Path) = path.toString.count(_ == '/')

-  private def unpackOrMoveUploaded(uploadDir: Path, unpackToDir: Path, datasetId: ObjectId, uploadDomain: UploadDomain): Fox[Unit] =
+  private def unpackOrMoveUploaded(uploadDir: Path,
+                                   unpackToDir: Path,
+                                   datasetId: ObjectId,
+                                   uploadDomain: UploadDomain): Fox[Unit] =
     for {
+      _ <- PathUtils.ensureDirectoryBox(unpackToDir.getParent).toFox ?~> "dataset.import.fileAccessDenied"
       shallowFileList <- PathUtils.listFiles(uploadDir, silent = false).toFox
       excludeFromPrefix = LayerCategory.values.map(_.toString).toList
       firstFile = shallowFileList.headOption
From d5a038fa7ceca3e6d15df0b31fc3065a50686aa4 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Tue, 7 Apr 2026 13:06:18 +0200
Subject: [PATCH 17/37] report routes

---
 app/controllers/DatasetController.scala       |  6 ++--
 .../WKRemoteDataStoreController.scala         | 33 ++++++++++++++++++-
 app/models/dataset/Dataset.scala              |  6 ++--
 conf/webknossos.latest.routes                 |  2 ++
 .../160-upload-mags-attachments.sql           |  4 +--
 5 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala
index 36d08c705c7..86bfa8ddd56 100755
--- a/app/controllers/DatasetController.scala
+++ b/app/controllers/DatasetController.scala
@@ -754,9 +754,9 @@ class DatasetController @Inject()(userService: UserService,
       dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
       _ <-
Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN _ <- datasetLayerAttachmentsDAO.finishUploadOrUploadToPath(datasetId, - request.body.layerName, - request.body.attachmentName, - request.body.attachmentType) + request.body.layerName, + request.body.attachmentType, + request.body.attachmentName) dataStoreClient <- datasetService.clientFor(dataset) _ <- Fox.runIf(!dataset.isVirtual) { for { diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 8d91f33646c..9b7c8241ee5 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -20,7 +20,9 @@ import com.scalableminds.webknossos.datastore.services.uploading.{ DatasetUploadInfo, MagUploadAdditionalInfo, MagUploadInfo, - ReportDatasetUploadParameters + ReportAttachmentUploadParameters, + ReportDatasetUploadParameters, + ReportMagUploadParameters } import com.typesafe.scalalogging.LazyLogging import models.dataset._ @@ -203,6 +205,35 @@ class WKRemoteDataStoreController @Inject()( } } + def reportMagUpload(name: String, key: String, token: String): Action[ReportMagUploadParameters] = + Action.async(validateJson[ReportMagUploadParameters]) { implicit request => + dataStoreService.validateAccess(name, key) { _ => + for { + dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages( + "dataset.notFound", + request.body.datasetId) ~> NOT_FOUND + _ <- datasetMagDAO.finishUploadOrUploadToPath(request.body.datasetId, + request.body.layerName, + request.body.mag.mag) + } yield Ok + } + } + + def reportAttachmentUpload(name: String, key: String, token: String): Action[ReportAttachmentUploadParameters] = + Action.async(validateJson[ReportAttachmentUploadParameters]) { implicit request => + dataStoreService.validateAccess(name, key) { _ => + for { + dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages( + "dataset.notFound", + request.body.datasetId) ~> NOT_FOUND + _ <- datasetAttachmentDAO.finishUploadOrUploadToPath(request.body.datasetId, + request.body.layerName, + request.body.attachmentType, + request.body.attachment.name) + } yield Ok + } + } + def statusUpdate(name: String, key: String): Action[DataStoreStatus] = Action.async(validateJson[DataStoreStatus]) { implicit request => dataStoreService.validateAccess(name, key) { _ => diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala index c82e01dbb27..6e3b12bdd84 100755 --- a/app/models/dataset/Dataset.scala +++ b/app/models/dataset/Dataset.scala @@ -1336,16 +1336,16 @@ class DatasetLayerAttachmentDAO @Inject()(sqlClient: SqlClient)(implicit ec: Exe def finishUploadOrUploadToPath(datasetId: ObjectId, layerName: String, - attachmentName: String, - attachmentType: LayerAttachmentType.Value): Fox[Unit] = + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[Unit] = for { _ <- run(q"""UPDATE webknossos.dataset_layer_attachments SET uploadToPathIsPending = ${false}, uploadIsPending = ${false} WHERE _dataset = $datasetId AND layerName = $layerName - AND name = $attachmentName AND type = $attachmentType + AND name = $attachmentName """.asUpdate) } yield () diff --git a/conf/webknossos.latest.routes b/conf/webknossos.latest.routes index d30a6e44ceb..455e24ce8cd 100644 --- a/conf/webknossos.latest.routes +++ b/conf/webknossos.latest.routes @@ -144,6 +144,8 @@ POST /datastores/:name/reserveMagUpload POST 
/datastores/:name/reserveAttachmentUpload controllers.WKRemoteDataStoreController.reserveAttachmentUpload(name: String, key: String, token: String) GET /datastores/:name/getUnfinishedDatasetUploadsForUser controllers.WKRemoteDataStoreController.getUnfinishedDatasetUploadsForUser(name: String, key: String, token: String, organizationName: String) POST /datastores/:name/reportDatasetUpload controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, token: String, datasetId: ObjectId) +POST /datastores/:name/reportMagUpload controllers.WKRemoteDataStoreController.reportMagUpload(name: String, key: String, token: String) +POST /datastores/:name/reportAttachmentUpload controllers.WKRemoteDataStoreController.reportAttachmentUpload(name: String, key: String, token: String) POST /datastores/:name/deleteDataset controllers.WKRemoteDataStoreController.deleteDataset(name: String, key: String) GET /datastores/:name/findDatasetId controllers.WKRemoteDataStoreController.findDatasetId(name: String, key: String, datasetDirectoryName: String, organizationId: String) GET /datastores/:name/jobExportProperties controllers.WKRemoteDataStoreController.jobExportProperties(name: String, key: String, jobId: ObjectId) diff --git a/schema/evolutions/reversions/160-upload-mags-attachments.sql b/schema/evolutions/reversions/160-upload-mags-attachments.sql index 03e493d2dd2..041f3a142f5 100644 --- a/schema/evolutions/reversions/160-upload-mags-attachments.sql +++ b/schema/evolutions/reversions/160-upload-mags-attachments.sql @@ -1,10 +1,10 @@ START TRANSACTION; -do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 159 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql; +do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 160 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql; ALTER TABLE webknossos.dataset_layer_attachments DROP COLUMN uploadIsPending; ALTER TABLE webknossos.dataset_mags DROP COLUMN uploadIsPending; -UPDATE webknossos.releaseInformation SET schemaVersion = 160; +UPDATE webknossos.releaseInformation SET schemaVersion = 159; COMMIT TRANSACTION; From fa6547798343dfc8c914b39a32ccde0b3e2a3335 Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 13:08:28 +0200 Subject: [PATCH 18/37] bump schema version --- ...d-mags-attachments.sql => 161-upload-mags-attachments.sql} | 4 ++-- ...d-mags-attachments.sql => 161-upload-mags-attachments.sql} | 4 ++-- schema/schema.sql | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) rename schema/evolutions/{160-upload-mags-attachments.sql => 161-upload-mags-attachments.sql} (73%) rename schema/evolutions/reversions/{160-upload-mags-attachments.sql => 161-upload-mags-attachments.sql} (69%) diff --git a/schema/evolutions/160-upload-mags-attachments.sql b/schema/evolutions/161-upload-mags-attachments.sql similarity index 73% rename from schema/evolutions/160-upload-mags-attachments.sql rename to schema/evolutions/161-upload-mags-attachments.sql index 31bc14c0449..85a091a84e2 100644 --- a/schema/evolutions/160-upload-mags-attachments.sql +++ b/schema/evolutions/161-upload-mags-attachments.sql @@ -1,10 +1,10 @@ START TRANSACTION; -do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 159 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql; +do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 160 then raise exception 'Previous schema 
version mismatch'; end if; end; $$ language plpgsql; ALTER TABLE webknossos.dataset_layer_attachments ADD COLUMN uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE; ALTER TABLE webknossos.dataset_mags ADD COLUMN uploadIsPending BOOLEAN NOT NULL DEFAULT FALSE; -UPDATE webknossos.releaseInformation SET schemaVersion = 160; +UPDATE webknossos.releaseInformation SET schemaVersion = 161; COMMIT TRANSACTION; diff --git a/schema/evolutions/reversions/160-upload-mags-attachments.sql b/schema/evolutions/reversions/161-upload-mags-attachments.sql similarity index 69% rename from schema/evolutions/reversions/160-upload-mags-attachments.sql rename to schema/evolutions/reversions/161-upload-mags-attachments.sql index 041f3a142f5..8d26649fb3d 100644 --- a/schema/evolutions/reversions/160-upload-mags-attachments.sql +++ b/schema/evolutions/reversions/161-upload-mags-attachments.sql @@ -1,10 +1,10 @@ START TRANSACTION; -do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 160 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql; +do $$ begin if (select schemaVersion from webknossos.releaseInformation) <> 161 then raise exception 'Previous schema version mismatch'; end if; end; $$ language plpgsql; ALTER TABLE webknossos.dataset_layer_attachments DROP COLUMN uploadIsPending; ALTER TABLE webknossos.dataset_mags DROP COLUMN uploadIsPending; -UPDATE webknossos.releaseInformation SET schemaVersion = 159; +UPDATE webknossos.releaseInformation SET schemaVersion = 160; COMMIT TRANSACTION; diff --git a/schema/schema.sql b/schema/schema.sql index c08c3b48216..8d3566055fe 100644 --- a/schema/schema.sql +++ b/schema/schema.sql @@ -21,7 +21,7 @@ CREATE TABLE webknossos.releaseInformation ( schemaVersion BIGINT NOT NULL ); -INSERT INTO webknossos.releaseInformation(schemaVersion) values(160); +INSERT INTO webknossos.releaseInformation(schemaVersion) values(161); COMMIT TRANSACTION; From 6763bae87875d3be5dd08ec32a16daed89ef7c9d Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 13:10:05 +0200 Subject: [PATCH 19/37] changelog --- unreleased_changes/9402.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unreleased_changes/9402.md b/unreleased_changes/9402.md index fbcc74313d5..253e5347def 100644 --- a/unreleased_changes/9402.md +++ b/unreleased_changes/9402.md @@ -2,4 +2,4 @@ - Added routes for uploading attachments and mags to existing datasets via the python libs client. 
### Postgres Evolutions -- [131-more-indices-on-users.sql](schema/evolutions/131-more-indices-on-users.sql) +- [161-upload-mags-attachments.sql](schema/evolutions/161-upload-mags-attachments.sql) From 05522d43cde2c990675a0643fe3be58b8f150a42 Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 13:28:51 +0200 Subject: [PATCH 20/37] fix route names --- frontend/javascripts/admin/rest_api.ts | 2 +- .../controllers/UploadController.scala | 2 +- .../datastore/dataformats/MagLocator.scala | 2 +- .../services/DSRemoteWebknossosClient.scala | 19 +++++++++++++++---- .../conf/datastore.latest.routes | 2 +- 5 files changed, 19 insertions(+), 8 deletions(-) diff --git a/frontend/javascripts/admin/rest_api.ts b/frontend/javascripts/admin/rest_api.ts index 1e8be6f2136..797e1c67a61 100644 --- a/frontend/javascripts/admin/rest_api.ts +++ b/frontend/javascripts/admin/rest_api.ts @@ -1287,7 +1287,7 @@ export function getUnfinishedUploads( ): Promise { return doWithToken(async (token) => { const unfinishedUploads = (await Request.receiveJSON( - `/data/datasets/getUnfinishedUploads?token=${token}&organizationName=${organizationName}`, + `/data/datasets/upload/dataset/unfinishedUploads?token=${token}&organizationName=${organizationName}`, { host: datastoreHost, }, diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index 27ad3effe3c..30cb05c3369 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -39,7 +39,7 @@ class UploadController @Inject()( UserAccessRequest.administrateDatasets(request.body.organizationId)) { for { isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId, UploadDomain.dataset) - _ <- Fox.runIf(isKnownUpload) { + _ <- Fox.runIf(!isKnownUpload) { for { reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveDatasetUpload(request.body) ?~> "dataset.upload.validation.failed" _ <- uploadService.reserveDatasetUpload(request.body, diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala index 78663b89836..d12eba3c025 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala @@ -3,7 +3,7 @@ package com.scalableminds.webknossos.datastore.dataformats import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.webknossos.datastore.datareaders.AxisOrder import com.scalableminds.webknossos.datastore.helpers.UPath -import com.scalableminds.webknossos.datastore.models.datasource.{LayerAttachment, MagFormatHelper} +import com.scalableminds.webknossos.datastore.models.datasource.MagFormatHelper import com.scalableminds.webknossos.datastore.storage.LegacyDataVaultCredential import play.api.libs.json.{Json, OFormat} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala index 97b7d261bd6..c82a0e33424 100644 --- 
a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
@@ -14,7 +14,17 @@ import com.scalableminds.webknossos.datastore.models.UnfinishedUpload
 import com.scalableminds.webknossos.datastore.models.annotation.AnnotationSource
 import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId}
 import com.scalableminds.webknossos.datastore.rpc.RPC
-import com.scalableminds.webknossos.datastore.services.uploading.{AttachmentUploadAdditionalInfo, AttachmentUploadInfo, DatasetUploadAdditionalInfo, DatasetUploadInfo, MagUploadAdditionalInfo, MagUploadInfo, ReportAttachmentUploadParameters, ReportDatasetUploadParameters, ReportMagUploadParameters}
+import com.scalableminds.webknossos.datastore.services.uploading.{
+  AttachmentUploadAdditionalInfo,
+  AttachmentUploadInfo,
+  DatasetUploadAdditionalInfo,
+  DatasetUploadInfo,
+  MagUploadAdditionalInfo,
+  MagUploadInfo,
+  ReportAttachmentUploadParameters,
+  ReportDatasetUploadParameters,
+  ReportMagUploadParameters
+}
 import com.scalableminds.webknossos.datastore.storage.DataVaultCredential
 import com.typesafe.scalalogging.LazyLogging
 import play.api.inject.ApplicationLifecycle
@@ -86,14 +96,15 @@ class DSRemoteWebknossosClient @Inject()(

   def getUnfinishedUploadsForUser(organizationName: String)(implicit tc: TokenContext): Fox[List[UnfinishedUpload]] =
     for {
-      unfinishedUploads <- rpc(s"$webknossosUri/api/datastores/$dataStoreName/getUnfinishedUploadsForUser")
+      unfinishedUploads <- rpc(s"$webknossosUri/api/datastores/$dataStoreName/getUnfinishedDatasetUploadsForUser")
         .addQueryParam("key", dataStoreKey)
         .addQueryParam("organizationName", organizationName)
         .withTokenFromContext
         .getWithJsonResponse[List[UnfinishedUpload]]
     } yield unfinishedUploads

-  def reportDatasetUpload(datasetId: ObjectId, parameters: ReportDatasetUploadParameters)(implicit tc: TokenContext): Fox[_] =
+  def reportDatasetUpload(datasetId: ObjectId, parameters: ReportDatasetUploadParameters)(
+      implicit tc: TokenContext): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportDatasetUpload")
       .addQueryParam("key", dataStoreKey)
       .addQueryParam("datasetId", datasetId)
@@ -126,7 +137,7 @@ class DSRemoteWebknossosClient @Inject()(
       .putJson(dataSourcePaths)

   def reserveDatasetUpload(info: DatasetUploadInfo)(implicit tc: TokenContext): Fox[DatasetUploadAdditionalInfo] =
-    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveUpload")
+    rpc(s"$webknossosUri/api/datastores/$dataStoreName/reserveDatasetUpload")
      .addQueryParam("key", dataStoreKey)
      .withTokenFromContext
      .postJsonWithJsonResponse[DatasetUploadInfo, DatasetUploadAdditionalInfo](info)
diff --git a/webknossos-datastore/conf/datastore.latest.routes b/webknossos-datastore/conf/datastore.latest.routes
index 8d92d64c411..fdaf1943514 100644
--- a/webknossos-datastore/conf/datastore.latest.routes
+++ b/webknossos-datastore/conf/datastore.latest.routes
@@ -111,7 +111,7 @@ POST /datasets/upload/:uploadDomain
POST /datasets/upload/dataset/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveDatasetUpload()
POST /datasets/upload/mag/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveMagUpload()
POST /datasets/upload/attachment/reserveUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveAttachmentUpload()
-GET
/datasets/upload/:uploadDomain/getUnfinishedUploads @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String) +GET /datasets/upload/:uploadDomain/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String) POST /datasets/upload/:uploadDomain/finishUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String, uploadId: String) POST /datasets/upload/:uploadDomain/cancelUpload @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String, uploadId: String) From bbebdd90dc3684785c38fdf4e6f60489af3339c2 Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 13:37:12 +0200 Subject: [PATCH 21/37] adapt test db --- test/db/dataset_mags.csv | 52 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/test/db/dataset_mags.csv b/test/db/dataset_mags.csv index c1d580b1e6e..4ac65a6d11b 100644 --- a/test/db/dataset_mags.csv +++ b/test/db/dataset_mags.csv @@ -1,26 +1,26 @@ -_dataset,dataLayerName,mag,path,realPath,hasLocalData,channelIndex,credentialId,uploadToPathIsPending -'59e9cfbdba632ac2ab8b23b3','color_1','(1,1,1)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_1','(2,2,2)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_1','(4,4,4)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_1','(8,8,8)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_1','(16,16,16)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_2','(1,1,1)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_2','(2,2,2)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_2','(4,4,4)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_2','(8,8,8)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_2','(16,16,16)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_3','(1,1,1)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_3','(2,2,2)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_3','(4,4,4)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_3','(8,8,8)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b3','color_3','(16,16,16)','',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','color','(1,1,1)','./color/1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','color','(2,2,1)','./color/2-2-1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','color','(4,4,1)','./color/4-4-1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','color','(8,8,2)','./color/8-8-2',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','color','(16,16,4)','./color/16-16-4',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','segmentation','(1,1,1)','./segmentation/1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','segmentation','(2,2,1)','./segmentation/2-2-1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','segmentation','(4,4,1)','./segmentation/4-4-1',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','segmentation','(8,8,2)','./segmentation/8-8-2',,false,,,,false -'59e9cfbdba632ac2ab8b23b5','segmentation','(16,16,4)','./segmentation/16-16-4',,false,,,,false +_dataset,dataLayerName,mag,path,realPath,hasLocalData,channelIndex,credentialId,uploadToPathIsPending,uploadIsPending +'59e9cfbdba632ac2ab8b23b3','color_1','(1,1,1)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_1','(2,2,2)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_1','(4,4,4)','',,false,,,,false,false 
+'59e9cfbdba632ac2ab8b23b3','color_1','(8,8,8)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_1','(16,16,16)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_2','(1,1,1)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_2','(2,2,2)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_2','(4,4,4)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_2','(8,8,8)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_2','(16,16,16)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_3','(1,1,1)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_3','(2,2,2)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_3','(4,4,4)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_3','(8,8,8)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b3','color_3','(16,16,16)','',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','color','(1,1,1)','./color/1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','color','(2,2,1)','./color/2-2-1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','color','(4,4,1)','./color/4-4-1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','color','(8,8,2)','./color/8-8-2',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','color','(16,16,4)','./color/16-16-4',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','segmentation','(1,1,1)','./segmentation/1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','segmentation','(2,2,1)','./segmentation/2-2-1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','segmentation','(4,4,1)','./segmentation/4-4-1',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','segmentation','(8,8,2)','./segmentation/8-8-2',,false,,,,false,false +'59e9cfbdba632ac2ab8b23b5','segmentation','(16,16,4)','./segmentation/16-16-4',,false,,,,false,false From 0e6b8c1111495eda47f97a8ecd73bc1730c3ff45 Mon Sep 17 00:00:00 2001 From: Florian M Date: Tue, 7 Apr 2026 13:42:26 +0200 Subject: [PATCH 22/37] assert cancel domain --- .../webknossos/datastore/controllers/UploadController.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index 30cb05c3369..5cdd0daae22 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -180,7 +180,6 @@ class UploadController @Inject()( .getDatasetIdByUploadId(uploadId, uploadDomainValidated) ?~> s"Cannot find running upload with upload id $uploadId" response <- accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(datasetId)) { for { - // TODO other domains _ <- (uploadDomainValidated match { case UploadDomain.dataset => uploadService.finishDatasetUpload(uploadId, datasetId) case UploadDomain.mag => uploadService.finishMagUpload(uploadId, datasetId) @@ -197,6 +196,8 @@ class UploadController @Inject()( def cancelUpload(uploadDomain: String, uploadId: String): Action[AnyContent] = Action.async { implicit request => for { uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox + _ <- Fox + .fromBool(uploadDomainValidated == UploadDomain.dataset) ?~> "Cancel upload is only supported for datasets." 
datasetIdFox = uploadService.isKnownUpload(uploadId, uploadDomainValidated).flatMap { case false => Fox.failure("dataset.upload.validation.failed") case true => uploadService.getDatasetIdByUploadId(uploadId, uploadDomainValidated) @@ -204,7 +205,6 @@ class UploadController @Inject()( result <- datasetIdFox.flatMap { datasetId => accessTokenService.validateAccessFromTokenContext(UserAccessRequest.deleteDataset(datasetId)) { for { - // TODO adapt also to other domains _ <- dsRemoteWebknossosClient.deleteDataset(datasetId) ?~> "dataset.delete.webknossos.failed" _ <- uploadService.cancelUpload(uploadDomainValidated, uploadId) ?~> "Could not cancel the upload." } yield Ok From 3920836c5e0ec2357168708e11eaca8037d595d0 Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 8 Apr 2026 08:33:36 +0200 Subject: [PATCH 23/37] format --- app/models/dataset/UploadToPathsService.scala | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala index 726adce72e8..3ce0a568b9b 100644 --- a/app/models/dataset/UploadToPathsService.scala +++ b/app/models/dataset/UploadToPathsService.scala @@ -265,11 +265,11 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, parameters.attachmentType, datasetPath / parameters.layerName) _ <- datasetLayerAttachmentsDAO.insertWithUploadToPathPending(dataset._id, - parameters.layerName, - parameters.attachmentName, - parameters.attachmentType, - parameters.attachmentDataformat, - attachmentPath) + parameters.layerName, + parameters.attachmentName, + parameters.attachmentType, + parameters.attachmentDataformat, + attachmentPath) } yield attachmentPath def reserveMagUploadToPath(dataset: Dataset, parameters: ReserveMagUploadToPathRequest)( @@ -294,7 +294,9 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, mag: Vec3Int, overwritePending: Boolean)(implicit ec: ExecutionContext): Fox[Unit] = for { - existingMagLocatorPathBox <- datasetMagsDAO.findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox + existingMagLocatorPathBox <- datasetMagsDAO + .findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag) + .shiftBox _ <- existingMagLocatorPathBox match { case Full(existingMagLocatorPath) => if (overwritePending) { From 59410bb48418eb64daeeb13ad6800ae0841f42ae Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 8 Apr 2026 09:37:48 +0200 Subject: [PATCH 24/37] adapt e2e test; retry on cache lookup failed for type postgres error, bump api version --- app/utils/sql/SimpleSQLDAO.scala | 5 ++- conf/webknossos.versioned.routes | 3 ++ .../backend_snapshot_tests/datasets.e2e.ts | 41 ++++++++++--------- .../util/mvc/ApiVersioning.scala | 2 +- .../services/uploading/UploadService.scala | 8 +++- .../conf/datastore.versioned.routes | 3 ++ .../conf/tracingstore.versioned.routes | 2 + 7 files changed, 40 insertions(+), 24 deletions(-) diff --git a/app/utils/sql/SimpleSQLDAO.scala b/app/utils/sql/SimpleSQLDAO.scala index ae6e1a69d06..a350aef2b55 100644 --- a/app/utils/sql/SimpleSQLDAO.scala +++ b/app/utils/sql/SimpleSQLDAO.scala @@ -23,7 +23,10 @@ class SimpleSQLDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext implicit protected def sqlInterpolationWrapper(s: StringContext): SqlInterpolator = sqlInterpolation(s) + // Concurrent access for Serializable transactions leads to this error, can be solved by retry. 
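[Note: the UploadDomain type referenced throughout this series is defined outside the patches shown here. The sketch below is only an illustration of the shape its call sites imply -- three values (dataset, mag, attachment) and a fromString that yields an Option, so that .toFox can fail the request for unknown domain strings. The actual definition in the uploading package may differ.]

    // Sketch only -- inferred from call sites such as
    // UploadDomain.fromString(uploadDomain).toFox and UploadDomain.dataset/mag/attachment.
    object UploadDomain extends Enumeration {
      type UploadDomain = Value
      val dataset, mag, attachment = Value

      // Returns None for unknown domain strings, which the controllers turn into a Fox failure.
      def fromString(s: String): Option[UploadDomain] = values.find(_.toString == s)
    }

[With that shape, the new guard in cancelUpload above is a plain equality check against UploadDomain.dataset, matching the TODOs still left for the other domains.]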
From 3920836c3e0ec2357168708e11eaca8037d595d0 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 08:33:36 +0200
Subject: [PATCH 23/37] format

---
 app/models/dataset/UploadToPathsService.scala | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala
index 726adce72e8..3ce0a568b9b 100644
--- a/app/models/dataset/UploadToPathsService.scala
+++ b/app/models/dataset/UploadToPathsService.scala
@@ -265,11 +265,11 @@ class UploadToPathsService @Inject()(datasetService: DatasetService,
                                             parameters.attachmentType,
                                             datasetPath / parameters.layerName)
       _ <- datasetLayerAttachmentsDAO.insertWithUploadToPathPending(dataset._id,
-                                                            parameters.layerName,
-                                                            parameters.attachmentName,
-                                                            parameters.attachmentType,
-                                                            parameters.attachmentDataformat,
-                                                            attachmentPath)
+                                                                    parameters.layerName,
+                                                                    parameters.attachmentName,
+                                                                    parameters.attachmentType,
+                                                                    parameters.attachmentDataformat,
+                                                                    attachmentPath)
     } yield attachmentPath

   def reserveMagUploadToPath(dataset: Dataset, parameters: ReserveMagUploadToPathRequest)(
@@ -294,7 +294,9 @@ class UploadToPathsService @Inject()(datasetService: DatasetService,
                                    mag: Vec3Int,
                                    overwritePending: Boolean)(implicit ec: ExecutionContext): Fox[Unit] =
     for {
-      existingMagLocatorPathBox <- datasetMagsDAO.findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox
+      existingMagLocatorPathBox <- datasetMagsDAO
+        .findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag)
+        .shiftBox
       _ <- existingMagLocatorPathBox match {
         case Full(existingMagLocatorPath) =>
           if (overwritePending) {

From 59410bb48418b64deb13ad6800ae0841f42ae Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 09:37:48 +0200
Subject: [PATCH 24/37] adapt e2e test; retry on cache lookup failed for type
 postgres error, bump api version

---
 app/utils/sql/SimpleSQLDAO.scala              |  5 ++-
 conf/webknossos.versioned.routes              |  3 ++
 .../backend_snapshot_tests/datasets.e2e.ts    | 41 ++++++++++---------
 .../util/mvc/ApiVersioning.scala              |  2 +-
 .../services/uploading/UploadService.scala    |  8 +++-
 .../conf/datastore.versioned.routes          |  3 ++
 .../conf/tracingstore.versioned.routes       |  2 +
 7 files changed, 40 insertions(+), 24 deletions(-)

diff --git a/app/utils/sql/SimpleSQLDAO.scala b/app/utils/sql/SimpleSQLDAO.scala
index ae6e1a69d06..a350aef2b55 100644
--- a/app/utils/sql/SimpleSQLDAO.scala
+++ b/app/utils/sql/SimpleSQLDAO.scala
@@ -23,7 +23,10 @@ class SimpleSQLDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext
   implicit protected def sqlInterpolationWrapper(s: StringContext): SqlInterpolator = sqlInterpolation(s)

+  // Concurrent access in Serializable transactions leads to this error; it can be solved by retrying.
   protected lazy val transactionSerializationError = "could not serialize access"
+  // This error tends to occur only after schema changes (type recreation), e.g. during tests. It can also be solved by retrying.
+  private lazy val cacheLookupFailedForTypeError = "cache lookup failed for type"

   protected def run[R](query: DBIOAction[R, NoStream, Nothing],
                        retryCount: Int = 0,
@@ -76,7 +79,7 @@ class SimpleSQLDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContext
       _ <- run(
         composedQuery.transactionally.withTransactionIsolation(Serializable),
         retryCount = 50,
-        retryIfErrorContains = List(transactionSerializationError)
+        retryIfErrorContains = List(transactionSerializationError, cacheLookupFailedForTypeError)
       )
     } yield ()
   }
diff --git a/conf/webknossos.versioned.routes b/conf/webknossos.versioned.routes
index f7b8feb1d9e..be9e92d9204 100644
--- a/conf/webknossos.versioned.routes
+++ b/conf/webknossos.versioned.routes
@@ -4,6 +4,7 @@
 # Note: keep this in sync with the reported version numbers in the com.scalableminds.util.mvc.ApiVersioning trait

 # version log
+# changed in v14: Dataset upload routes and parameters have been refactored; introduced upload domains
 # changed in v13: Attachments not mentioned in the dataSource passed to updatePartial will now be deleted.
 # changed in v12: Dataset upload now expects layersToLink in new format with datasetId instead of orgaId+directoryName
 # changed in v11: Datasets reserveManualUpload flow via WK side. Note: older versions of the route are *not* supported for security reasons.
@@ -17,6 +18,8 @@
 # new in v3: annotation info and finish request now take timestamp
 # new in v2: annotation json contains visibility enum instead of booleans

+-> /v14/    webknossos.latest.Routes
+
 -> /v13/    webknossos.latest.Routes

 PATCH /v12/datasets/:datasetId/updatePartial    controllers.LegacyApiController.updatePartialV12(datasetId: ObjectId)
diff --git a/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts b/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts
index 3df84a6fbab..6fb607ac7f8 100644
--- a/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts
+++ b/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts
@@ -188,20 +188,22 @@ describe("Dataset API (E2E)", () => {
   it("Dataset upload", async () => {
     const uploadId = "test-dataset-upload-" + Date.now();

-    const reserveUpload = await fetch("/data/datasets/reserveUpload", {
+    const reserveUpload = await fetch("/data/datasets/upload/dataset/reserveUpload", {
       method: "POST",
       headers: new Headers({
         "Content-Type": "application/json",
       }),
       body: JSON.stringify({
-        filePaths: ["test-dataset-upload.zip"],
+        resumableUploadInfo: {
+          filePaths: ["test-dataset-upload.zip"],
+          totalFileCount: 1,
+          uploadId: uploadId,
+        },
         folderId: "570b9f4e4bb848d0885ea917",
-        initialTeams: [],
+        initialTeamIds: [],
         layersToLink: [],
-        name: "test-dataset-upload",
-        organization: "Organization_X",
-        totalFileCount: 1,
-        uploadId: uploadId,
+        datasetName: "test-dataset-upload",
+        organizationId: "Organization_X",
       }),
     });

@@ -248,7 +250,7 @@ describe("Dataset API (E2E)", () => {

     let content_type = `multipart/form-data; boundary=${boundary}`;

-    const uploadResult = await fetch("/data/datasets", {
+    const uploadResult = await fetch("/data/datasets/upload/dataset", {
       method: "POST",
       headers: new Headers({
         "Content-Type": content_type,
@@ -260,23 +262,22 @@ if (uploadResult.status !== 200) {
       expect.fail("Dataset upload failed");
     }

-    const finishResult = await fetch("/data/datasets/finishUpload", {
-      method: "POST",
-      headers: new Headers({
-        "Content-Type": "application/json",
-      }),
-      body: JSON.stringify({
-        uploadId: uploadId,
-        needsConversion: false,
-      }),
-    });
+    const finishResult = await fetch(
+      `/data/datasets/upload/dataset/finishUpload?uploadId=${uploadId}`,
+      {
+        method: "POST",
+        headers: new Headers({
+          "Content-Type": "application/json",
+        }),
+      },
+    );

     if (finishResult.status !== 200) {
       expect.fail("Dataset upload failed at finish");
     }

-    const { newDatasetId } = await finishResult.json();
-    const result = await fetch(`/api/datasets/${newDatasetId}/health`, {
+    const { datasetId } = await finishResult.json();
+    const result = await fetch(`/api/datasets/${datasetId}/health`, {
       headers: new Headers(),
     });
diff --git a/util/src/main/scala/com/scalableminds/util/mvc/ApiVersioning.scala b/util/src/main/scala/com/scalableminds/util/mvc/ApiVersioning.scala
index 82d821beaf7..114a240de12 100644
--- a/util/src/main/scala/com/scalableminds/util/mvc/ApiVersioning.scala
+++ b/util/src/main/scala/com/scalableminds/util/mvc/ApiVersioning.scala
@@ -5,7 +5,7 @@ import play.api.mvc.RequestHeader

 trait ApiVersioning {

-  protected val CURRENT_API_VERSION: Int = 13
+  protected val CURRENT_API_VERSION: Int = 14
   protected val OLDEST_SUPPORTED_API_VERSION: Int = 5

   protected lazy val apiVersioningInfo: JsObject =
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 2a5af6d907a..ccd02257009 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -66,7 +66,8 @@ case class MagUploadInfo(
     resumableUploadInfo: ResumableUploadInfo,
     datasetId: ObjectId,
     layerName: String,
-    mag: MagLocator
+    mag: MagLocator,
+    overwritePending: Boolean
 )
 object MagUploadInfo {
   implicit val jsonFormat: OFormat[MagUploadInfo] = Json.format[MagUploadInfo]
@@ -77,7 +78,8 @@ case class AttachmentUploadInfo(
     datasetId: ObjectId,
     layerName: String,
     attachmentType: LayerAttachmentType,
-    attachment: LayerAttachment
+    attachment: LayerAttachment,
+    overwritePending: Boolean
 )
 object AttachmentUploadInfo {
   implicit val jsonFormat: OFormat[AttachmentUploadInfo] = Json.format[AttachmentUploadInfo]
@@ -208,6 +210,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,

   def reserveMagUpload(magUploadInfo: MagUploadInfo, dataSourceId: DataSourceId): Fox[Unit] =
     for {
+      // TODO if overwritePending, cancel pending if exists (disk, redis, postgres)
       _ <- reserveResumableUpload(magUploadInfo.resumableUploadInfo,
                                   magUploadInfo.datasetId,
                                   dataSourceId,
@@ -219,6 +222,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,

   def reserveAttachmentUpload(attachmentUploadInfo: AttachmentUploadInfo, dataSourceId: DataSourceId): Fox[Unit] =
     for {
+      // TODO if overwritePending, cancel pending if exists (disk, redis, postgres)
       _ <- reserveResumableUpload(attachmentUploadInfo.resumableUploadInfo,
                                   attachmentUploadInfo.datasetId,
                                   dataSourceId,
diff --git a/webknossos-datastore/conf/datastore.versioned.routes b/webknossos-datastore/conf/datastore.versioned.routes
index ca58be8906d..edc1656e6f4 100644
--- a/webknossos-datastore/conf/datastore.versioned.routes
+++ b/webknossos-datastore/conf/datastore.versioned.routes
@@ -1,4 +1,7 @@
 # Note: keep this in sync with the reported version numbers in the com.scalableminds.util.mvc.ApiVersioning trait
+# Version log in webknossos.versioned.routes
+
+-> /v14/ datastore.latest.Routes

 -> /v13/ datastore.latest.Routes

 -> /v12/ datastore.latest.Routes
diff --git a/webknossos-tracingstore/conf/tracingstore.versioned.routes b/webknossos-tracingstore/conf/tracingstore.versioned.routes
index ffba2a9b1fb..78fa34e430e 100644
--- a/webknossos-tracingstore/conf/tracingstore.versioned.routes
+++ b/webknossos-tracingstore/conf/tracingstore.versioned.routes
@@ -1,5 +1,7 @@
 # Note: keep this in sync with the reported version numbers in the com.scalableminds.util.mvc.ApiVersioning trait
+# Version log in webknossos.versioned.routes
+-> /v14/ tracingstore.latest.Routes
 -> /v13/ tracingstore.latest.Routes
 -> /v12/ tracingstore.latest.Routes
 -> /v11/ tracingstore.latest.Routes
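[Note: the retry hook that patch 24 extends matches failure messages by substring. A minimal, self-contained sketch of that pattern follows -- names are illustrative only; the real SimpleSQLDAO.run operates on Slick DBIOActions and returns a Fox rather than a Future.]

    import scala.concurrent.{ExecutionContext, Future}

    // Retry an operation while its failure message contains one of the given
    // substrings -- the same idea run() applies to "could not serialize access"
    // and now also to "cache lookup failed for type".
    def runWithRetry[R](op: () => Future[R], retryCount: Int, retryIfErrorContains: List[String])(
        implicit ec: ExecutionContext): Future[R] =
      op().recoverWith {
        case e if retryCount > 0 && retryIfErrorContains.exists(msg => Option(e.getMessage).exists(_.contains(msg))) =>
          runWithRetry(op, retryCount - 1, retryIfErrorContains)
      }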
From fcdf4f5d8601134b2ba09a14ee47d3587f7a2c47 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 12:00:04 +0200
Subject: [PATCH 25/37] fixes

---
 .../webknossos/datastore/controllers/UploadController.scala | 4 ++--
 .../datastore/services/uploading/UploadService.scala        | 4 ++--
 webknossos-datastore/conf/datastore.latest.routes           | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
index 5cdd0daae22..ce9f868b436 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala
@@ -56,7 +56,7 @@ class UploadController @Inject()(
     accessTokenService.validateAccessFromTokenContext(UserAccessRequest.writeDataset(request.body.datasetId)) {
       for {
         isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId, UploadDomain.mag)
-        _ <- Fox.runIf(isKnownUpload) {
+        _ <- Fox.runIf(!isKnownUpload) {
           for {
             reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveMagUpload(request.body) ?~> "dataset.upload.validation.failed"
             _ <- uploadService.reserveMagUpload(request.body, reserveUploadAdditionalInfo.dataSourceId)
@@ -72,7 +72,7 @@ class UploadController @Inject()(
       for {
         isKnownUpload <- uploadService.isKnownUpload(request.body.resumableUploadInfo.uploadId,
                                                      UploadDomain.attachment)
-        _ <- Fox.runIf(isKnownUpload) {
+        _ <- Fox.runIf(!isKnownUpload) {
           for {
             reserveUploadAdditionalInfo <- dsRemoteWebknossosClient.reserveAttachmentUpload(request.body) ?~> "dataset.upload.validation.failed"
             _ <- uploadService.reserveAttachmentUpload(request.body, reserveUploadAdditionalInfo.dataSourceId)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index ccd02257009..370b0422901 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -413,8 +413,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed"
       _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed"
       // TODO clean up on failure, clean up on success
-      magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
       _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag)
+      magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
       magAdapted = mag.copy(path = Some(UPath.fromLocalPath(unpackToDir)))
       _ <- remoteWebknossosClient.reportMagUpload(
         ReportMagUploadParameters(datasetId, layerName, magAdapted, magSizeBytes))
@@ -431,8 +431,8 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed"
       _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed"
       // TODO clean up on failure, clean up on success
-      attachmentSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
       _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.attachment)
+      attachmentSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
       attachmentAdapted = attachment.copy(path = UPath.fromLocalPath(unpackToDir))
       _ <- remoteWebknossosClient.reportAttachmentUpload(
         ReportAttachmentUploadParameters(datasetId, layerName, attachmentType, attachmentAdapted, attachmentSizeBytes))
diff --git a/webknossos-datastore/conf/datastore.latest.routes b/webknossos-datastore/conf/datastore.latest.routes
index fdaf1943514..0550b45a488 100644
--- a/webknossos-datastore/conf/datastore.latest.routes
+++ b/webknossos-datastore/conf/datastore.latest.routes
@@ -109,8 +109,8 @@ POST /datasets/:datasetId/layers/:dataLayerName/segmentStatistics/surfa
 GET  /datasets/upload/:uploadDomain                @com.scalableminds.webknossos.datastore.controllers.UploadController.testChunk(resumableChunkNumber: Int, resumableIdentifier: String, uploadDomain: String)
 POST /datasets/upload/:uploadDomain                @com.scalableminds.webknossos.datastore.controllers.UploadController.uploadChunk(uploadDomain: String)
 POST /datasets/upload/dataset/reserveUpload        @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveDatasetUpload()
-POST /datasets/upload/mag/reserveUpload            @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveAttachmentUpload()
-POST /datasets/upload/attachment/reserveUpload     @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveMagUpload()
+POST /datasets/upload/mag/reserveUpload            @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveMagUpload()
+POST /datasets/upload/attachment/reserveUpload     @com.scalableminds.webknossos.datastore.controllers.UploadController.reserveAttachmentUpload()
 GET  /datasets/upload/:uploadDomain/unfinishedUploads    @com.scalableminds.webknossos.datastore.controllers.UploadController.getUnfinishedUploads(organizationName: String, uploadDomain: String)
 POST /datasets/upload/:uploadDomain/finishUpload   @com.scalableminds.webknossos.datastore.controllers.UploadController.finishUpload(uploadDomain: String, uploadId: String)
 POST /datasets/upload/:uploadDomain/cancelUpload   @com.scalableminds.webknossos.datastore.controllers.UploadController.cancelUpload(uploadDomain: String, uploadId: String)

From 118ef594cb5407803f252ca3162c5a3eb82eb8a1 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 14:12:07 +0200
Subject: [PATCH 26/37] move to target; misc fixes

---
 app/controllers/DatasetController.scala       | 10 +--
 .../WKRemoteDataStoreController.scala         | 29 ++++---
 app/models/dataset/Dataset.scala              | 55 ++++++++++---
 app/models/dataset/UploadToPathsService.scala |  4 +-
 .../uploading/UploadMetadataStore.scala       |  2 +-
 .../services/uploading/UploadService.scala    | 80 +++++++++++++++----
 6 files changed, 133 insertions(+), 47 deletions(-)

diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala
index 86bfa8ddd56..352ff33be18 100755
--- a/app/controllers/DatasetController.scala
+++ b/app/controllers/DatasetController.scala
@@ -726,7 +726,7 @@ class DatasetController @Inject()(userService: UserService,
       for {
         dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
         _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN
-        _ <- datasetMagsDAO.finishUploadOrUploadToPath(datasetId, request.body.layerName, request.body.mag)
+        _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag)
         dataStoreClient <- datasetService.clientFor(dataset)
         _ <- Fox.runIf(!dataset.isVirtual) {
           for {
@@ -753,10 +753,10 @@ class DatasetController @Inject()(userService: UserService,
       for {
         dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
         _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN
-        _ <- datasetLayerAttachmentsDAO.finishUploadOrUploadToPath(datasetId,
-                                                                   request.body.layerName,
-                                                                   request.body.attachmentType,
-                                                                   request.body.attachmentName)
+        _ <- datasetLayerAttachmentsDAO.finishUploadToPath(datasetId,
+                                                           request.body.layerName,
+                                                           request.body.attachmentType,
+                                                           request.body.attachmentName)
         dataStoreClient <- datasetService.clientFor(dataset)
         _ <- Fox.runIf(!dataset.isVirtual) {
           for {
diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala
index 9b7c8241ee5..dec72d62b85 100644
--- a/app/controllers/WKRemoteDataStoreController.scala
+++ b/app/controllers/WKRemoteDataStoreController.scala
@@ -5,6 +5,7 @@ import com.scalableminds.util.objectid.ObjectId
 import com.scalableminds.util.time.Instant
 import com.scalableminds.util.tools.Fox
 import com.scalableminds.webknossos.datastore.controllers.JobExportProperties
+import com.scalableminds.webknossos.datastore.helpers.UPath
 import com.scalableminds.webknossos.datastore.models.UnfinishedUpload
 import com.scalableminds.webknossos.datastore.models.datasource.{
   DataSource,
@@ -109,13 +110,11 @@ class WKRemoteDataStoreController @Inject()(
           (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName)
           _ <- Fox.fromBool(!dataLayer.mags.exists(_.mag.maxDim == request.body.mag.mag.maxDim)) ?~> s"New mag ${request.body.mag.mag} conflicts with existing mag of the layer."
           _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore."
-          path <- request.body.mag.path.toFox ?~> "dataset.reserveMagUpload.pathNotSet" // TODO ensure caller sets path
           _ <- datasetMagDAO.insertWithUploadPending(request.body.datasetId,
                                                      request.body.layerName,
                                                      request.body.mag.mag,
                                                      request.body.mag.axisOrder,
-                                                     request.body.mag.channelIndex,
-                                                     path)
+                                                     request.body.mag.channelIndex)
         } yield Ok(Json.toJson(MagUploadAdditionalInfo(dataSource.id)))
       }
     }
@@ -133,13 +132,14 @@ class WKRemoteDataStoreController @Inject()(
             _.getByTypeAndName(request.body.attachmentType, request.body.attachment.name))
           _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}"
           _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload attachment to existing dataset via different datastore."
+          dummyAttachmentPath <- UPath.fromString("").toFox
           _ <- datasetAttachmentDAO.insertWithUploadPending(
             request.body.datasetId,
             request.body.layerName,
             request.body.attachment.name,
             request.body.attachmentType,
             request.body.attachment.dataFormat,
-            request.body.attachment.path // TODO ensure caller sets correct path
+            dummyAttachmentPath
           )
         } yield Ok(Json.toJson(AttachmentUploadAdditionalInfo(dataSource.id)))
       }
     }
@@ -209,12 +209,13 @@ class WKRemoteDataStoreController @Inject()(
     Action.async(validateJson[ReportMagUploadParameters]) { implicit request =>
       dataStoreService.validateAccess(name, key) { _ =>
         for {
-          dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
+          _ <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
            "dataset.notFound",
            request.body.datasetId) ~> NOT_FOUND
-          _ <- datasetMagDAO.finishUploadOrUploadToPath(request.body.datasetId,
-                                                        request.body.layerName,
-                                                        request.body.mag.mag)
+          // TODO assert pending exists?
+          _ <- request.body.mag.path.toFox ?~> "dataset.finishMagUpload.pathNotSet"
+          _ <- datasetMagDAO.finishUpload(request.body.datasetId, request.body.layerName, request.body.mag)
+          // TODO clear ds cache
         } yield Ok
       }
     }
@@ -223,13 +224,15 @@ class WKRemoteDataStoreController @Inject()(
     Action.async(validateJson[ReportAttachmentUploadParameters]) { implicit request =>
       dataStoreService.validateAccess(name, key) { _ =>
         for {
-          dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
+          _ <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
            "dataset.notFound",
            request.body.datasetId) ~> NOT_FOUND
-          _ <- datasetAttachmentDAO.finishUploadOrUploadToPath(request.body.datasetId,
-                                                               request.body.layerName,
-                                                               request.body.attachmentType,
-                                                               request.body.attachment.name)
+          // TODO assert pending exists?
+          _ <- datasetAttachmentDAO.finishUpload(request.body.datasetId,
+                                                 request.body.layerName,
+                                                 request.body.attachmentType,
+                                                 request.body.attachment)
+          // TODO clear ds cache
         } yield Ok
       }
     }
diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala
index 6e3b12bdd84..f4db43c661a 100755
--- a/app/models/dataset/Dataset.scala
+++ b/app/models/dataset/Dataset.scala
@@ -38,6 +38,7 @@ import models.dataset.DatasetCreationType.DatasetCreationType

 import javax.inject.Inject
 import models.organization.OrganizationDAO
+import org.apache.pekko.http.scaladsl.model.headers.ContentDispositionTypes.attachment
 import play.api.i18n.{Messages, MessagesProvider}
 import play.api.libs.json._
 import slick.dbio.DBIO
@@ -970,24 +971,36 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex
                               layerName: String,
                               mag: Vec3Int,
                               axisOrder: Option[AxisOrder],
-                              channelIndex: Option[Int],
-                              path: UPath): Fox[Unit] =
+                              channelIndex: Option[Int]): Fox[Unit] =
     for {
       _ <- run(
         q"""INSERT INTO webknossos.dataset_mags(_dataset, dataLayerName, mag, path, axisOrder, channelIndex, uploadToPathIsPending, uploadIsPending)
-            VALUES($datasetId, $layerName, $mag, $path, ${axisOrder
+            VALUES($datasetId, $layerName, $mag, $None, ${axisOrder
          .map(Json.toJson(_))}, $channelIndex, ${false}, ${true})""".asUpdate)
    } yield ()

-  def finishUploadOrUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
+  def finishUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] =
     for {
       _ <- run(
         q"""UPDATE webknossos.dataset_mags
-            SET uploadToPathIsPending = ${false},
-                uploadToPath = ${false},
-            WHERE _dataset = $datasetId
-            AND dataLayerName = $layerName
-            AND mag = $mag::webknossos.VECTOR3""".asUpdate
+            SET uploadToPathIsPending = ${false},
+                uploadIsPending = ${false}
+            WHERE _dataset = $datasetId
+            AND dataLayerName = $layerName
+            AND mag = $mag::webknossos.VECTOR3""".asUpdate
       )
     } yield ()

+  def finishUpload(datasetId: ObjectId, layerName: String, mag: MagLocator): Fox[Unit] =
+    for {
+      _ <- run(
+        q"""UPDATE webknossos.dataset_mags
+            SET uploadToPathIsPending = ${false},
+                uploadIsPending = ${false},
+                path = ${mag.path}
+            WHERE _dataset = $datasetId
+            AND dataLayerName = $layerName
+            AND mag = ${mag.mag}::webknossos.VECTOR3""".asUpdate
+      )
+    } yield ()
+
@@ -1334,10 +1347,10 @@ class DatasetLayerAttachmentDAO @Inject()(sqlClient: SqlClient)(implicit ec: Exe
     } yield first
   }

-  def finishUploadOrUploadToPath(datasetId: ObjectId,
-                                 layerName: String,
-                                 attachmentType: LayerAttachmentType.Value,
-                                 attachmentName: String): Fox[Unit] =
+  def finishUploadToPath(datasetId: ObjectId,
+                         layerName: String,
+                         attachmentType: LayerAttachmentType.Value,
+                         attachmentName: String): Fox[Unit] =
     for {
       _ <- run(q"""UPDATE webknossos.dataset_layer_attachments
                    SET uploadToPathIsPending = ${false},
                    WHERE _dataset = $datasetId
                    AND layerName = $layerName
                    AND type = $attachmentType
                    AND name = $attachmentName
                 """.asUpdate)
     } yield ()

+  def finishUpload(datasetId: ObjectId,
+                   layerName: String,
+                   attachmentType: LayerAttachmentType.Value,
+                   attachment: LayerAttachment): Fox[Unit] =
+    for {
+      _ <- run(q"""UPDATE webknossos.dataset_layer_attachments
+                   SET uploadToPathIsPending = ${false},
+                       uploadIsPending = ${false},
+                       path = ${attachment.path}
+                   WHERE _dataset = $datasetId
+                   AND layerName = $layerName
+                   AND type = $attachmentType
+                   AND name = ${attachment.name}
+                """.asUpdate)
+    } yield ()
+
 implicit def GetResultStorageRelevantDataLayerAttachment: GetResult[StorageRelevantDataLayerAttachment] =
    GetResult(
      r =>
diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala
index 3ce0a568b9b..78ec2ef697a 100644
--- a/app/models/dataset/UploadToPathsService.scala
+++ b/app/models/dataset/UploadToPathsService.scala
@@ -232,7 +232,9 @@ class UploadToPathsService @Inject()(datasetService: DatasetService,
     val defaultDirName = LayerAttachmentType.defaultDirectoryNameFor(attachmentType)
     val suffix = LayerAttachmentDataformat.suffixFor(attachmentDataformat)
     val safeAttachmentName =
-      TextUtils.normalizeStrong(attachmentName).getOrElse(s"$attachmentType-${ObjectId.generate}")
+      TextUtils
+        .normalizeStrong(attachmentName)
+        .getOrElse(s"${attachmentType}__${RandomIDGenerator.generateBlocking(12)}")
     layerPath / defaultDirName / (safeAttachmentName + suffix)
   }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
index 90e6b8d8bb3..bc4c0d463ed 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala
@@ -225,7 +225,7 @@ class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedi
     store.findParsed[LayerAttachment](redisKeyForAttachment(uploadId))

   def findAttachmentType(uploadId: String)(implicit ec: ExecutionContext): Fox[LayerAttachmentType] =
-    store.findParsed[LayerAttachmentType](redisKeyForAttachment(uploadId))
+    store.findParsed[LayerAttachmentType](redisKeyForAttachmentType(uploadId))

   def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] =
     store.find(redisKeyForLayerName(uploadId)).map(_.toFox).flatten
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 370b0422901..9d1a96e531b 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -27,6 +27,7 @@ import com.scalableminds.webknossos.datastore.services.{DSRemoteWebknossosClient
 import com.scalableminds.webknossos.datastore.storage.DataVaultService
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.commons.io.FileUtils
+import org.apache.pekko.http.scaladsl.model.headers.ContentDispositionTypes.attachment
 import play.api.libs.json.{Json, OFormat}
 import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest

@@ -230,7 +231,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       uploadId = attachmentUploadInfo.resumableUploadInfo.uploadId
       _ <- attachmentUploadMetadataStore.insertAttachment(uploadId, attachmentUploadInfo.attachment.withoutCredential)
       _ <- attachmentUploadMetadataStore.insertAttachmentType(uploadId, attachmentUploadInfo.attachmentType)
-      _ <- magUploadMetadataStore.insertLayerName(uploadId, attachmentUploadInfo.layerName)
+      _ <- attachmentUploadMetadataStore.insertLayerName(uploadId, attachmentUploadInfo.layerName)
     } yield ()

   private def reserveResumableUpload(resumableUploadInfo: ResumableUploadInfo,
@@ -373,7 +374,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox
       _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.dataset) ?~> "dataset.upload.fileSizeCheck.failed"
       _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed"
-      unpackToDir = unpackToDirFor(dataSourceId)
+      unpackToDir = unpackToDirFor(dataSourceId, UploadDomain.dataset, uploadId)
       unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.dataset).shiftBox
       _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset)
       _ <- cleanUpOnFailure(unpackResult,
@@ -388,7 +389,11 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
                             needsConversion,
                             label = s"processing dataset at $unpackToDir")
       datasetSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
-      dataSourceWithAbsolutePathsOpt <- moveUnpackedToTarget(unpackToDir, needsConversion, datasetId, dataSourceId) ?~> "dataset.upload.moveUnpackedToTarget.failed"
+      dataSourceWithAbsolutePathsOpt <- moveUnpackedDatasetToTarget(
+        unpackToDir,
+        needsConversion,
+        datasetId,
+        dataSourceId) ?~> "dataset.upload.moveUnpackedToTarget.failed"
       _ <- remoteWebknossosClient.reportDatasetUpload(
         datasetId,
         ReportDatasetUploadParameters(
@@ -409,13 +414,20 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       mag <- magUploadMetadataStore.findMag(uploadId)
       layerName <- magUploadMetadataStore.findLayerName(uploadId)
       uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.mag)
-      unpackToDir = unpackToDirFor(dataSourceId).resolve(mag.mag.toMagLiteral(allowScalar = true))
+      unpackToDir = unpackToDirFor(dataSourceId, UploadDomain.mag, uploadId)
+        .resolve(mag.mag.toMagLiteral(allowScalar = true))
       _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed"
       _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed"
       // TODO clean up on failure, clean up on success
       _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag)
       magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
-      magAdapted = mag.copy(path = Some(UPath.fromLocalPath(unpackToDir)))
+      finalPath <- moveUnpackedMagOrAttachmentToTarget(unpackToDir,
+                                                       layerName,
+                                                       datasetId,
+                                                       dataSourceId,
+                                                       s"${mag.mag.toMagLiteral(true)}__${ObjectId.generate}",
+                                                       UploadDomain.mag)
+      magAdapted = mag.copy(path = Some(finalPath))
       _ <- remoteWebknossosClient.reportMagUpload(
         ReportMagUploadParameters(datasetId, layerName, magAdapted, magSizeBytes))
     } yield ()
@@ -427,13 +439,21 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       attachmentType <- attachmentUploadMetadataStore.findAttachmentType(uploadId)
       layerName <- attachmentUploadMetadataStore.findLayerName(uploadId)
       uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.attachment)
-      unpackToDir = unpackToDirFor(dataSourceId).resolve(attachment.dataFormat.toString).resolve(attachment.name)
-      _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed"
-      _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed"
+      unpackToDir = unpackToDirFor(dataSourceId, UploadDomain.attachment, uploadId)
+      _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.attachment) ?~> "dataset.upload.fileSizeCheck.failed"
+      _ <- checkAllChunksUploaded(uploadId, UploadDomain.attachment) ?~> "dataset.upload.allChunksUploadedCheck.failed"
       // TODO clean up on failure, clean up on success
       _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.attachment)
       attachmentSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed"
-      attachmentAdapted = attachment.copy(path = UPath.fromLocalPath(unpackToDir))
+      finalPath <- moveUnpackedMagOrAttachmentToTarget(
+        unpackToDir,
+        layerName,
+        datasetId,
+        dataSourceId,
+        s"$attachmentType/${TextUtils.normalizeStrong(attachment.name).getOrElse(attachment.name)}__${ObjectId.generate}",
+        UploadDomain.attachment
+      )
+      attachmentAdapted = attachment.copy(path = finalPath)
       _ <- remoteWebknossosClient.reportAttachmentUpload(
         ReportAttachmentUploadParameters(datasetId, layerName, attachmentType, attachmentAdapted, attachmentSizeBytes))
     } yield ()
@@ -481,10 +501,40 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
         })
     } yield ()

+  private def moveUnpackedMagOrAttachmentToTarget(unpackedDir: Path,
+                                                  layerName: String,
+                                                  datasetId: ObjectId,
+                                                  dataSourceId: DataSourceId,
+                                                  dirName: String,
+                                                  domain: UploadDomain): Fox[UPath] =
+    if (dataStoreConfig.Datastore.S3Upload.enabled) {
+      for {
+        s3UploadBucket <- managedS3Service.s3UploadBucketOpt.toFox
+        _ = logger.info(s"finishUpload for $domain ($datasetId): Copying data to s3 bucket $s3UploadBucket...")
+        beforeS3Upload = Instant.now
+        s3ObjectKey = s"${dataStoreConfig.Datastore.S3Upload.objectKeyPrefix}/${dataSourceId.organizationId}/${dataSourceId.directoryName}/$layerName/$dirName"
+        _ <- uploadDirectoryToS3(unpackedDir, s3UploadBucket, s3ObjectKey)
+        _ = Instant.logSince(beforeS3Upload, s"Forwarding of uploaded $domain for $datasetId ($dataSourceId) to S3", logger)
+        endPointHost = new URI(dataStoreConfig.Datastore.S3Upload.credentialName).getHost
+        finalUploadedS3Path <- UPath.fromString(s"s3://$endPointHost/$s3UploadBucket/$s3ObjectKey").toFox
+      } yield finalUploadedS3Path
+    } else {
+      val finalUploadedLocalPath =
+        dataBaseDir
+          .resolve(dataSourceId.organizationId)
+          .resolve(dataSourceId.directoryName)
+          .resolve(layerName)
+          .resolve(dirName)
+      logger.info(s"finishUpload for $domain ($datasetId): Moving data to final local path $finalUploadedLocalPath...")
+      for {
+        _ <- tryo(FileUtils.moveDirectory(unpackedDir.toFile, finalUploadedLocalPath.toFile)).toFox
+      } yield UPath.fromLocalPath(finalUploadedLocalPath)
+    }
+
-  private def moveUnpackedToTarget(unpackedDir: Path,
-                                   needsConversion: Boolean,
-                                   datasetId: ObjectId,
-                                   dataSourceId: DataSourceId): Fox[Option[UsableDataSource]] =
+  private def moveUnpackedDatasetToTarget(unpackedDir: Path,
+                                          needsConversion: Boolean,
+                                          datasetId: ObjectId,
+                                          dataSourceId: DataSourceId): Fox[Option[UsableDataSource]] =
     if (needsConversion) {
       logger.info(s"finishUpload for $datasetId: Moving data to input dir for worker conversion...")
       val forConversionPath =
@@ -678,11 +728,13 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
     } yield ()
   }

-  private def unpackToDirFor(dataSourceId: DataSourceId): Path =
+  private def unpackToDirFor(dataSourceId: DataSourceId, domain: UploadDomain, uploadId: String): Path =
     dataBaseDir
       .resolve(dataSourceId.organizationId)
       .resolve(uploadingDir)
       .resolve(unpackedDir)
+      .resolve(domain.toString)
+      .resolve(uploadId)
       .resolve(dataSourceId.directoryName)

   private def guessTypeOfUploadedDataSource(dataSourceDir: Path): UploadedDataSourceType.Value =
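[Note: the target location computed by moveUnpackedMagOrAttachmentToTarget in patch 26 follows the same organization/dataset/layer/dirName layout on both backends; only the prefix differs. The illustration below is assembled by hand -- every concrete value (base dir, host, bucket, prefix, ids) is invented; the double-underscore suffix mirrors the ObjectId suffix built in finishMagUpload and finishAttachmentUpload.]

    // Illustration only -- all concrete values here are made up.
    val organizationId = "Organization_X"
    val directoryName = "my_dataset"
    val layerName = "color"
    val dirName = "2-2-1__66f2c5a9e3a1b20001a45c7d" // <magLiteral>__<ObjectId>

    // Local backend: <dataBaseDir>/<orga>/<datasetDir>/<layer>/<dirName>
    val localTarget = s"/srv/binaryData/$organizationId/$directoryName/$layerName/$dirName"

    // S3 backend: s3://<endpointHost>/<bucket>/<objectKeyPrefix>/<orga>/<datasetDir>/<layer>/<dirName>
    val s3Target = s"s3://s3.example.org/upload-bucket/uploads/$organizationId/$directoryName/$layerName/$dirName"

[Attachments use <attachmentType>/<normalizedName>__<ObjectId> as dirName, so they land one directory deeper than mags.]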
From 7da534d9f7401cfcb590dbf533e80d21cf03a664 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 14:30:40 +0200
Subject: [PATCH 27/37] invalidate cache

---
 app/controllers/LegacyApiController.scala         |  6 ++++++
 app/controllers/WKRemoteDataStoreController.scala | 14 ++++++++------
 conf/webknossos.latest.routes                     |  4 ++--
 .../services/DSRemoteWebknossosClient.scala       |  2 --
 .../services/uploading/UploadService.scala        | 13 -------------
 5 files changed, 16 insertions(+), 23 deletions(-)

diff --git a/app/controllers/LegacyApiController.scala b/app/controllers/LegacyApiController.scala
index fafe35d3170..ceca56d89f6 100644
--- a/app/controllers/LegacyApiController.scala
+++ b/app/controllers/LegacyApiController.scala
@@ -46,6 +46,12 @@ object LegacyTaskParameters {
   implicit val taskParametersFormat: Format[LegacyTaskParameters] = Json.format[LegacyTaskParameters]
 }

+case class LegacyUploadInformation(uploadId: String)
+
+object LegacyUploadInformation {
+  implicit val jsonFormat: OFormat[LegacyUploadInformation] = Json.format[LegacyUploadInformation]
+}
+
 class LegacyApiController @Inject()(datasetController: DatasetController,
                                     projectController: ProjectController,
                                     taskController: TaskController,
diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala
index dec72d62b85..0517b505927 100644
--- a/app/controllers/WKRemoteDataStoreController.scala
+++ b/app/controllers/WKRemoteDataStoreController.scala
@@ -205,26 +205,27 @@ class WKRemoteDataStoreController @Inject()(
     }
   }

-  def reportMagUpload(name: String, key: String, token: String): Action[ReportMagUploadParameters] =
+  def reportMagUpload(name: String, key: String): Action[ReportMagUploadParameters] =
     Action.async(validateJson[ReportMagUploadParameters]) { implicit request =>
       dataStoreService.validateAccess(name, key) { _ =>
         for {
-          _ <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
+          dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
            "dataset.notFound",
            request.body.datasetId) ~> NOT_FOUND
           // TODO assert pending exists?
           _ <- request.body.mag.path.toFox ?~> "dataset.finishMagUpload.pathNotSet"
           _ <- datasetMagDAO.finishUpload(request.body.datasetId, request.body.layerName, request.body.mag)
-          // TODO clear ds cache
+          dataStoreClient <- datasetService.clientFor(dataset)(GlobalAccessContext)
+          _ <- dataStoreClient.invalidateDatasetInDSCache(dataset._id)
         } yield Ok
       }
     }

-  def reportAttachmentUpload(name: String, key: String, token: String): Action[ReportAttachmentUploadParameters] =
+  def reportAttachmentUpload(name: String, key: String): Action[ReportAttachmentUploadParameters] =
     Action.async(validateJson[ReportAttachmentUploadParameters]) { implicit request =>
       dataStoreService.validateAccess(name, key) { _ =>
         for {
-          _ <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
+          dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages(
            "dataset.notFound",
            request.body.datasetId) ~> NOT_FOUND
           // TODO assert pending exists?
@@ -232,7 +233,8 @@ class WKRemoteDataStoreController @Inject()(
                                                  request.body.layerName,
                                                  request.body.attachmentType,
                                                  request.body.attachment)
-          // TODO clear ds cache
+          dataStoreClient <- datasetService.clientFor(dataset)(GlobalAccessContext)
+          _ <- dataStoreClient.invalidateDatasetInDSCache(dataset._id)
         } yield Ok
       }
     }
diff --git a/conf/webknossos.latest.routes b/conf/webknossos.latest.routes
index 455e24ce8cd..bf053b997e7 100644
--- a/conf/webknossos.latest.routes
+++ b/conf/webknossos.latest.routes
@@ -143,8 +143,8 @@ POST /datastores/:name/reserveDatasetUpload
 POST /datastores/:name/reserveMagUpload                     controllers.WKRemoteDataStoreController.reserveMagUpload(name: String, key: String, token: String)
 POST /datastores/:name/reserveAttachmentUpload              controllers.WKRemoteDataStoreController.reserveAttachmentUpload(name: String, key: String, token: String)
 GET  /datastores/:name/getUnfinishedDatasetUploadsForUser   controllers.WKRemoteDataStoreController.getUnfinishedDatasetUploadsForUser(name: String, key: String, token: String, organizationName: String)
-POST /datastores/:name/reportDatasetUpload                  controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, token: String, datasetId: ObjectId)
-POST /datastores/:name/reportMagUpload                      controllers.WKRemoteDataStoreController.reportMagUpload(name: String, key: String, token: String)
+POST /datastores/:name/reportDatasetUpload                  controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, datasetId: ObjectId)
+POST /datastores/:name/reportMagUpload                      controllers.WKRemoteDataStoreController.reportMagUpload(name: String, key: String)
 POST /datastores/:name/reportAttachmentUpload               controllers.WKRemoteDataStoreController.reportAttachmentUpload(name: String, key: String, token: String)
 POST /datastores/:name/deleteDataset                        controllers.WKRemoteDataStoreController.deleteDataset(name: String, key: String)
 GET  /datastores/:name/findDatasetId                        controllers.WKRemoteDataStoreController.findDatasetId(name: String, key: String, datasetDirectoryName: String, organizationId: String)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
index c82a0e33424..47ca3ab2cfb 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
@@ -114,13 +114,11 @@ class DSRemoteWebknossosClient @Inject()(
   def reportMagUpload(parameters: ReportMagUploadParameters)(implicit tc: TokenContext): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportMagUpload")
       .addQueryParam("key", dataStoreKey)
-      .withTokenFromContext
       .postJson[ReportMagUploadParameters](parameters)

   def reportAttachmentUpload(parameters: ReportAttachmentUploadParameters)(implicit tc: TokenContext): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportAttachmentUpload")
       .addQueryParam("key", dataStoreKey)
-      .withTokenFromContext
       .postJson[ReportAttachmentUploadParameters](parameters)

   def reportDataSources(dataSources: List[DataSource], organizationId: Option[String]): Fox[_] =
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 9d1a96e531b..3176bdd991c 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -27,7 +27,6 @@ import com.scalableminds.webknossos.datastore.services.{DSRemoteWebknossosClient
 import com.scalableminds.webknossos.datastore.storage.DataVaultService
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.commons.io.FileUtils
-import org.apache.pekko.http.scaladsl.model.headers.ContentDispositionTypes.attachment
 import play.api.libs.json.{Json, OFormat}
 import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest

@@ -140,18 +139,6 @@ object LinkedLayerIdentifier {
   implicit val jsonFormat: OFormat[LinkedLayerIdentifier] = Json.format[LinkedLayerIdentifier]
 }

-// TODO move to Legacy finishUpload, unpack uploadId
-case class LegacyUploadInformation(uploadId: String)
-
-object LegacyUploadInformation {
-  implicit val jsonFormat: OFormat[LegacyUploadInformation] = Json.format[LegacyUploadInformation]
-}
-
-case class LegacyCancelUploadInformation(uploadId: String)
-object LegacyCancelUploadInformation {
-  implicit val jsonFormat: OFormat[LegacyCancelUploadInformation] = Json.format[LegacyCancelUploadInformation]
-}
-
 class UploadService @Inject()(dataSourceService: DataSourceService,
                               datasetUploadMetadataStore: DatasetUploadMetadataStore,
                               magUploadMetadataStore: MagUploadMetadataStore,

From fc6feb73f21199dfd108e2cf9938329c56921bf0 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 14:56:24 +0200
Subject: [PATCH 28/37] unused param

---
 conf/webknossos.latest.routes                               | 4 ++--
 .../datastore/services/DSRemoteWebknossosClient.scala       | 4 ++--
 .../datastore/services/uploading/UploadService.scala        | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/conf/webknossos.latest.routes b/conf/webknossos.latest.routes
index bf053b997e7..7340bdf73ec 100644
--- a/conf/webknossos.latest.routes
+++ b/conf/webknossos.latest.routes
@@ -143,9 +143,9 @@ POST /datastores/:name/reserveDatasetUpload
 POST /datastores/:name/reserveMagUpload                     controllers.WKRemoteDataStoreController.reserveMagUpload(name: String, key: String, token: String)
 POST /datastores/:name/reserveAttachmentUpload              controllers.WKRemoteDataStoreController.reserveAttachmentUpload(name: String, key: String, token: String)
 GET  /datastores/:name/getUnfinishedDatasetUploadsForUser   controllers.WKRemoteDataStoreController.getUnfinishedDatasetUploadsForUser(name: String, key: String, token: String, organizationName: String)
-POST /datastores/:name/reportDatasetUpload                  controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, datasetId: ObjectId)
+POST /datastores/:name/reportDatasetUpload                  controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, token: String, datasetId: ObjectId)
 POST /datastores/:name/reportMagUpload                      controllers.WKRemoteDataStoreController.reportMagUpload(name: String, key: String)
-POST /datastores/:name/reportAttachmentUpload               controllers.WKRemoteDataStoreController.reportAttachmentUpload(name: String, key: String, token: String)
+POST /datastores/:name/reportAttachmentUpload               controllers.WKRemoteDataStoreController.reportAttachmentUpload(name: String, key: String)
 POST /datastores/:name/deleteDataset                        controllers.WKRemoteDataStoreController.deleteDataset(name: String, key: String)
 GET  /datastores/:name/findDatasetId                        controllers.WKRemoteDataStoreController.findDatasetId(name: String, key: String, datasetDirectoryName: String, organizationId: String)
 GET  /datastores/:name/jobExportProperties                  controllers.WKRemoteDataStoreController.jobExportProperties(name: String, key: String, jobId: ObjectId)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
index 47ca3ab2cfb..66037dd7395 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebknossosClient.scala
@@ -111,12 +111,12 @@ class DSRemoteWebknossosClient @Inject()(
       .withTokenFromContext
       .postJson[ReportDatasetUploadParameters](parameters)

-  def reportMagUpload(parameters: ReportMagUploadParameters)(implicit tc: TokenContext): Fox[_] =
+  def reportMagUpload(parameters: ReportMagUploadParameters): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportMagUpload")
       .addQueryParam("key", dataStoreKey)
       .postJson[ReportMagUploadParameters](parameters)

-  def reportAttachmentUpload(parameters: ReportAttachmentUploadParameters)(implicit tc: TokenContext): Fox[_] =
+  def reportAttachmentUpload(parameters: ReportAttachmentUploadParameters): Fox[_] =
     rpc(s"$webknossosUri/api/datastores/$dataStoreName/reportAttachmentUpload")
       .addQueryParam("key", dataStoreKey)
       .postJson[ReportAttachmentUploadParameters](parameters)
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
index 3176bdd991c..94fa6675f74 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala
@@ -395,7 +395,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
   private def measureDirectorySizeBytes(path: Path): Fox[Long] =
     tryo(FileUtils.sizeOfDirectoryAsBigInteger(path.toFile).longValue).toFox

-  def finishMagUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] =
+  def finishMagUpload(uploadId: String, datasetId: ObjectId): Fox[Unit] =
     for {
       dataSourceId <- magUploadMetadataStore.findDataSourceId(uploadId)
       mag <- magUploadMetadataStore.findMag(uploadId)
@@ -419,7 +419,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
-  def finishAttachmentUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] =
+  def finishAttachmentUpload(uploadId: String, datasetId: ObjectId): Fox[Unit] =
     for {
       dataSourceId <- attachmentUploadMetadataStore.findDataSourceId(uploadId)
       attachment <- attachmentUploadMetadataStore.findAttachment(uploadId)

From a11fd2e8726210e9075855ed3700323081d616ec Mon Sep 17 00:00:00 2001
From: Florian M
Date: Wed, 8 Apr 2026 15:34:59 +0200
Subject: [PATCH 29/37] v13 api backwards compatibility

---
 app/models/dataset/Dataset.scala              |  1 -
 .../controllers/DSLegacyApiController.scala   | 95 +++++++++++++++++--
 .../controllers/UploadController.scala        |  4 +-
 .../conf/datastore.versioned.routes          | 57 +++++++++++
 4 files changed, 144 insertions(+), 13 deletions(-)

diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala
index f4db43c661a..f21e097f716 100755
--- a/app/models/dataset/Dataset.scala
+++ b/app/models/dataset/Dataset.scala
@@ -38,7 +38,6 @@ import models.dataset.DatasetCreationType.DatasetCreationType

 import javax.inject.Inject
 import models.organization.OrganizationDAO
-import org.apache.pekko.http.scaladsl.model.headers.ContentDispositionTypes.attachment
 import play.api.i18n.{Messages, MessagesProvider}
 import play.api.libs.json._
 import slick.dbio.DBIO
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala
index 09971e27807..1a6d0e3499e 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala
@@ -15,7 +15,8 @@ import com.scalableminds.webknossos.datastore.services.mesh.FullMeshRequest
 import com.scalableminds.webknossos.datastore.services.uploading.{
   DatasetUploadInfo,
   LinkedLayerIdentifier,
-  ResumableUploadInfo
+  ResumableUploadInfo,
+  UploadDomain
 }
 import com.scalableminds.webknossos.datastore.services.{
   DSRemoteWebknossosClient,
@@ -24,8 +25,9 @@ import com.scalableminds.webknossos.datastore.services.{
   DatasetCache,
   UserAccessRequest
 }
-import play.api.libs.json.{Json, OFormat}
-import play.api.mvc.{Action, AnyContent, PlayBodyParsers, RawBuffer, Result}
+import play.api.libs.Files
+import play.api.libs.json.{JsObject, Json, OFormat}
+import play.api.mvc.{Action, AnyContent, MultipartFormData, PlayBodyParsers, RawBuffer, Result}

 import scala.concurrent.{ExecutionContext, Future}

@@ -41,7 +43,7 @@ object LegacyReserveManualUploadInformation {
     Json.format[LegacyReserveManualUploadInformation]
 }

-case class LegacyReserveUploadInformation(
+case class LegacyReserveUploadInformationV11(
     uploadId: String, // upload id that was also used in chunk upload (this time without file paths)
     name: String, // dataset name
     organization: String,
@@ -51,10 +53,12 @@ case class LegacyReserveUploadInformationV11(
     layersToLink: Option[List[LegacyLinkedLayerIdentifier]],
     initialTeams: List[ObjectId], // team ids
     folderId: Option[ObjectId],
-    requireUniqueName: Option[Boolean]
+    requireUniqueName: Option[Boolean],
+    isVirtual: Option[Boolean], // Only set (to false) for legacy manual uploads
+    needsConversion: Option[Boolean] // None means false
 )
-object LegacyReserveUploadInformation {
-  implicit val jsonFormat: OFormat[LegacyReserveUploadInformation] = Json.format[LegacyReserveUploadInformation]
+object LegacyReserveUploadInformationV11 {
+  implicit val jsonFormat: OFormat[LegacyReserveUploadInformationV11] = Json.format[LegacyReserveUploadInformationV11]
 }

 case class LegacyLinkedLayerIdentifier(organizationId: Option[String],
@@ -77,6 +81,30 @@ object LegacyLinkedLayerIdentifier {
   implicit val jsonFormat: OFormat[LegacyLinkedLayerIdentifier] = Json.format[LegacyLinkedLayerIdentifier]
 }

+case class LegacyUploadInformation(uploadId: String, needsConversion: Option[Boolean])
+
+object LegacyUploadInformation {
+  implicit val jsonFormat: OFormat[LegacyUploadInformation] = Json.format[LegacyUploadInformation]
+}
+
+case class ReserveUploadInformationV13(
+    uploadId: String, // upload id that was also used in chunk upload (this time without file paths)
+    name: String, // dataset name
+    organization: String,
+    totalFileCount: Long,
+    filePaths: Option[List[String]],
+    totalFileSizeInBytes:
Option[Long], + layersToLink: Option[List[LinkedLayerIdentifier]], + initialTeams: List[ObjectId], // team ids + folderId: Option[ObjectId], + requireUniqueName: Option[Boolean], + isVirtual: Option[Boolean], // Only set (to false) for legacy manual uploads + needsConversion: Option[Boolean] // None means false +) +object ReserveUploadInformationV13 { + implicit val jsonFormat: OFormat[ReserveUploadInformationV13] = Json.format[ReserveUploadInformationV13] +} + class DSLegacyApiController @Inject()( accessTokenService: DataStoreAccessTokenService, remoteWebknossosClient: DSRemoteWebknossosClient, @@ -94,8 +122,57 @@ class DSLegacyApiController @Inject()( override def allowRemoteOrigin: Boolean = true - def reserveUploadV11(): Action[LegacyReserveUploadInformation] = - Action.async(validateJson[LegacyReserveUploadInformation]) { implicit request => + def testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String): Action[AnyContent] = + uploadController.testChunk(resumableChunkNumber, resumableIdentifier, UploadDomain.dataset.toString) + + def finishUploadV13(): Action[LegacyUploadInformation] = Action.async(validateJson[LegacyUploadInformation]) { + implicit request => + for { + result <- uploadController.finishUpload(UploadDomain.dataset.toString, request.body.uploadId)( + request.withBody(play.api.mvc.AnyContentAsEmpty)) + } yield if (result.header.status == OK) { + result.body match { + case play.api.http.HttpEntity.Strict(data, _) => + val json = Json.parse(data.toArray).as[JsObject] + Ok((json - "datasetId") ++ Json.obj("newDatasetId" -> (json \ "datasetId").get)) + case _ => result + } + } else result + } + + def reserveDatasetUploadV13(): Action[ReserveUploadInformationV13] = + Action.async(validateJson[ReserveUploadInformationV13]) { implicit request => + uploadController.reserveDatasetUpload()( + request.withBody(DatasetUploadInfo( + resumableUploadInfo = ResumableUploadInfo( + uploadId = request.body.uploadId, + totalFileCount = request.body.totalFileCount, + filePaths = request.body.filePaths, + totalFileSizeInBytes = request.body.totalFileSizeInBytes + ), + datasetName = request.body.name, + organizationId = request.body.organization, + layersToLink = request.body.layersToLink, + initialTeamIds = request.body.initialTeams, + folderId = request.body.folderId, + requireUniqueName = request.body.requireUniqueName, + isVirtual = request.body.isVirtual, + needsConversion = request.body.needsConversion + ))) + } + + def uploadChunkV13(): Action[MultipartFormData[Files.TemporaryFile]] = + Action.async(parse.multipartFormData) { implicit request => + uploadController.uploadChunk(UploadDomain.dataset.toString)(request) + } + + def getUnfinishedUploadsV13(organizationName: String): Action[AnyContent] = + Action.async { implicit request => + uploadController.getUnfinishedUploads(organizationName, UploadDomain.dataset.toString)(request) + } + + def reserveUploadV11(): Action[LegacyReserveUploadInformationV11] = + Action.async(validateJson[LegacyReserveUploadInformationV11]) { implicit request => accessTokenService.validateAccessFromTokenContext( UserAccessRequest.administrateDatasets(request.body.organization)) { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala index ce9f868b436..add3636fc49 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala +++ 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/UploadController.scala @@ -170,7 +170,6 @@ class UploadController @Inject()( } yield result } - // TODO legacy: still needs uploadId as body def finishUpload(uploadDomain: String, uploadId: String): Action[AnyContent] = Action.async { implicit request => log(Some(slackNotificationService.noticeFailedUploadRequest)) { logTime(slackNotificationService.noticeSlowRequest) { @@ -185,14 +184,13 @@ class UploadController @Inject()( case UploadDomain.mag => uploadService.finishMagUpload(uploadId, datasetId) case UploadDomain.attachment => uploadService.finishAttachmentUpload(uploadId, datasetId) }) ?~> Messages("dataset.upload.finishFailed", datasetId) - } yield Ok(Json.obj("datasetId" -> datasetId)) // TODO legacy needs to return this as "newDatasetid" + } yield Ok(Json.obj("datasetId" -> datasetId)) } } yield response } } } - // TODO legacy route needs to take uploadId as body def cancelUpload(uploadDomain: String, uploadId: String): Action[AnyContent] = Action.async { implicit request => for { uploadDomainValidated <- UploadDomain.fromString(uploadDomain).toFox diff --git a/webknossos-datastore/conf/datastore.versioned.routes b/webknossos-datastore/conf/datastore.versioned.routes index edc1656e6f4..9c531574585 100644 --- a/webknossos-datastore/conf/datastore.versioned.routes +++ b/webknossos-datastore/conf/datastore.versioned.routes @@ -3,15 +3,42 @@ -> /v14/ datastore.latest.Routes +# Dataset upload +GET /v13/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v13/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +POST /v13/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveDatasetUploadV13() +GET /v13/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v13/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() + -> /v13/ datastore.latest.Routes + +# Dataset upload +GET /v12/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v12/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +POST /v12/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveDatasetUploadV13() +GET /v12/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v12/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() + -> /v12/ datastore.latest.Routes +# Dataset upload +GET /v11/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v11/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v11/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v11/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# 
Dataset upload (v11 and older is separate!) POST /v11/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v11/ datastore.latest.Routes POST /v10/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v10/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v10/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v10/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v10/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) POST /v10/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v10/ datastore.latest.Routes @@ -59,6 +86,12 @@ GET /v9/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v9/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v9/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v9/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) POST /v9/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v9/ datastore.latest.Routes @@ -107,6 +140,12 @@ GET /v8/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v8/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v8/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v8/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) 
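+# (reserveUpload stays version-specific: v12 and newer send the ReserveUploadInformationV13
+# body handled by reserveDatasetUploadV13, while v11 and older still send the legacy
+# payload that reserveUploadV11 adapts.)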
POST /v8/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v8/ datastore.latest.Routes @@ -154,6 +193,12 @@ GET /v7/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v7/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v7/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v7/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) POST /v7/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v7/ datastore.latest.Routes @@ -201,6 +246,12 @@ GET /v6/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v6/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v6/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v6/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) POST /v6/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v6/ datastore.latest.Routes @@ -246,6 +297,12 @@ GET /v5/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v5/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() +# Dataset upload +GET /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v5/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v5/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +# Dataset upload (v11 and older is separate!) 
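+# (The GET route on /vN/datasets is the resumable.js-style chunk test, checking whether
+# a chunk was already received; the POST route uploads the chunk itself.)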
POST /v5/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v5/ datastore.latest.Routes From 173fedd51693814b1b9f6243cdcd0af4c430872c Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 09:07:45 +0200 Subject: [PATCH 30/37] add unversioned upload/test chunk routes for compat with old libs --- .../conf/datastore.versioned.routes | 44 ++++++++++--------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/webknossos-datastore/conf/datastore.versioned.routes b/webknossos-datastore/conf/datastore.versioned.routes index 9c531574585..bca20dfd0af 100644 --- a/webknossos-datastore/conf/datastore.versioned.routes +++ b/webknossos-datastore/conf/datastore.versioned.routes @@ -87,10 +87,10 @@ GET /v9/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v9/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() # Dataset upload -GET /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) -POST /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() -GET /v9/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) -POST /v9/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +GET /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v9/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v9/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v9/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() # Dataset upload (v11 and older is separate!) 
POST /v9/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() @@ -141,10 +141,10 @@ GET /v8/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v8/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() # Dataset upload -GET /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) -POST /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() -GET /v8/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) -POST /v8/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +GET /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v8/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v8/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v8/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() # Dataset upload (v11 and older is separate!) POST /v8/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() @@ -194,10 +194,10 @@ GET /v7/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v7/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() # Dataset upload -GET /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) -POST /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() -GET /v7/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) -POST /v7/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +GET /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v7/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v7/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v7/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() # Dataset upload (v11 and older is separate!) 
POST /v7/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() @@ -247,10 +247,10 @@ GET /v6/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v6/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() # Dataset upload -GET /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) -POST /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() -GET /v6/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) -POST /v6/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +GET /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v6/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v6/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v6/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() # Dataset upload (v11 and older is separate!) POST /v6/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() @@ -298,12 +298,16 @@ GET /v5/zarr3_experimental/:organizationId/:datasetDirectoryName/:data POST /v5/datasets/reserveManualUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveManualUploadV10() # Dataset upload -GET /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) -POST /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() -GET /v5/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) -POST /v5/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() +GET /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /v5/datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() +GET /v5/datasets/unfinishedUploads @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.getUnfinishedUploadsV13(organizationName: String) +POST /v5/datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.finishUploadV13() # Dataset upload (v11 and older is separate!) POST /v5/datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.reserveUploadV11() -> /v5/ datastore.latest.Routes -> / datastore.latest.Routes + +# Dataset upload without version number: Old libs clients perform the chunk requests without version number. 
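+# These unversioned routes delegate to the same v13 shim handlers as the versioned
+# routes above, so chunk requests from such clients keep working.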
+GET /datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.testChunkV13(resumableChunkNumber: Int, resumableIdentifier: String) +POST /datasets @com.scalableminds.webknossos.datastore.controllers.DSLegacyApiController.uploadChunkV13() From 131554605e88dbdce04a681492fb491d26d81fd0 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 09:42:51 +0200 Subject: [PATCH 31/37] WIP handle pending; clean up signatures in RedisTemporaryStore --- app/controllers/DatasetController.scala | 2 + .../WKRemoteDataStoreController.scala | 12 +- app/models/dataset/Dataset.scala | 107 +++++++++++++++++- app/models/dataset/UploadToPathsService.scala | 1 + .../uploading/UploadMetadataStore.scala | 17 ++- .../services/uploading/UploadService.scala | 19 ++-- .../storage/RedisTemporaryStore.scala | 11 +- 7 files changed, 139 insertions(+), 30 deletions(-) diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala index 352ff33be18..5ea540c8372 100755 --- a/app/controllers/DatasetController.scala +++ b/app/controllers/DatasetController.scala @@ -727,6 +727,7 @@ class DatasetController @Inject()(userService: UserService, dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag) + // TODO assert pending exists dataStoreClient <- datasetService.clientFor(dataset) _ <- Fox.runIf(!dataset.isVirtual) { for { @@ -753,6 +754,7 @@ class DatasetController @Inject()(userService: UserService, for { dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN + // TODO assert pending exists _ <- datasetLayerAttachmentsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.attachmentType, diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 0517b505927..fed382f4829 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -110,6 +110,7 @@ class WKRemoteDataStoreController @Inject()( (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) _ <- Fox.fromBool(!dataLayer.mags.exists(_.mag.maxDim == request.body.mag.mag.maxDim)) ?~> s"New mag ${request.body.mag.mag} conflicts with existing mag of the layer." _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." + // TODO if pending exists, and overwritePending is set, remove it and delete its paths _ <- datasetMagDAO.insertWithUploadPending(request.body.datasetId, request.body.layerName, request.body.mag.mag, @@ -133,6 +134,7 @@ class WKRemoteDataStoreController @Inject()( _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}" _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." 
dummyAttachmentPath <- UPath.fromString("").toFox + // TODO if pending exists, and overwritePending is set, remove it and delete its paths _ <- datasetAttachmentDAO.insertWithUploadPending( request.body.datasetId, request.body.layerName, @@ -212,7 +214,9 @@ class WKRemoteDataStoreController @Inject()( dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages( "dataset.notFound", request.body.datasetId) ~> NOT_FOUND - // TODO assert pending exists? + _ <- datasetMagDAO.findOneWithPendingUpload(request.body.datasetId, + request.body.layerName, + request.body.mag.mag) ?~> "dataset.finishMagUpload.notPending" _ <- request.body.mag.path.toFox ?~> "dataset.finishMagUpload.pathNotSet" _ <- datasetMagDAO.finishUpload(request.body.datasetId, request.body.layerName, request.body.mag) dataStoreClient <- datasetService.clientFor(dataset)(GlobalAccessContext) @@ -228,7 +232,11 @@ class WKRemoteDataStoreController @Inject()( dataset <- datasetDAO.findOne(request.body.datasetId)(GlobalAccessContext) ?~> Messages( "dataset.notFound", request.body.datasetId) ~> NOT_FOUND - // TODO assert pending exists? + _ <- datasetAttachmentDAO.findOneWithPendingUpload( + request.body.datasetId, + request.body.layerName, + request.body.attachmentType, + request.body.attachment.name) ?~> "dataset.finishAttachmentUpload.notPending" _ <- datasetAttachmentDAO.finishUpload(request.body.datasetId, request.body.layerName, request.body.attachmentType, diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala index f21e097f716..4c9bc88612e 100755 --- a/app/models/dataset/Dataset.scala +++ b/app/models/dataset/Dataset.scala @@ -1003,6 +1003,34 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex ) } yield () + def findOneWithPendingUpload(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[MagLocator] = + for { + rows <- run( + q"""SELECT _dataset, dataLayerName, mag, path, realPath, hasLocalData, axisOrder, channelIndex, credentialId, uploadToPathIsPending, uploadIsPending + FROM webknossos.dataset_mags + WHERE _dataset = $datasetId + AND dataLayerName = $layerName + AND mag = $mag::webknossos.VECTOR3 + AND uploadIsPending + LIMIT 1""".as[DatasetMagsRow]) + row <- rows.headOption.toFox + magLocator <- parseMagLocator(row) + } yield magLocator + + def findOneWithPendingUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[MagLocator] = + for { + rows <- run( + q"""SELECT _dataset, dataLayerName, mag, path, realPath, hasLocalData, axisOrder, channelIndex, credentialId, uploadToPathIsPending, uploadIsPending + FROM webknossos.dataset_mags + WHERE _dataset = $datasetId + AND dataLayerName = $layerName + AND mag = $mag::webknossos.VECTOR3 + AND uploadToPathIsPending + LIMIT 1""".as[DatasetMagsRow]) + row <- rows.headOption.toFox + magLocator <- parseMagLocator(row) + } yield magLocator + def findMagLocatorPathWithPendingUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[UPath] = for { rows <- run(q"""SELECT path @@ -1020,10 +1048,19 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex def deleteMagLocatorWithUploadToPathPending(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] = for { _ <- run(q"""DELETE FROM webknossos.dataset_mags - WHERE _dataset = $datasetId - AND dataLayerName = $layerName - AND mag = $mag::webknossos.VECTOR3 - AND uploadToPathIsPending""".asUpdate) + WHERE _dataset = $datasetId + AND dataLayerName = $layerName + AND mag = 
$mag::webknossos.VECTOR3 + AND uploadToPathIsPending""".asUpdate) + } yield () + + def deleteMagLocatorWithUploadPending(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] = + for { + _ <- run(q"""DELETE FROM webknossos.dataset_mags + WHERE _dataset = $datasetId + AND dataLayerName = $layerName + AND mag = $mag::webknossos.VECTOR3 + AND uploadIsPending""".asUpdate) } yield () } @@ -1361,6 +1398,68 @@ class DatasetLayerAttachmentDAO @Inject()(sqlClient: SqlClient)(implicit ec: Exe """.asUpdate) } yield () + def findOneWithPendingUpload(datasetId: ObjectId, + layerName: String, + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[LayerAttachment] = + for { + rows <- run( + q"""SELECT _dataset, layerName, name, path, realpath, hasLocalData, type, dataFormat, uploadToPathIsPending, uploadIsPending + FROM webknossos.dataset_layer_attachments + WHERE _dataset = $datasetId + AND layerName = $layerName + AND type = $attachmentType + AND name = $attachmentName + AND uploadIsPending + LIMIT 1""".as[DatasetLayerAttachmentsRow]) + row <- rows.headOption.toFox + attachment <- parseRow(row) + } yield attachment + + def findOneWithPendingUploadToPath(datasetId: ObjectId, + layerName: String, + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[LayerAttachment] = + for { + rows <- run( + q"""SELECT _dataset, layerName, name, path, realpath, hasLocalData, type, dataFormat, uploadToPathIsPending, uploadIsPending + FROM webknossos.dataset_layer_attachments + WHERE _dataset = $datasetId + AND layerName = $layerName + AND type = $attachmentType + AND name = $attachmentName + AND uploadToPathIsPending + LIMIT 1""".as[DatasetLayerAttachmentsRow]) + row <- rows.headOption.toFox + attachment <- parseRow(row) + } yield attachment + + def deleteAttachmentWithUploadToPathPending(datasetId: ObjectId, + layerName: String, + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[Unit] = + for { + _ <- run(q"""DELETE FROM webknossos.dataset_layer_attachments + WHERE _dataset = $datasetId + AND layerName = $layerName + AND type = $attachmentType + AND name = $attachmentName + AND uploadToPathIsPending""".asUpdate) + } yield () + + def deleteAttachmentWithUploadPending(datasetId: ObjectId, + layerName: String, + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[Unit] = + for { + _ <- run(q"""DELETE FROM webknossos.dataset_layer_attachments + WHERE _dataset = $datasetId + AND layerName = $layerName + AND type = $attachmentType + AND name = $attachmentName + AND uploadIsPending""".asUpdate) + } yield () + def finishUpload(datasetId: ObjectId, layerName: String, attachmentType: LayerAttachmentType.Value, diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala index 78ec2ef697a..de2f0e0540a 100644 --- a/app/models/dataset/UploadToPathsService.scala +++ b/app/models/dataset/UploadToPathsService.scala @@ -252,6 +252,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, mp: MessagesProvider): Fox[UPath] = for { _ <- datasetService.usableDataSourceFor(dataset) + // TODO overwrite pending functionality here too? 
isSingletonAttachment = LayerAttachmentType.isSingletonAttachment(parameters.attachmentType) existingAttachmentsCount <- datasetLayerAttachmentsDAO.countAttachmentsIncludingPending( dataset._id, diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index bc4c0d463ed..1f9fcf6de18 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -61,16 +61,16 @@ trait UploadMetadataStore extends FoxImplicits { store.findParsed[Seq[String]](redisKeyForFilePaths(uploadId)) // TODO make this Fox[Long]? - def findTotalFileSizeInBytes(uploadId: String): Fox[Option[Long]] = + def findTotalFileSizeInBytes(uploadId: String): Fox[Long] = store.findLong(redisKeyForTotalFileSizeInBytes(uploadId)) - def findFileCount(uploadId: String): Fox[Option[Long]] = + def findFileCount(uploadId: String): Fox[Long] = store.findLong(redisKeyForFileCount(uploadId)) def findFileNames(uploadId: String): Fox[Set[String]] = store.findSet(redisKeyForFileNameSet(uploadId)) - def findFileChunkCount(uploadId: String, filePath: String): Fox[Option[Long]] = + def findFileChunkCount(uploadId: String, filePath: String): Fox[Long] = store.findLong(redisKeyForFileChunkCount(uploadId, filePath)) def findFileChunkSet(uploadId: String, filePath: String): Fox[Set[String]] = @@ -146,8 +146,7 @@ class DatasetUploadMetadataStore @Inject()(protected val store: DataStoreRedisSt private def redisKeyForNeedsConversion(uploadId: String): String = s"$keyPrefix${uploadId}___needsConversion" - // TODO make this Fox[String]? 
- def findUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[Option[String]] = + def findUploadIdByDataSourceId(dataSourceId: DataSourceId): Fox[String] = store.find(redisKeyForUploadIdByDataSourceId(dataSourceId)) def findLinkedLayerIdentifiers(uploadId: String)(implicit ec: ExecutionContext): Fox[Seq[LinkedLayerIdentifier]] = @@ -196,8 +195,8 @@ class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) def findMag(uploadId: String)(implicit ec: ExecutionContext): Fox[MagLocator] = store.findParsed[MagLocator](redisKeyForMag(uploadId)) - def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] = - store.find(redisKeyForLayerName(uploadId)).map(_.toFox).flatten + def findLayerName(uploadId: String): Fox[String] = + store.find(redisKeyForLayerName(uploadId)) } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { @@ -227,6 +226,6 @@ class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedi def findAttachmentType(uploadId: String)(implicit ec: ExecutionContext): Fox[LayerAttachmentType] = store.findParsed[LayerAttachmentType](redisKeyForAttachmentType(uploadId)) - def findLayerName(uploadId: String)(implicit ec: ExecutionContext): Fox[String] = - store.find(redisKeyForLayerName(uploadId)).map(_.toFox).flatten + def findLayerName(uploadId: String): Fox[String] = + store.find(redisKeyForLayerName(uploadId)) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 94fa6675f74..85fe71fd221 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -46,7 +46,6 @@ object ResumableUploadInfo { implicit val jsonFormat: OFormat[ResumableUploadInfo] = Json.format[ResumableUploadInfo] } -// TODO build from legacy param set for LegacyApiController case class DatasetUploadInfo( resumableUploadInfo: ResumableUploadInfo, datasetName: String, @@ -245,8 +244,10 @@ class UploadService @Inject()(dataSourceService: DataSourceService, unfinishedUploadsWithoutIds.map( unfinishedUpload => { for { - uploadIdOpt <- datasetUploadMetadataStore.findUploadIdByDataSourceId(unfinishedUpload.dataSourceId) - updatedUploadOpt = uploadIdOpt.map(uploadId => unfinishedUpload.copy(uploadId = uploadId)) + uploadIdBox <- datasetUploadMetadataStore + .findUploadIdByDataSourceId(unfinishedUpload.dataSourceId) + .shiftBox + updatedUploadOpt = uploadIdBox.toOption.map(uploadId => unfinishedUpload.copy(uploadId = uploadId)) updatedUploadWithFilePathsOpt <- Fox.runOptional(updatedUploadOpt)(updatedUpload => for { filePaths <- datasetUploadMetadataStore.findFilePaths(updatedUpload.uploadId) @@ -451,8 +452,10 @@ class UploadService @Inject()(dataSourceService: DataSourceService, uploadDomain: UploadDomain): Fox[Unit] = { val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) for { - totalFileSizeInBytesOpt <- uploadMetadataStore.findTotalFileSizeInBytes(uploadId) ?~> "Could not look up reserved total file size" - _ <- totalFileSizeInBytesOpt.map { reservedFileSize => + totalFileSizeInBytesBox <- uploadMetadataStore + .findTotalFileSizeInBytes(uploadId) + .shiftBox ?~> "Could not look up reserved total file size" + _ <- totalFileSizeInBytesBox.map { 
reservedFileSize => for { actualFileSize <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(uploadDir.toFile).longValue).toFox ?~> "Could not measure actual file size" _ <- if (actualFileSize > reservedFileSize) { @@ -700,14 +703,12 @@ class UploadService @Inject()(dataSourceService: DataSourceService, private def checkAllChunksUploaded(uploadId: String, uploadDomain: UploadDomain): Fox[Unit] = { val uploadMetadataStore = selectUploadMetadataStore(uploadDomain) for { - fileCountOpt <- uploadMetadataStore.findFileCount(uploadId) ?~> "Could not look up reserved file count." - fileCount <- fileCountOpt.toFox ?~> "Could not look up reserved file count." + fileCount <- uploadMetadataStore.findFileCount(uploadId) ?~> "Could not look up reserved file count." fileNames <- uploadMetadataStore.findFileNames(uploadId) ?~> "Could not look up reserved file names." _ <- Fox.fromBool(fileCount == fileNames.size) ?~> "Reserved file count does not match file names length." _ <- Fox.serialCombined(fileNames) { fileName => for { - chunkCountOpt <- uploadMetadataStore.findFileChunkCount(uploadId, fileName) ?~> "Could not look up file chunk count." - chunkCount <- chunkCountOpt.toFox + chunkCount <- uploadMetadataStore.findFileChunkCount(uploadId, fileName) ?~> "Could not look up file chunk count." chunkSet <- uploadMetadataStore.findFileChunkSet(uploadId, fileName) ?~> "Could not look up file chunk set." _ <- Fox.fromBool(chunkCount == chunkSet.size) ?~> s"Chunks missing for uploaded file $fileName: expected $chunkCount, got ${chunkSet.size}." } yield () diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala index a2afc42be79..c3b11d70af2 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/RedisTemporaryStore.scala @@ -15,11 +15,11 @@ trait RedisTemporaryStore extends LazyLogging with FoxImplicits { lazy val authority: String = f"$address:$port" private lazy val r = new RedisClientPool(address, port) - def find(id: String): Fox[Option[String]] = - withExceptionHandler(_.get(id)) + def find(id: String): Fox[String] = + withExceptionHandler(_.get(id)).map(_.toFox).flatten - def findLong(id: String): Fox[Option[Long]] = - withExceptionHandler(_.get(id).map(s => s.toLong)) + def findLong(id: String): Fox[Long] = + withExceptionHandler(_.get(id).map(s => s.toLong)).map(_.toFox).flatten def removeAllConditional(pattern: String): Fox[Unit] = withExceptionHandler { client => @@ -84,8 +84,7 @@ trait RedisTemporaryStore extends LazyLogging with FoxImplicits { def findParsed[T: Reads](key: String)(implicit ec: ExecutionContext): Fox[T] = for { - objectStringOption <- find(key) - objectString <- objectStringOption.toFox + objectString <- find(key) parsed <- JsonHelper.parseAs[T](objectString).toFox } yield parsed From 3db30910c1256e2057bbf7b8484add662a850616 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 13:55:12 +0200 Subject: [PATCH 32/37] cleanup, overwritePending --- app/controllers/DatasetController.scala | 11 +- .../WKRemoteDataStoreController.scala | 12 +- app/models/dataset/Dataset.scala | 56 ++-------- app/models/dataset/UploadToPathsService.scala | 85 +++++++++----- .../datastore/helpers/DatasetDeleter.scala | 6 +- .../uploading/UploadMetadataStore.scala | 1 - .../services/uploading/UploadService.scala | 105 
+++++++------- 7 files changed, 151 insertions(+), 125 deletions(-) diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala index 5ea540c8372..d0199b840f5 100755 --- a/app/controllers/DatasetController.scala +++ b/app/controllers/DatasetController.scala @@ -118,7 +118,8 @@ case class ReserveAttachmentUploadToPathRequest( attachmentName: String, attachmentType: LayerAttachmentType.Value, attachmentDataformat: LayerAttachmentDataformat.Value, - pathPrefix: Option[UPath] + pathPrefix: Option[UPath], + overwritePending: Option[Boolean] = None ) object ReserveAttachmentUploadToPathRequest { @@ -727,7 +728,7 @@ class DatasetController @Inject()(userService: UserService, dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN + _ <- datasetMagsDAO.findOneWithPendingUploadToPath(datasetId, request.body.layerName, request.body.mag) ?~> "dataset.finishMagUploadToPath.notPending" _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag) - // TODO assert pending exists dataStoreClient <- datasetService.clientFor(dataset) _ <- Fox.runIf(!dataset.isVirtual) { for { @@ -754,7 +755,11 @@ class DatasetController @Inject()(userService: UserService, for { dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN - // TODO assert pending exists + _ <- datasetLayerAttachmentsDAO.findOneWithPendingUploadToPath( + datasetId, + request.body.layerName, + request.body.attachmentType, + request.body.attachmentName) ?~> "dataset.finishAttachmentUploadToPath.notPending" _ <- datasetLayerAttachmentsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.attachmentType, diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index fed382f4829..fcc533f5f4d 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -55,6 +55,7 @@ class WKRemoteDataStoreController @Inject()( jobDAO: JobDAO, datasetMagDAO: DatasetMagDAO, datasetAttachmentDAO: DatasetLayerAttachmentDAO, + uploadToPathsService: UploadToPathsService, credentialDAO: CredentialDAO, wkSilhouetteEnvironment: WkSilhouetteEnvironment)(implicit ec: ExecutionContext, bodyParsers: PlayBodyParsers) extends Controller @@ -110,7 +111,10 @@ class WKRemoteDataStoreController @Inject()( (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) _ <- Fox.fromBool(!dataLayer.mags.exists(_.mag.maxDim == request.body.mag.mag.maxDim)) ?~> s"New mag ${request.body.mag.mag} conflicts with existing mag of the layer." _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore."
- // TODO if pending exists, and overwritePending is set, remove it and delete its paths + _ <- uploadToPathsService.handleExistingPendingMag(dataset, + request.body.layerName, + request.body.mag.mag, + request.body.overwritePending) _ <- datasetMagDAO.insertWithUploadPending(request.body.datasetId, request.body.layerName, request.body.mag.mag, @@ -134,7 +138,11 @@ class WKRemoteDataStoreController @Inject()( _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}" _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." dummyAttachmentPath <- UPath.fromString("").toFox - // TODO if pending exists, and overwritePending is set, remove it and delete its paths + _ <- uploadToPathsService.handleExistingPendingAttachment(dataset, + request.body.layerName, + request.body.attachmentType, + request.body.attachment.name, + request.body.overwritePending) _ <- datasetAttachmentDAO.insertWithUploadPending( request.body.datasetId, request.body.layerName, diff --git a/app/models/dataset/Dataset.scala b/app/models/dataset/Dataset.scala index 4c9bc88612e..dc012417043 100755 --- a/app/models/dataset/Dataset.scala +++ b/app/models/dataset/Dataset.scala @@ -1031,36 +1031,13 @@ class DatasetMagDAO @Inject()(sqlClient: SqlClient)(implicit ec: ExecutionContex magLocator <- parseMagLocator(row) } yield magLocator - def findMagLocatorPathWithPendingUploadToPath(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[UPath] = - for { - rows <- run(q"""SELECT path - FROM webknossos.dataset_mags - WHERE _dataset = $datasetId - AND dataLayerName = $layerName - AND mag = $mag::webknossos.VECTOR3 - AND uploadToPathIsPending - AND path IS NOT NULL - """.as[String]) - first <- rows.headOption.toFox - firstAsUpath <- UPath.fromString(first).toFox - } yield firstAsUpath - - def deleteMagLocatorWithUploadToPathPending(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] = + def deletePendingMagLocator(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] = for { _ <- run(q"""DELETE FROM webknossos.dataset_mags - WHERE _dataset = $datasetId - AND dataLayerName = $layerName - AND mag = $mag::webknossos.VECTOR3 - AND uploadToPathIsPending""".asUpdate) - } yield () - - def deleteMagLocatorWithUploadPending(datasetId: ObjectId, layerName: String, mag: Vec3Int): Fox[Unit] = - for { - _ <- run(q"""DELETE FROM webknossos.dataset_mags - WHERE _dataset = $datasetId - AND dataLayerName = $layerName - AND mag = $mag::webknossos.VECTOR3 - AND uploadIsPending""".asUpdate) + WHERE _dataset = $datasetId + AND dataLayerName = $layerName + AND mag = $mag::webknossos.VECTOR3 + AND (uploadToPathIsPending OR uploadIsPending)""".asUpdate) } yield () } @@ -1434,30 +1411,17 @@ class DatasetLayerAttachmentDAO @Inject()(sqlClient: SqlClient)(implicit ec: Exe attachment <- parseRow(row) } yield attachment - def deleteAttachmentWithUploadToPathPending(datasetId: ObjectId, - layerName: String, - attachmentType: LayerAttachmentType.Value, - attachmentName: String): Fox[Unit] = - for { - _ <- run(q"""DELETE FROM webknossos.dataset_layer_attachments - WHERE _dataset = $datasetId - AND layerName = $layerName - AND type = $attachmentType - AND name = $attachmentName - AND uploadToPathIsPending""".asUpdate) - } yield () - - def deleteAttachmentWithUploadPending(datasetId: ObjectId, - layerName: String, - attachmentType: LayerAttachmentType.Value, - attachmentName: 
String): Fox[Unit] = + def deletePendingAttachment(datasetId: ObjectId, + layerName: String, + attachmentType: LayerAttachmentType.Value, + attachmentName: String): Fox[Unit] = for { _ <- run(q"""DELETE FROM webknossos.dataset_layer_attachments WHERE _dataset = $datasetId AND layerName = $layerName AND type = $attachmentType AND name = $attachmentName - AND uploadIsPending""".asUpdate) + AND (uploadIsPending OR uploadToPathIsPending)""".asUpdate) } yield () def finishUpload(datasetId: ObjectId, diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala index de2f0e0540a..d584c22a7d8 100644 --- a/app/models/dataset/UploadToPathsService.scala +++ b/app/models/dataset/UploadToPathsService.scala @@ -3,7 +3,7 @@ package models.dataset import com.scalableminds.util.accesscontext.{DBAccessContext, GlobalAccessContext} import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.util.objectid.ObjectId -import com.scalableminds.util.tools.{Box, Empty, Failure, Fox, FoxImplicits, Full, TextUtils} +import com.scalableminds.util.tools.{Box, Failure, Fox, FoxImplicits, Full, TextUtils} import com.scalableminds.webknossos.datastore.dataformats.MagLocator import com.scalableminds.webknossos.datastore.helpers.UPath import com.scalableminds.webknossos.datastore.models.datasource.LayerAttachmentDataformat.LayerAttachmentDataformat @@ -45,7 +45,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, dataStoreDAO: DataStoreDAO, layerToLinkService: LayerToLinkService, datasetLayerAttachmentsDAO: DatasetLayerAttachmentDAO, - datasetMagsDAO: DatasetMagDAO, + datasetMagDAO: DatasetMagDAO, pathDeletionService: PathDeletionService, folderDAO: FolderDAO, conf: WkConf) @@ -252,7 +252,11 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, mp: MessagesProvider): Fox[UPath] = for { _ <- datasetService.usableDataSourceFor(dataset) - // TODO overwrite pending functionality here too? 
+ _ <- handleExistingPendingAttachment(dataset, + parameters.layerName, + parameters.attachmentType, + parameters.attachmentName, + parameters.overwritePending.getOrElse(false)) isSingletonAttachment = LayerAttachmentType.isSingletonAttachment(parameters.attachmentType) existingAttachmentsCount <- datasetLayerAttachmentsDAO.countAttachmentsIncludingPending( dataset._id, @@ -280,38 +284,63 @@ mp: MessagesProvider): Fox[UPath] = for { _ <- datasetService.usableDataSourceFor(dataset) - _ <- handleExistingPendingMagIfExists(dataset, parameters.layerName, parameters.mag, parameters.overwritePending) + _ <- handleExistingPendingMag(dataset, parameters.layerName, parameters.mag, parameters.overwritePending) datasetParent <- selectPathPrefixDatasetParent(parameters.pathPrefix, dataset._organization) datasetPath = datasetParent / dataset.directoryName magPath = generateMagPath(parameters.mag, datasetPath / parameters.layerName) - _ <- datasetMagsDAO.insertWithUploadToPathPending(dataset._id, - parameters.layerName, - parameters.mag, - parameters.axisOrder, - parameters.channelIndex, - magPath) + _ <- datasetMagDAO.insertWithUploadToPathPending(dataset._id, + parameters.layerName, + parameters.mag, + parameters.axisOrder, + parameters.channelIndex, + magPath) } yield magPath - private def handleExistingPendingMagIfExists(dataset: Dataset, - layerName: String, - mag: Vec3Int, - overwritePending: Boolean)(implicit ec: ExecutionContext): Fox[Unit] = + def handleExistingPendingMag(dataset: Dataset, layerName: String, mag: Vec3Int, overwritePending: Boolean)( implicit ec: ExecutionContext): Fox[Unit] = for { - existingMagLocatorPathBox <- datasetMagsDAO - .findMagLocatorPathWithPendingUploadToPath(dataset._id, layerName, mag) + withPendingUploadToPathsBox <- datasetMagDAO.findOneWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox + withPendingUploadBox <- datasetMagDAO.findOneWithPendingUpload(dataset._id, layerName, mag).shiftBox + _ <- if (overwritePending) { + for { + _ <- Fox.runOptional(withPendingUploadToPathsBox.toOption) { oldPending => + deletePathsForOldPending(dataset, oldPending.path) + } + _ <- datasetMagDAO.deletePendingMagLocator(dataset._id, layerName, mag) + } yield () + } else + Fox.runIf(withPendingUploadToPathsBox.isDefined || withPendingUploadBox.isDefined) { + Fox.failure("Conflict with existing pending mag.
Pass overwritePending to overwrite.") + } + } yield () + + def handleExistingPendingAttachment(dataset: Dataset, + layerName: String, + attachmentType: LayerAttachmentType, + attachmentName: String, + overwritePending: Boolean)(implicit ec: ExecutionContext): Fox[Unit] = + for { + withPendingUploadToPathsBox <- datasetLayerAttachmentsDAO + .findOneWithPendingUploadToPath(dataset._id, layerName, attachmentType, attachmentName) .shiftBox - _ <- existingMagLocatorPathBox match { - case Full(existingMagLocatorPath) => - if (overwritePending) { - for { - client <- datasetService.clientFor(dataset)(GlobalAccessContext) - _ <- pathDeletionService.deletePaths(client, Seq(existingMagLocatorPath)) - _ <- datasetMagsDAO.deleteMagLocatorWithUploadToPathPending(dataset._id, layerName, mag) - } yield () - } else Fox.failure("dataset.reserveMagUploadToPath.exists") - case Empty => Fox.successful(()) - case f: Failure => f.toFox - } + withPendingUploadBox <- datasetLayerAttachmentsDAO + .findOneWithPendingUpload(dataset._id, layerName, attachmentType, attachmentName) + .shiftBox + _ <- if (overwritePending) { + datasetLayerAttachmentsDAO.deletePendingAttachment(dataset._id, layerName, attachmentType, attachmentName) + } else + Fox.runIf(withPendingUploadToPathsBox.isDefined || withPendingUploadBox.isDefined) { + Fox.failure("Conflict with existing pending attachment. Pass overwritePending to overwrite.") + } } yield () + private def deletePathsForOldPending(dataset: Dataset, pathOpt: Option[UPath])( + implicit ec: ExecutionContext): Fox[_] = + Fox.runOptional(pathOpt) { path => + for { + client <- datasetService.clientFor(dataset)(GlobalAccessContext) + _ <- pathDeletionService.deletePaths(client, Seq(path)) + } yield () + } + } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DatasetDeleter.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DatasetDeleter.scala index 118c178adb4..0c7d8098ee8 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DatasetDeleter.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DatasetDeleter.scala @@ -14,12 +14,10 @@ trait DatasetDeleter extends LazyLogging with DirectoryConstants with FoxImplici def deleteOnDisk(datasetId: ObjectId, organizationId: String, datasetName: String, - isInConversion: Boolean = false, + path: Option[Path] = None, reason: Option[String] = None)(implicit ec: ExecutionContext): Fox[Unit] = { - val dataSourcePath = - if (isInConversion) dataBaseDir.resolve(organizationId).resolve(forConversionDir).resolve(datasetName) - else dataBaseDir.resolve(organizationId).resolve(datasetName) + val dataSourcePath = path.getOrElse(dataBaseDir.resolve(organizationId).resolve(datasetName)) if (Files.exists(dataSourcePath)) { val trashPath: Path = dataBaseDir.resolve(organizationId).resolve(trashDir) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index 1f9fcf6de18..6717baf3f6b 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -60,7 +60,6 @@ trait UploadMetadataStore extends FoxImplicits { def findFilePaths(uploadId: String)(implicit ec: ExecutionContext): 
Fox[Seq[String]] = store.findParsed[Seq[String]](redisKeyForFilePaths(uploadId)) - // TODO make this Fox[Long]? def findTotalFileSizeInBytes(uploadId: String): Fox[Long] = store.findLong(redisKeyForTotalFileSizeInBytes(uploadId)) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 85fe71fd221..2e779848a71 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -195,21 +195,20 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - def reserveMagUpload(magUploadInfo: MagUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = + def reserveMagUpload(magUploadInfo: MagUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = { + val uploadId = magUploadInfo.resumableUploadInfo.uploadId for { - // TODO if overwritePending, cancel pending if exists (disk, redis, postgres) _ <- reserveResumableUpload(magUploadInfo.resumableUploadInfo, magUploadInfo.datasetId, dataSourceId, UploadDomain.mag) - uploadId = magUploadInfo.resumableUploadInfo.uploadId _ <- magUploadMetadataStore.insertMag(uploadId, magUploadInfo.mag.withoutCredentials) _ <- magUploadMetadataStore.insertLayerName(uploadId, magUploadInfo.layerName) } yield () + } def reserveAttachmentUpload(attachmentUploadInfo: AttachmentUploadInfo, dataSourceId: DataSourceId): Fox[Unit] = for { - // TODO if overwritePending, cancel pending if exists (disk, redis, postgres) _ <- reserveResumableUpload(attachmentUploadInfo.resumableUploadInfo, attachmentUploadInfo.datasetId, dataSourceId, @@ -227,7 +226,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { _ <- dataSourceService.ensureDataDirWritable(dataSourceId) uploadId = resumableUploadInfo.uploadId - _ = logger.info(f"Reserving $uploadDomain ${uploadFullName(uploadId, datasetId, dataSourceId)}...") + _ = logger.info(f"Reserving ${uploadFullName(uploadDomain, uploadId, datasetId, dataSourceId)}...") uploadMetadataStore = selectUploadMetadataStore(uploadDomain) _ <- uploadMetadataStore.insertDataSourceId(uploadId, dataSourceId) _ <- uploadMetadataStore.insertDatasetId(uploadId, datasetId) @@ -325,7 +324,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, case e: Exception => uploadMetadataStore.removeFileChunkFromSet(uploadId, filePath, currentChunkNumber) val errorMsg = - s"Error receiving chunk $currentChunkNumber for ${uploadFullName(uploadId, datasetId, dataSourceId)}: ${e.getMessage}" + s"Error receiving chunk $currentChunkNumber for ${uploadFullName(uploadDomain, uploadId, datasetId, dataSourceId)}: ${e.getMessage}" logger.warn(errorMsg) Fox.failure(errorMsg) } @@ -341,22 +340,22 @@ class UploadService @Inject()(dataSourceService: DataSourceService, knownUpload <- uploadMetadataStore.isKnownUpload(uploadId) } yield if (knownUpload) { - logger.info(f"Cancelling ${uploadFullName(uploadId, datasetId, dataSourceId)}...") - cleanUpUploadedDataset(uploadDirectoryFor(dataSourceId.organizationId, uploadId, uploadDomain), - uploadId, - reason = "Cancelled by user", - uploadDomain) + logger.info(f"Cancelling ${uploadFullName(uploadDomain, uploadId, datasetId, dataSourceId)}...") + cleanUpUploaded(uploadId, reason = "Cancelled by user", uploadDomain) } else Fox.failure(s"Unknown upload") } - private 
def uploadFullName(uploadId: String, datasetId: ObjectId, dataSourceId: DataSourceId) = - s"upload $uploadId for dataset $datasetId ($dataSourceId)" + private def uploadFullName(uploadDomain: UploadDomain, + uploadId: String, + datasetId: ObjectId, + dataSourceId: DataSourceId) = + s"upload $uploadId for $uploadDomain (dataset $datasetId - $dataSourceId)" def finishDatasetUpload(uploadId: String, datasetId: ObjectId)(implicit tc: TokenContext): Fox[Unit] = for { dataSourceId <- datasetUploadMetadataStore.findDataSourceId(uploadId) needsConversion <- datasetUploadMetadataStore.findNeedsConversion(uploadId) - _ = logger.info(s"Finishing ${uploadFullName(uploadId, datasetId, dataSourceId)}...") + _ = logger.info(s"Finishing ${uploadFullName(UploadDomain.dataset, uploadId, datasetId, dataSourceId)}...") linkedLayerIdentifiers <- datasetUploadMetadataStore.findLinkedLayerIdentifiers(uploadId) uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, UploadDomain.dataset) _ <- backupRawUploadedData(uploadDir, uploadBackupDirectoryFor(dataSourceId.organizationId, uploadId), datasetId).toFox @@ -364,17 +363,17 @@ class UploadService @Inject()(dataSourceService: DataSourceService, _ <- checkAllChunksUploaded(uploadId, UploadDomain.dataset) ?~> "dataset.upload.allChunksUploadedCheck.failed" unpackToDir = unpackToDirFor(dataSourceId, UploadDomain.dataset, uploadId) unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.dataset).shiftBox - _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset) + _ <- cleanUpUploaded(uploadId, reason = "Upload complete, data unpacked.", UploadDomain.dataset) _ <- cleanUpOnFailure(unpackResult, datasetId, dataSourceId, - needsConversion, - label = s"unpacking to dataset to $unpackToDir") + unpackToDir, + label = s"unpacking dataset to $unpackToDir") postProcessingResult <- exploreUploadedDataSourceIfNeeded(needsConversion, unpackToDir, dataSourceId).shiftBox _ <- cleanUpOnFailure(postProcessingResult, datasetId, dataSourceId, - needsConversion, + unpackToDir, label = s"processing dataset at $unpackToDir") datasetSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed" dataSourceWithAbsolutePathsOpt <- moveUnpackedDatasetToTarget( @@ -406,8 +405,13 @@ class UploadService @Inject()(dataSourceService: DataSourceService, .resolve(mag.mag.toMagLiteral(allowScalar = true)) _ <- checkWithinRequestedFileSize(uploadDir, uploadId, datasetId, UploadDomain.mag) ?~> "dataset.upload.fileSizeCheck.failed" _ <- checkAllChunksUploaded(uploadId, UploadDomain.mag) ?~> "dataset.upload.allChunksUploadedCheck.failed" - // TODO clean up on failure, clean up on success - _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag) + unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.mag).shiftBox + _ <- cleanUpOnFailure(unpackResult, + datasetId, + dataSourceId, + unpackToDir, + label = s"unpacking mag to $unpackToDir") + _ <- cleanUpUploaded(uploadId, reason = "Upload complete, data unpacked.", UploadDomain.mag) magSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed" finalPath <- moveUnpackedMagOrAttachmentToTarget(unpackToDir, layerName, @@ -430,8 +434,13 @@ class UploadService @Inject()(dataSourceService: DataSourceService, unpackToDir = unpackToDirFor(dataSourceId, UploadDomain.attachment, uploadId) _ <- checkWithinRequestedFileSize(uploadDir, uploadId, 
datasetId, UploadDomain.attachment) ?~> "dataset.upload.fileSizeCheck.failed" _ <- checkAllChunksUploaded(uploadId, UploadDomain.attachment) ?~> "dataset.upload.allChunksUploadedCheck.failed" - // TODO clean up on failure, clean up on success - _ <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.attachment) + unpackResult <- unpackOrMoveUploaded(uploadDir, unpackToDir, datasetId, UploadDomain.attachment).shiftBox + _ <- cleanUpOnFailure(unpackResult, + datasetId, + dataSourceId, + unpackToDir, + label = s"unpacking attachment to $unpackToDir") + _ <- cleanUpUploaded(uploadId, reason = "Upload complete, data unpacked.", UploadDomain.attachment) attachmentSizeBytes <- measureDirectorySizeBytes(unpackToDir) ?~> "dataset.upload.measureTotalSize.failed" finalPath <- moveUnpackedMagOrAttachmentToTarget( unpackToDir, @@ -459,7 +468,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, for { actualFileSize <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(uploadDir.toFile).longValue).toFox ?~> "Could not measure actual file size" _ <- if (actualFileSize > reservedFileSize) { - cleanUpDatasetExceedingSize(uploadDir, uploadId, uploadDomain) + cleanUpExceedingSize(uploadId, uploadDomain) Fox.failure( f"Uploaded $uploadDomain $datasetId exceeds the reserved size of $reservedFileSize bytes, got $actualFileSize bytes.") } else Fox.successful(()) @@ -468,12 +477,12 @@ class UploadService @Inject()(dataSourceService: DataSourceService, } yield () } - // TODO adapt for mags/attachments - private def cleanUpDatasetExceedingSize(uploadDir: Path, uploadId: String, uploadDomain: UploadDomain): Fox[Unit] = + private def cleanUpExceedingSize(uploadId: String, uploadDomain: UploadDomain): Fox[Unit] = for { datasetId <- getDatasetIdByUploadId(uploadId, uploadDomain) - _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = "Exceeded reserved fileSize", uploadDomain) - _ <- remoteWebknossosClient.deleteDataset(datasetId) + _ <- cleanUpUploaded(uploadId, reason = "Exceeded reserved fileSize", uploadDomain) + // Datasets need to be cleaned up in postgres as well. 
The other domains don't, since stale entries there are replaced via the overwritePending mechanism.
+      _ <- Fox.runIf(uploadDomain == UploadDomain.dataset)(remoteWebknossosClient.deleteDataset(datasetId))
     } yield ()
 
   private def deleteFilesNotReferencedInDataSource(unpackedDir: Path, dataSource: UsableDataSource): Fox[Unit] =
@@ -675,7 +684,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
   private def cleanUpOnFailure[T](result: Box[T],
                                   datasetId: ObjectId,
                                   dataSourceId: DataSourceId,
-                                  needsConversion: Boolean,
+                                  unpackToDir: Path,
                                   label: String): Fox[Unit] =
     result match {
       case Full(_) =>
@@ -684,7 +693,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
         deleteOnDisk(datasetId,
                      dataSourceId.organizationId,
                      dataSourceId.directoryName,
-                     needsConversion,
+                     Some(unpackToDir),
                      Some("the upload failed"))
         Fox.failure(s"Unknown error $label")
       case Failure(msg, e, _) =>
@@ -692,7 +701,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
         deleteOnDisk(datasetId,
                      dataSourceId.organizationId,
                      dataSourceId.directoryName,
-                     needsConversion,
+                     Some(unpackToDir),
                      Some("the upload failed"))
         remoteWebknossosClient.deleteDataset(datasetId)
         for {
@@ -903,16 +912,16 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
     tryo(FileUtils.copyDirectory(uploadDir.toFile, backupDir.toFile))
   }
 
-  private def cleanUpUploadedDataset(uploadDir: Path,
-                                     uploadId: String,
-                                     reason: String,
-                                     uploadDomain: UploadDomain): Fox[Unit] =
+  private def cleanUpUploaded(uploadId: String, reason: String, uploadDomain: UploadDomain): Fox[Unit] = {
+    val uploadMetadataStore = selectUploadMetadataStore(uploadDomain)
     for {
-      _ <- Fox.successful(logger.info(s"Cleaning up uploaded dataset. Reason: $reason"))
+      dataSourceId <- uploadMetadataStore.findDataSourceId(uploadId)
+      uploadDir = uploadDirectoryFor(dataSourceId.organizationId, uploadId, uploadDomain)
+      _ <- Fox.successful(logger.info(s"Cleaning up uploaded $uploadDir. Reason: $reason"))
       _ <- PathUtils.deleteDirectoryRecursively(uploadDir).toFox
-      uploadMetadataStore = selectUploadMetadataStore(uploadDomain)
       _ <- uploadMetadataStore.cleanUp(uploadId)
     } yield ()
+  }
 
   private def cleanUpOrphanUploads(): Fox[Unit] =
     for {
@@ -920,22 +930,28 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
       _ <- Fox.serialCombined(organizationDirs)(cleanUpOrphanUploadsForOrga)
     } yield ()
 
-  // TODO should also handle attachments/mags
-  private def cleanUpOrphanUploadsForOrga(organizationDir: Path): Fox[Unit] = {
-    val orgaUploadingDir: Path = organizationDir.resolve(uploadingDir)
+  private def cleanUpOrphanUploadsForOrga(organizationDir: Path): Fox[Unit] =
+    for {
+      _ <- cleanUpOrphanUploadsForOrgaAndDomain(organizationDir, UploadDomain.dataset)
+      _ <- cleanUpOrphanUploadsForOrgaAndDomain(organizationDir, UploadDomain.mag)
+      _ <- cleanUpOrphanUploadsForOrgaAndDomain(organizationDir, UploadDomain.attachment)
+    } yield ()
+
+  private def cleanUpOrphanUploadsForOrgaAndDomain(organizationDir: Path, uploadDomain: UploadDomain): Fox[Unit] = {
+    val orgaUploadingDir: Path = organizationDir.resolve(uploadingDir).resolve(uploadDomain.toString)
     if (!Files.exists(orgaUploadingDir))
       Fox.successful(())
     else {
       for {
         uploadDirs <- PathUtils.listDirectories(orgaUploadingDir, silent = false).toFox
         _ <- Fox.serialCombined(uploadDirs) { uploadDir =>
-          datasetUploadMetadataStore.isKnownUpload(uploadDir.getFileName.toString).map {
+          isKnownUploadOfAnyDomain(uploadDir.getFileName.toString).map {
            case false =>
              val deleteResult = PathUtils.deleteDirectoryRecursively(uploadDir)
              if (deleteResult.isDefined) {
-                logger.info(f"Deleted orphan dataset upload at $uploadDir")
+                logger.info(f"Deleted orphan $uploadDomain upload at $uploadDir")
              } else {
-                logger.warn(f"Failed to delete orphan dataset upload at $uploadDir")
+                logger.warn(f"Failed to delete orphan $uploadDomain upload at $uploadDir")
              }
            case true => ()
          }
@@ -944,6 +960,13 @@
     }
   }
 
+  private def isKnownUploadOfAnyDomain(uploadId: String): Fox[Boolean] =
+    for {
+      fromDataset <- datasetUploadMetadataStore.isKnownUpload(uploadId)
+      fromMag <- magUploadMetadataStore.isKnownUpload(uploadId)
+      fromAttachment <- attachmentUploadMetadataStore.isKnownUpload(uploadId)
+    } yield fromDataset || fromMag || fromAttachment
+
 }
 
 object UploadedDataSourceType extends Enumeration {

From b0c8f21471c6ca305244e66427ac1f01f254cc65 Mon Sep 17 00:00:00 2001
From: Florian M
Date: Thu, 9 Apr 2026 14:02:32 +0200
Subject: [PATCH 33/37] fix copy-paste errors

---
 app/controllers/DatasetController.scala           | 2 +-
 app/controllers/WKRemoteDataStoreController.scala | 4 ++--
 app/models/dataset/UploadToPathsService.scala     | 2 +-
 conf/webknossos.versioned.routes                  | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/app/controllers/DatasetController.scala b/app/controllers/DatasetController.scala
index d0199b840f5..33fafc0cc1a 100755
--- a/app/controllers/DatasetController.scala
+++ b/app/controllers/DatasetController.scala
@@ -727,8 +727,8 @@ class DatasetController @Inject()(userService: UserService,
     for {
       dataset <- datasetDAO.findOne(datasetId) ?~> notFoundMessage(datasetId.toString) ~> NOT_FOUND
       _ <- Fox.assertTrue(datasetService.isEditableBy(dataset, Some(request.identity))) ?~> "notAllowed" ~> FORBIDDEN
-      _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag)
       _ <- datasetMagsDAO.findOneWithPendingUploadToPath(datasetId, request.body.layerName, 
request.body.mag) ?~> "dataset.finishMagUploadToPath.notPending" + _ <- datasetMagsDAO.finishUploadToPath(datasetId, request.body.layerName, request.body.mag) dataStoreClient <- datasetService.clientFor(dataset) _ <- Fox.runIf(!dataset.isVirtual) { for { diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index fcc533f5f4d..308c4d2c4b3 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -131,12 +131,12 @@ class WKRemoteDataStoreController @Inject()( for { user <- bearerTokenService.userForToken(token) dataset <- datasetDAO.findOne(request.body.datasetId)(AuthorizedAccessContext(user)) - _ <- Fox.fromBool(dataset.isVirtual) ?~> "dataset.reserveMagUpload.notVirtual" + _ <- Fox.fromBool(dataset.isVirtual) ?~> "dataset.reserveAttachmentUpload.notVirtual" (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) existingAttachmentOpt = dataLayer.attachments.flatMap( _.getByTypeAndName(request.body.attachmentType, request.body.attachment.name)) _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}" - _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload mag to existing dataset via different datastore." + _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload attachment to existing dataset via different datastore." dummyAttachmentPath <- UPath.fromString("").toFox _ <- uploadToPathsService.handleExistingPendingAttachment(dataset, request.body.layerName, diff --git a/app/models/dataset/UploadToPathsService.scala b/app/models/dataset/UploadToPathsService.scala index d584c22a7d8..0bd9085f755 100644 --- a/app/models/dataset/UploadToPathsService.scala +++ b/app/models/dataset/UploadToPathsService.scala @@ -300,7 +300,7 @@ class UploadToPathsService @Inject()(datasetService: DatasetService, implicit ec: ExecutionContext): Fox[Unit] = for { withPendingUploadToPathsBox <- datasetMagDAO.findOneWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox - withPendingUploadBox <- datasetMagDAO.findOneWithPendingUploadToPath(dataset._id, layerName, mag).shiftBox + withPendingUploadBox <- datasetMagDAO.findOneWithPendingUpload(dataset._id, layerName, mag).shiftBox _ <- if (overwritePending) { for { _ <- Fox.runOptional(withPendingUploadToPathsBox.toOption) { oldPending => diff --git a/conf/webknossos.versioned.routes b/conf/webknossos.versioned.routes index be9e92d9204..db0938c73e6 100644 --- a/conf/webknossos.versioned.routes +++ b/conf/webknossos.versioned.routes @@ -4,7 +4,7 @@ # Note: keep this in sync with the reported version numbers in the com.scalableminds.util.mvc.ApiVersioning trait # version log - # chnaged in v14: Dataset upload routes and parameters have been refactored, introduced upload domain + # changed in v14: Dataset upload routes and parameters have been refactored, introduced upload domain # changed in v13: Attachments not mentioned in the dataSource passed to updatePartial will now be deleted. # changed in v12: Dataset upload now expects layersToLink in new format with datasetId instead of orgaId+directoryName # changed in v11: Datasets reserveManualUpload flow via WK side. Note: older versions of the route are *not* supported for security reasons. 
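
For context on the copy-paste fix above: handleExistingPendingMag deliberately queries two distinct pending states (a pending upload-to-path reservation and a pending direct upload) and only rejects a new reservation when overwritePending is not set. Below is a minimal standalone sketch of that decision logic, with plain Option/Either standing in for the Fox/DAO machinery; the case class and function are hypothetical illustrations, not part of this series:

  // Hypothetical, simplified model of the conflict check corrected in PATCH 33.
  final case class PendingMag(path: Option[String])

  def resolvePendingMagConflict(
      pendingUploadToPath: Option[PendingMag], // result of findOneWithPendingUploadToPath
      pendingUpload: Option[PendingMag],       // result of findOneWithPendingUpload (not ...ToPath)
      overwritePending: Boolean
  ): Either[String, Unit] =
    if (overwritePending) {
      // Replacing a stale reservation: first drop any already-registered path,
      // then the pending locator (mirrors deletePathsForOldPending + deletePendingMagLocator).
      pendingUploadToPath.flatMap(_.path).foreach(p => println(s"would delete old path: $p"))
      Right(())
    } else if (pendingUploadToPath.isDefined || pendingUpload.isDefined)
      Left("Conflict with existing pending mag. Pass overwritePending to overwrite.")
    else
      Right(())

Before the fix, both lookups hit findOneWithPendingUploadToPath, so a mag with only a pending direct upload would never trigger the conflict branch.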
From 9e0cd3dfdc9639f4cc7dfc5abac0aa04b10ae140 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 14:05:55 +0200 Subject: [PATCH 34/37] format --- .../controllers/DSLegacyApiController.scala | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala index 1a6d0e3499e..46b86a3154c 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala @@ -130,14 +130,15 @@ class DSLegacyApiController @Inject()( for { result <- uploadController.finishUpload(UploadDomain.dataset.toString, request.body.uploadId)( request.withBody(play.api.mvc.AnyContentAsEmpty)) - } yield if (result.header.status == OK) { - result.body match { - case play.api.http.HttpEntity.Strict(data, _) => - val json = Json.parse(data.toArray).as[JsObject] - Ok((json - "datasetId") ++ Json.obj("newDatasetId" -> (json \ "datasetId").get)) - case _ => result - } - } else result + } yield + if (result.header.status == OK) { + result.body match { + case play.api.http.HttpEntity.Strict(data, _) => + val json = Json.parse(data.toArray).as[JsObject] + Ok((json - "datasetId") ++ Json.obj("newDatasetId" -> (json \ "datasetId").get)) + case _ => result + } + } else result } def reserveDatasetUploadV13(): Action[ReserveUploadInformationV13] = From ead0339690cacfffa3e10c55a15eec825042be80 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 14:31:49 +0200 Subject: [PATCH 35/37] implement feedback --- app/controllers/LegacyApiController.scala | 6 ------ .../WKRemoteDataStoreController.scala | 9 +++++++-- .../test/backend_snapshot_tests/datasets.e2e.ts | 2 +- .../controllers/DSLegacyApiController.scala | 4 ++-- .../datasource/DataLayerAttachments.scala | 17 +++++++++++++---- .../uploading/UploadMetadataStore.scala | 17 +++++++++++++++++ .../services/uploading/UploadService.scala | 2 +- 7 files changed, 41 insertions(+), 16 deletions(-) diff --git a/app/controllers/LegacyApiController.scala b/app/controllers/LegacyApiController.scala index ceca56d89f6..fafe35d3170 100644 --- a/app/controllers/LegacyApiController.scala +++ b/app/controllers/LegacyApiController.scala @@ -46,12 +46,6 @@ object LegacyTaskParameters { implicit val taskParametersFormat: Format[LegacyTaskParameters] = Json.format[LegacyTaskParameters] } -case class LegacyUploadInformation(uploadId: String) - -object LegacyUploadInformation { - implicit val jsonFormat: OFormat[LegacyUploadInformation] = Json.format[LegacyUploadInformation] -} - class LegacyApiController @Inject()(datasetController: DatasetController, projectController: ProjectController, taskController: TaskController, diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala index 308c4d2c4b3..11a1549ee73 100644 --- a/app/controllers/WKRemoteDataStoreController.scala +++ b/app/controllers/WKRemoteDataStoreController.scala @@ -11,6 +11,7 @@ import com.scalableminds.webknossos.datastore.models.datasource.{ DataSource, DataSourceId, DataSourceStatus, + LayerAttachmentType, UnusableDataSource } import com.scalableminds.webknossos.datastore.services.{DataSourcePathInfo, DataStoreStatus} @@ -133,9 +134,11 @@ class WKRemoteDataStoreController @Inject()( 
dataset <- datasetDAO.findOne(request.body.datasetId)(AuthorizedAccessContext(user)) _ <- Fox.fromBool(dataset.isVirtual) ?~> "dataset.reserveAttachmentUpload.notVirtual" (dataSource, dataLayer) <- datasetService.getDataSourceAndLayerFor(dataset, request.body.layerName) + isSingletonAttachment = LayerAttachmentType.isSingletonAttachment(request.body.attachmentType) + existsError = if (isSingletonAttachment) "attachment.singleton.alreadyFilled" else "attachment.name.taken" existingAttachmentOpt = dataLayer.attachments.flatMap( - _.getByTypeAndName(request.body.attachmentType, request.body.attachment.name)) - _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> s"Layer already has ${request.body.attachmentType} attachment named ${request.body.attachment.name}" + _.getByTypeAndNameAlwaysReturnSingletons(request.body.attachmentType, request.body.attachment.name)) + _ <- Fox.fromBool(existingAttachmentOpt.isEmpty) ?~> existsError _ <- Fox.fromBool(dataset._dataStore == dataStore.name) ?~> "Cannot upload attachment to existing dataset via different datastore." dummyAttachmentPath <- UPath.fromString("").toFox _ <- uploadToPathsService.handleExistingPendingAttachment(dataset, @@ -229,6 +232,7 @@ class WKRemoteDataStoreController @Inject()( _ <- datasetMagDAO.finishUpload(request.body.datasetId, request.body.layerName, request.body.mag) dataStoreClient <- datasetService.clientFor(dataset)(GlobalAccessContext) _ <- dataStoreClient.invalidateDatasetInDSCache(dataset._id) + _ <- usedStorageService.refreshStorageReportForDataset(dataset) } yield Ok } } @@ -251,6 +255,7 @@ class WKRemoteDataStoreController @Inject()( request.body.attachment) dataStoreClient <- datasetService.clientFor(dataset)(GlobalAccessContext) _ <- dataStoreClient.invalidateDatasetInDSCache(dataset._id) + _ <- usedStorageService.refreshStorageReportForDataset(dataset) } yield Ok } } diff --git a/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts b/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts index 6fb607ac7f8..6855523f080 100644 --- a/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts +++ b/frontend/javascripts/test/backend_snapshot_tests/datasets.e2e.ts @@ -195,7 +195,7 @@ describe("Dataset API (E2E)", () => { }), body: JSON.stringify({ resumableUploadInfo: { - filePaths: ["test-dataset-upload.zip"], + filePaths: ["test-dataset.zip"], totalFileCount: 1, uploadId: uploadId, }, diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala index 46b86a3154c..cc64ded4292 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSLegacyApiController.scala @@ -192,8 +192,8 @@ class DSLegacyApiController @Inject()( initialTeamIds = request.body.initialTeams, folderId = request.body.folderId, requireUniqueName = request.body.requireUniqueName, - isVirtual = None, - needsConversion = None + isVirtual = request.body.isVirtual, + needsConversion = request.body.needsConversion ) result <- Fox.fromFuture(uploadController.reserveDatasetUpload()(request.withBody(adaptedRequestBody))) } yield result diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala index 496fedb4d3e..40eb78447c0 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala @@ -54,11 +54,20 @@ case class DataLayerAttachments( def getByTypeAndName(attachmentType: LayerAttachmentType, name: String): Option[LayerAttachment] = attachmentType match { - case LayerAttachmentType.mesh => meshes.find(_.name == name) - case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) + case LayerAttachmentType.mesh => meshes.find(_.name == name) + case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) case LayerAttachmentType.segmentIndex => segmentIndex.find(_.name == name) - case LayerAttachmentType.connectome => connectomes.find(_.name == name) - case LayerAttachmentType.cumsum => cumsum.find(_.name == name) + case LayerAttachmentType.connectome => connectomes.find(_.name == name) + case LayerAttachmentType.cumsum => cumsum.find(_.name == name) + } + + def getByTypeAndNameAlwaysReturnSingletons(attachmentType: LayerAttachmentType, name: String): Option[LayerAttachment] = + attachmentType match { + case LayerAttachmentType.mesh => meshes.find(_.name == name) + case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) + case LayerAttachmentType.segmentIndex => segmentIndex + case LayerAttachmentType.connectome => connectomes.find(_.name == name) + case LayerAttachmentType.cumsum => cumsum } def mapped(attachmentMapping: LayerAttachment => LayerAttachment): DataLayerAttachments = diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index 6717baf3f6b..8deef0c309d 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -196,6 +196,14 @@ class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) def findLayerName(uploadId: String): Fox[String] = store.find(redisKeyForLayerName(uploadId)) + + override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = + for { + dataSourceId <- findDataSourceId(uploadId) + _ <- store.remove(redisKeyForMag(uploadId)) + _ <- store.remove(redisKeyForLayerName(uploadId)) + _ <- super.cleanUp(uploadId) + } yield () } class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) extends UploadMetadataStore { @@ -227,4 +235,13 @@ class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedi def findLayerName(uploadId: String): Fox[String] = store.find(redisKeyForLayerName(uploadId)) + + override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = + for { + dataSourceId <- findDataSourceId(uploadId) + _ <- store.remove(findAttachmentType(uploadId)) + _ <- store.remove(findAttachment(uploadId)) + _ <- store.remove(redisKeyForLayerName(uploadId)) + _ <- super.cleanUp(uploadId) + } yield () } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 2e779848a71..7ed24448a84 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -418,7 +418,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService, datasetId, dataSourceId, s"${mag.mag.toMagLiteral(true)}__${ObjectId.generate}", - UploadDomain.attachment) + UploadDomain.mag) magAdapted = mag.copy(path = Some(finalPath)) _ <- remoteWebknossosClient.reportMagUpload( ReportMagUploadParameters(datasetId, layerName, magAdapted, magSizeBytes)) From 58af571ef5ca7b34e99e05ed49c761c30be05d06 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 14:35:28 +0200 Subject: [PATCH 36/37] typo --- .../datastore/services/uploading/UploadMetadataStore.scala | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala index 8deef0c309d..e5308c1ddeb 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadMetadataStore.scala @@ -199,7 +199,6 @@ class MagUploadMetadataStore @Inject()(protected val store: DataStoreRedisStore) override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = for { - dataSourceId <- findDataSourceId(uploadId) _ <- store.remove(redisKeyForMag(uploadId)) _ <- store.remove(redisKeyForLayerName(uploadId)) _ <- super.cleanUp(uploadId) @@ -238,9 +237,8 @@ class AttachmentUploadMetadataStore @Inject()(protected val store: DataStoreRedi override def cleanUp(uploadId: String)(implicit ec: ExecutionContext): Fox[Unit] = for { - dataSourceId <- findDataSourceId(uploadId) - _ <- store.remove(findAttachmentType(uploadId)) - _ <- store.remove(findAttachment(uploadId)) + _ <- store.remove(redisKeyForAttachmentType(uploadId)) + _ <- store.remove(redisKeyForAttachment(uploadId)) _ <- store.remove(redisKeyForLayerName(uploadId)) _ <- super.cleanUp(uploadId) } yield () From f06b5dd76a4d44308994002938fb1702d346d4c6 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 9 Apr 2026 14:41:40 +0200 Subject: [PATCH 37/37] format --- .../datasource/DataLayerAttachments.scala | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala index 40eb78447c0..5354a56a7d1 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayerAttachments.scala @@ -54,20 +54,21 @@ case class DataLayerAttachments( def getByTypeAndName(attachmentType: LayerAttachmentType, name: String): Option[LayerAttachment] = attachmentType match { - case LayerAttachmentType.mesh => meshes.find(_.name == name) - case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) + case LayerAttachmentType.mesh => meshes.find(_.name == 
name) + case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) case LayerAttachmentType.segmentIndex => segmentIndex.find(_.name == name) - case LayerAttachmentType.connectome => connectomes.find(_.name == name) - case LayerAttachmentType.cumsum => cumsum.find(_.name == name) + case LayerAttachmentType.connectome => connectomes.find(_.name == name) + case LayerAttachmentType.cumsum => cumsum.find(_.name == name) } - def getByTypeAndNameAlwaysReturnSingletons(attachmentType: LayerAttachmentType, name: String): Option[LayerAttachment] = + def getByTypeAndNameAlwaysReturnSingletons(attachmentType: LayerAttachmentType, + name: String): Option[LayerAttachment] = attachmentType match { - case LayerAttachmentType.mesh => meshes.find(_.name == name) - case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) + case LayerAttachmentType.mesh => meshes.find(_.name == name) + case LayerAttachmentType.agglomerate => agglomerates.find(_.name == name) case LayerAttachmentType.segmentIndex => segmentIndex - case LayerAttachmentType.connectome => connectomes.find(_.name == name) - case LayerAttachmentType.cumsum => cumsum + case LayerAttachmentType.connectome => connectomes.find(_.name == name) + case LayerAttachmentType.cumsum => cumsum } def mapped(attachmentMapping: LayerAttachment => LayerAttachment): DataLayerAttachments =