diff --git a/amoro-ams/pom.xml b/amoro-ams/pom.xml index 1a78ca0f26..1db3a8f81a 100644 --- a/amoro-ams/pom.xml +++ b/amoro-ams/pom.xml @@ -280,10 +280,10 @@ url-connection-client - - software.amazon.awssdk - s3-transfer-manager - + + software.amazon.awssdk + s3-transfer-manager + org.apache.hadoop diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/AmoroServiceContainer.java b/amoro-ams/src/main/java/org/apache/amoro/server/AmoroServiceContainer.java index 1dd763f324..467f45aac6 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/AmoroServiceContainer.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/AmoroServiceContainer.java @@ -27,6 +27,7 @@ import org.apache.amoro.config.ConfigHelpers; import org.apache.amoro.config.Configurations; import org.apache.amoro.server.dashboard.DashboardServer; +import org.apache.amoro.server.dashboard.JavalinJsonMapper; import org.apache.amoro.server.dashboard.response.ErrorResponse; import org.apache.amoro.server.dashboard.utils.AmsUtil; import org.apache.amoro.server.dashboard.utils.CommonUtil; @@ -66,7 +67,6 @@ import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; -import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Files; import java.nio.file.Paths; @@ -215,7 +215,7 @@ public void dispose() { MetricManager.dispose(); } - private void initConfig() throws IOException { + private void initConfig() throws Exception { LOG.info("initializing configurations..."); new ConfigurationHelper().init(); } @@ -243,6 +243,7 @@ private void initHttpService() { config.addStaticFiles(dashboardServer.configStaticFiles()); config.sessionHandler(SessionHandler::new); config.enableCorsForAllOrigins(); + config.jsonMapper(JavalinJsonMapper.createDefaultJsonMapper()); config.showJavalinBanner = false; }); httpServer.routes( @@ -407,14 +408,14 @@ private class ConfigurationHelper { private JsonNode yamlConfig; - public void init() throws IOException { + public void init() throws 
Exception { Map envConfig = initEnvConfig(); initServiceConfig(envConfig); setIcebergSystemProperties(); initContainerConfig(); } - private void initServiceConfig(Map envConfig) throws IOException { + private void initServiceConfig(Map envConfig) throws Exception { LOG.info("initializing service configuration..."); String configPath = Environments.getConfigPath() + "/" + SERVER_CONFIG_FILENAME; LOG.info("load config from path: {}", configPath); @@ -438,6 +439,8 @@ private void initServiceConfig(Map envConfig) throws IOException private Map initEnvConfig() { LOG.info("initializing system env configuration..."); + Map envs = System.getenv(); + envs.forEach((k, v) -> LOG.info("export {}={}", k, v)); String prefix = AmoroManagementConf.SYSTEM_CONFIG.toUpperCase(); return ConfigHelpers.convertConfigurationKeys(prefix, System.getenv()); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/DefaultOptimizingService.java b/amoro-ams/src/main/java/org/apache/amoro/server/DefaultOptimizingService.java index d77b7f9170..6e8924b185 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/DefaultOptimizingService.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/DefaultOptimizingService.java @@ -33,6 +33,7 @@ import org.apache.amoro.resource.Resource; import org.apache.amoro.resource.ResourceGroup; import org.apache.amoro.server.exception.ForbiddenException; +import org.apache.amoro.server.exception.IllegalTaskStateException; import org.apache.amoro.server.exception.ObjectNotExistsException; import org.apache.amoro.server.exception.PluginRetryAuthException; import org.apache.amoro.server.exception.TaskNotFoundException; @@ -49,7 +50,6 @@ import org.apache.amoro.server.table.DefaultTableService; import org.apache.amoro.server.table.RuntimeHandlerChain; import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableRuntimeMeta; import org.apache.amoro.server.table.TableService; import 
org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; import org.apache.amoro.shade.guava32.com.google.common.collect.ImmutableList; @@ -121,24 +121,24 @@ public RuntimeHandlerChain getTableRuntimeHandler() { return tableHandlerChain; } - private void loadOptimizingQueues(List tableRuntimeMetaList) { + private void loadOptimizingQueues(List tableRuntimeMetaList) { List optimizerGroups = getAs(ResourceMapper.class, ResourceMapper::selectResourceGroups); List optimizers = getAs(OptimizerMapper.class, OptimizerMapper::selectAll); - Map> groupToTableRuntimes = + Map> groupToTableRuntimes = tableRuntimeMetaList.stream() - .collect(Collectors.groupingBy(TableRuntimeMeta::getOptimizerGroup)); + .collect(Collectors.groupingBy(TableRuntime::getOptimizerGroup)); optimizerGroups.forEach( group -> { String groupName = group.getName(); - List tableRuntimeMetas = groupToTableRuntimes.remove(groupName); + List tableRuntimes = groupToTableRuntimes.remove(groupName); OptimizingQueue optimizingQueue = new OptimizingQueue( tableService, group, this, planExecutor, - Optional.ofNullable(tableRuntimeMetas).orElseGet(ArrayList::new), + Optional.ofNullable(tableRuntimes).orElseGet(ArrayList::new), maxPlanningParallelism); optimizingQueueByGroup.put(groupName, optimizingQueue); }); @@ -148,8 +148,8 @@ private void loadOptimizingQueues(List tableRuntimeMetaList) { .forEach(groupName -> LOG.warn("Unloaded task runtime in group {}", groupName)); } - private void registerOptimizer(OptimizerInstance optimizer, boolean needPersistency) { - if (needPersistency) { + private void registerOptimizer(OptimizerInstance optimizer, boolean needPersistent) { + if (needPersistent) { doAs(OptimizerMapper.class, mapper -> mapper.insertOptimizer(optimizer)); } @@ -456,9 +456,9 @@ public void handleTableRemoved(TableRuntime tableRuntime) { } @Override - protected void initHandler(List tableRuntimeMetaList) { + protected void initHandler(List tableRuntimeList) { 
LOG.info("OptimizerManagementService begin initializing"); - loadOptimizingQueues(tableRuntimeMetaList); + loadOptimizingQueues(tableRuntimeList); optimizerKeeper.start(); LOG.info("SuspendingDetector for Optimizer has been started."); LOG.info("OptimizerManagementService initializing has completed"); @@ -566,7 +566,14 @@ private void retryTask(TaskRuntime task, OptimizingQueue queue) { task.getTaskId(), task.getResourceDesc()); // optimizing task of suspending optimizer would not be counted for retrying - queue.retryTask(task); + try { + queue.retryTask(task); + } catch (IllegalTaskStateException e) { + LOG.error( + "Retry task {} failed due to {}, will check it in next round", + task.getTaskId(), + e.getMessage()); + } } private Predicate buildSuspendingPredication(Set activeTokens) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java index 2e865974fb..bb773fca6d 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/DashboardServer.java @@ -39,6 +39,7 @@ import org.apache.amoro.server.dashboard.controller.HealthCheckController; import org.apache.amoro.server.dashboard.controller.LoginController; import org.apache.amoro.server.dashboard.controller.OptimizerController; +import org.apache.amoro.server.dashboard.controller.OptimizerGroupController; import org.apache.amoro.server.dashboard.controller.OverviewController; import org.apache.amoro.server.dashboard.controller.PlatformFileInfoController; import org.apache.amoro.server.dashboard.controller.SettingController; @@ -77,6 +78,7 @@ public class DashboardServer { private final CatalogController catalogController; private final HealthCheckController healthCheckController; private final LoginController loginController; + private final OptimizerGroupController optimizerGroupController; private 
final OptimizerController optimizerController; private final PlatformFileInfoController platformFileInfoController; private final SettingController settingController; @@ -98,7 +100,8 @@ public DashboardServer( this.catalogController = new CatalogController(tableService, platformFileManager); this.healthCheckController = new HealthCheckController(); this.loginController = new LoginController(serviceConfig); - this.optimizerController = new OptimizerController(tableService, optimizerManager); + this.optimizerGroupController = new OptimizerGroupController(tableService, optimizerManager); + this.optimizerController = new OptimizerController(optimizerManager); this.platformFileInfoController = new PlatformFileInfoController(platformFileManager); this.settingController = new SettingController(serviceConfig, optimizerManager); ServerTableDescriptor tableDescriptor = new ServerTableDescriptor(tableService, serviceConfig); @@ -221,6 +224,9 @@ private EndpointGroup apiGroup() { get( "/catalogs/{catalog}/dbs/{db}/tables/{table}/optimizing-processes", tableController::getOptimizingProcesses); + get( + "/catalogs/{catalog}/dbs/{db}/tables/{table}/optimizing-types", + tableController::getOptimizingTypes); get( "/catalogs/{catalog}/dbs/{db}/tables/{table}/optimizing-processes/{processId}/tasks", tableController::getOptimizingProcessTasks); @@ -274,26 +280,29 @@ private EndpointGroup apiGroup() { () -> { get( "/optimizerGroups/{optimizerGroup}/tables", - optimizerController::getOptimizerTables); - get("/optimizerGroups/{optimizerGroup}/optimizers", optimizerController::getOptimizers); - get("/optimizerGroups", optimizerController::getOptimizerGroups); + optimizerGroupController::getOptimizerTables); + get( + "/optimizerGroups/{optimizerGroup}/optimizers", + optimizerGroupController::getOptimizers); + get("/optimizerGroups", optimizerGroupController::getOptimizerGroups); get( "/optimizerGroups/{optimizerGroup}/info", - optimizerController::getOptimizerGroupInfo); - delete( - 
"/optimizerGroups/{optimizerGroup}/optimizers/{jobId}", - optimizerController::releaseOptimizer); + optimizerGroupController::getOptimizerGroupInfo); post( "/optimizerGroups/{optimizerGroup}/optimizers", - optimizerController::scaleOutOptimizer); - get("/resourceGroups", optimizerController::getResourceGroup); - post("/resourceGroups", optimizerController::createResourceGroup); - put("/resourceGroups", optimizerController::updateResourceGroup); - delete("/resourceGroups/{resourceGroupName}", optimizerController::deleteResourceGroup); + optimizerGroupController::scaleOutOptimizer); + post("/optimizers", optimizerController::createOptimizer); + delete("/optimizers/{jobId}", optimizerController::releaseOptimizer); + get("/resourceGroups", optimizerGroupController::getResourceGroup); + post("/resourceGroups", optimizerGroupController::createResourceGroup); + put("/resourceGroups", optimizerGroupController::updateResourceGroup); + delete( + "/resourceGroups/{resourceGroupName}", + optimizerGroupController::deleteResourceGroup); get( "/resourceGroups/{resourceGroupName}/delete/check", - optimizerController::deleteCheckResourceGroup); - get("/containers/get", optimizerController::getContainers); + optimizerGroupController::deleteCheckResourceGroup); + get("/containers/get", optimizerGroupController::getContainers); }); // console apis diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/JavalinJsonMapper.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/JavalinJsonMapper.java new file mode 100644 index 0000000000..43bda20b7f --- /dev/null +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/JavalinJsonMapper.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.amoro.server.dashboard; + +import io.javalin.plugin.json.JsonMapper; +import org.apache.amoro.TableFormat; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.core.JsonProcessingException; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.module.SimpleModule; +import org.jetbrains.annotations.NotNull; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** Json mapper to adapt shaded jackson. 
*/ +public class JavalinJsonMapper implements JsonMapper { + + private final ObjectMapper objectMapper; + + public static JavalinJsonMapper createDefaultJsonMapper() { + ObjectMapper om = new ObjectMapper(); + SimpleModule module = new SimpleModule(); + module.addSerializer(TableFormat.class, new TableFormat.JsonSerializer()); + module.addDeserializer(TableFormat.class, new TableFormat.JsonDeserializer()); + om.registerModule(module); + return new JavalinJsonMapper(om); + } + + public JavalinJsonMapper(ObjectMapper shadedMapper) { + this.objectMapper = shadedMapper; + } + + @NotNull + @Override + public String toJsonString(@NotNull Object obj) { + if (obj instanceof String) { + return (String) obj; + } + try { + return objectMapper.writeValueAsString(obj); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @NotNull + @Override + public InputStream toJsonStream(@NotNull Object obj) { + if (obj instanceof String) { + String result = (String) obj; + return new ByteArrayInputStream(result.getBytes()); + } else { + byte[] string = new byte[0]; + try { + string = objectMapper.writeValueAsBytes(obj); + return new ByteArrayInputStream(string); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + } + + @NotNull + @Override + public T fromJsonString(@NotNull String json, @NotNull Class targetClass) { + try { + return objectMapper.readValue(json, targetClass); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @NotNull + @Override + public T fromJsonStream(@NotNull InputStream json, @NotNull Class targetClass) { + try { + return objectMapper.readValue(json, targetClass); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/MixedAndIcebergTableDescriptor.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/MixedAndIcebergTableDescriptor.java index 3db518a241..1bacd81a13 100644 --- 
a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/MixedAndIcebergTableDescriptor.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/MixedAndIcebergTableDescriptor.java @@ -33,6 +33,7 @@ import org.apache.amoro.server.optimizing.MetricsSummary; import org.apache.amoro.server.optimizing.OptimizingProcessMeta; import org.apache.amoro.server.optimizing.OptimizingTaskMeta; +import org.apache.amoro.server.optimizing.OptimizingType; import org.apache.amoro.server.optimizing.TaskRuntime; import org.apache.amoro.server.persistence.PersistentBase; import org.apache.amoro.server.persistence.mapper.OptimizingMapper; @@ -64,6 +65,7 @@ import org.apache.amoro.utils.MixedDataFiles; import org.apache.amoro.utils.MixedTableUtil; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.iceberg.ContentFile; import org.apache.iceberg.HasTableOperations; @@ -501,7 +503,7 @@ public List getTableConsumerInfos(AmoroTable amoroTable) { @Override public Pair, Integer> getOptimizingProcessesInfo( - AmoroTable amoroTable, int limit, int offset) { + AmoroTable amoroTable, String type, ProcessStatus status, int limit, int offset) { TableIdentifier tableIdentifier = amoroTable.id(); List processMetaList = getAs( @@ -511,6 +513,16 @@ public Pair, Integer> getOptimizingProcessesInfo( tableIdentifier.getCatalog(), tableIdentifier.getDatabase(), tableIdentifier.getTableName())); + + processMetaList = + processMetaList.stream() + .filter( + p -> + StringUtils.isBlank(type) + || type.equalsIgnoreCase(p.getOptimizingType().getStatus().displayValue())) + .filter(p -> status == null || status.name().equalsIgnoreCase(p.getStatus().name())) + .collect(Collectors.toList()); + int total = processMetaList.size(); processMetaList = processMetaList.stream().skip(offset).limit(limit).collect(Collectors.toList()); @@ -532,6 +544,15 @@ public Pair, Integer> 
getOptimizingProcessesInfo( total); } + @Override + public Map getTableOptimizingTypes(AmoroTable amoroTable) { + Map types = Maps.newHashMap(); + for (OptimizingType type : OptimizingType.values()) { + types.put(type.name(), type.getStatus().displayValue()); + } + return types; + } + @Override public List getOptimizingTaskInfos( AmoroTable amoroTable, String processId) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/ServerTableDescriptor.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/ServerTableDescriptor.java index 8caa17e685..521d80f62f 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/ServerTableDescriptor.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/ServerTableDescriptor.java @@ -22,6 +22,7 @@ import org.apache.amoro.TableFormat; import org.apache.amoro.api.TableIdentifier; import org.apache.amoro.config.Configurations; +import org.apache.amoro.process.ProcessStatus; import org.apache.amoro.server.catalog.ServerCatalog; import org.apache.amoro.server.persistence.PersistentBase; import org.apache.amoro.server.table.TableService; @@ -124,10 +125,11 @@ public List getTableConsumersInfos(TableIdentifier tableIdentifier } public Pair, Integer> getOptimizingProcessesInfo( - TableIdentifier tableIdentifier, int limit, int offset) { + TableIdentifier tableIdentifier, String type, ProcessStatus status, int limit, int offset) { AmoroTable amoroTable = loadTable(tableIdentifier); FormatTableDescriptor formatTableDescriptor = formatDescriptorMap.get(amoroTable.format()); - return formatTableDescriptor.getOptimizingProcessesInfo(amoroTable, limit, offset); + return formatTableDescriptor.getOptimizingProcessesInfo( + amoroTable, type, status, limit, offset); } public List getOptimizingProcessTaskInfos( @@ -137,6 +139,12 @@ public List getOptimizingProcessTaskInfos( return formatTableDescriptor.getOptimizingTaskInfos(amoroTable, processId); } + public Map 
getTableOptimizingTypes(TableIdentifier tableIdentifier) { + AmoroTable amoroTable = loadTable(tableIdentifier); + FormatTableDescriptor formatTableDescriptor = formatDescriptorMap.get(amoroTable.format()); + return formatTableDescriptor.getTableOptimizingTypes(amoroTable); + } + private AmoroTable loadTable(TableIdentifier identifier) { ServerCatalog catalog = tableService.getServerCatalog(identifier.getCatalog()); return catalog.loadTable(identifier.getDatabase(), identifier.getTableName()); diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerController.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerController.java index aa5abf386d..9e34df3ab3 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerController.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerController.java @@ -19,159 +19,29 @@ package org.apache.amoro.server.dashboard.controller; import io.javalin.http.Context; -import org.apache.amoro.ServerTableIdentifier; import org.apache.amoro.resource.Resource; import org.apache.amoro.resource.ResourceGroup; import org.apache.amoro.resource.ResourceType; import org.apache.amoro.server.DefaultOptimizingService; -import org.apache.amoro.server.dashboard.model.OptimizerInstanceInfo; -import org.apache.amoro.server.dashboard.model.OptimizerResourceInfo; -import org.apache.amoro.server.dashboard.model.TableOptimizingInfo; import org.apache.amoro.server.dashboard.response.OkResponse; -import org.apache.amoro.server.dashboard.response.PageResult; -import org.apache.amoro.server.dashboard.utils.OptimizingUtil; import org.apache.amoro.server.resource.ContainerMetadata; import org.apache.amoro.server.resource.OptimizerInstance; import org.apache.amoro.server.resource.ResourceContainers; -import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableService; import 
org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; -import javax.ws.rs.BadRequestException; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; /** The controller that handles optimizer requests. */ public class OptimizerController { - private static final String ALL_GROUP = "all"; - private final TableService tableService; + private final DefaultOptimizingService optimizerManager; - public OptimizerController(TableService tableService, DefaultOptimizingService optimizerManager) { - this.tableService = tableService; + public OptimizerController(DefaultOptimizingService optimizerManager) { this.optimizerManager = optimizerManager; } - /** Get optimize tables. * @return List of {@link TableOptimizingInfo} */ - public void getOptimizerTables(Context ctx) { - String optimizerGroup = ctx.pathParam("optimizerGroup"); - String dbFilterStr = ctx.queryParam("dbSearchInput"); - String tableFilterStr = ctx.queryParam("tableSearchInput"); - Integer page = ctx.queryParamAsClass("page", Integer.class).getOrDefault(1); - Integer pageSize = ctx.queryParamAsClass("pageSize", Integer.class).getOrDefault(20); - int offset = (page - 1) * pageSize; - - List tableRuntimes = new ArrayList<>(); - List tables = tableService.listManagedTables(); - for (ServerTableIdentifier identifier : tables) { - TableRuntime tableRuntime = tableService.getRuntime(identifier); - if (tableRuntime == null) { - continue; - } - if ((ALL_GROUP.equals(optimizerGroup) - || tableRuntime.getOptimizerGroup().equals(optimizerGroup)) - && (StringUtils.isEmpty(dbFilterStr) - || StringUtils.containsIgnoreCase(identifier.getDatabase(), dbFilterStr)) - && (StringUtils.isEmpty(tableFilterStr) - || StringUtils.containsIgnoreCase(identifier.getTableName(), tableFilterStr))) { - tableRuntimes.add(tableRuntime); - } - } - tableRuntimes.sort( - (o1, 
o2) -> { - // first we compare the status , and then we compare the start time when status are equal; - int statDiff = o1.getOptimizingStatus().compareTo(o2.getOptimizingStatus()); - // status order is asc, startTime order is desc - if (statDiff == 0) { - long timeDiff = o1.getCurrentStatusStartTime() - o2.getCurrentStatusStartTime(); - return timeDiff >= 0 ? (timeDiff == 0 ? 0 : -1) : 1; - } else { - return statDiff; - } - }); - PageResult amsPageResult = - PageResult.of(tableRuntimes, offset, pageSize, OptimizingUtil::buildTableOptimizeInfo); - ctx.json(OkResponse.of(amsPageResult)); - } - - /** get optimizers. */ - public void getOptimizers(Context ctx) { - String optimizerGroup = ctx.pathParam("optimizerGroup"); - Integer page = ctx.queryParamAsClass("page", Integer.class).getOrDefault(1); - Integer pageSize = ctx.queryParamAsClass("pageSize", Integer.class).getOrDefault(20); - - int offset = (page - 1) * pageSize; - List optimizers; - if (optimizerGroup.equals("all")) { - optimizers = optimizerManager.listOptimizers(); - } else { - optimizers = optimizerManager.listOptimizers(optimizerGroup); - } - List optimizerList = new ArrayList<>(optimizers); - optimizerList.sort(Comparator.comparingLong(OptimizerInstance::getStartTime).reversed()); - List result = - optimizerList.stream() - .map( - e -> - OptimizerInstanceInfo.builder() - .token(e.getToken()) - .startTime(e.getStartTime()) - .touchTime(e.getTouchTime()) - .jobId(e.getResourceId()) - .groupName(e.getGroupName()) - .coreNumber(e.getThreadCount()) - .memory(e.getMemoryMb()) - .jobStatus("RUNNING") - .container(e.getContainerName()) - .build()) - .collect(Collectors.toList()); - - PageResult amsPageResult = PageResult.of(result, offset, pageSize); - ctx.json(OkResponse.of(amsPageResult)); - } - - /** get optimizerGroup: optimizerGroupId, optimizerGroupName url = /optimizerGroups. 
*/ - public void getOptimizerGroups(Context ctx) { - List> result = - optimizerManager.listResourceGroups().stream() - .filter( - resourceGroup -> - !ResourceContainers.EXTERNAL_CONTAINER_NAME.equals( - resourceGroup.getContainer())) - .map( - e -> { - Map mapObj = new HashMap<>(); - mapObj.put("optimizerGroupName", e.getName()); - return mapObj; - }) - .collect(Collectors.toList()); - ctx.json(OkResponse.of(result)); - } - - /** get optimizer info: occupationCore, occupationMemory */ - public void getOptimizerGroupInfo(Context ctx) { - String optimizerGroup = ctx.pathParam("optimizerGroup"); - List optimizers; - if (optimizerGroup.equals("all")) { - optimizers = optimizerManager.listOptimizers(); - } else { - optimizers = optimizerManager.listOptimizers(optimizerGroup); - } - OptimizerResourceInfo optimizerResourceInfo = new OptimizerResourceInfo(); - optimizers.forEach( - e -> { - optimizerResourceInfo.addOccupationCore(e.getThreadCount()); - optimizerResourceInfo.addOccupationMemory(e.getMemoryMb()); - }); - ctx.json(OkResponse.of(optimizerResourceInfo)); - } - /** * release optimizer. * @@ -198,12 +68,11 @@ public void releaseOptimizer(Context ctx) { ctx.json(OkResponse.of("Success to release optimizer")); } - /** scale out optimizers, url:/optimizerGroups/{optimizerGroup}/optimizers. */ - public void scaleOutOptimizer(Context ctx) { - String optimizerGroup = ctx.pathParam("optimizerGroup"); - Map map = ctx.bodyAsClass(Map.class); - int parallelism = map.get("parallelism"); - + /** scale out optimizers, url:/optimizers. 
*/ + public void createOptimizer(Context ctx) { + Map map = ctx.bodyAsClass(Map.class); + int parallelism = Integer.parseInt(map.get("parallelism").toString()); + String optimizerGroup = map.get("optimizerGroup").toString(); ResourceGroup resourceGroup = optimizerManager.getResourceGroup(optimizerGroup); Resource resource = new Resource.Builder( @@ -213,75 +82,7 @@ public void scaleOutOptimizer(Context ctx) { .build(); ResourceContainers.get(resource.getContainerName()).requestResource(resource); optimizerManager.createResource(resource); - ctx.json(OkResponse.of("success to scaleOut optimizer")); - } - - /** get {@link List} url = /optimize/resourceGroups */ - public void getResourceGroup(Context ctx) { - List result = - optimizerManager.listResourceGroups().stream() - .map( - group -> { - List optimizers = - optimizerManager.listOptimizers(group.getName()); - OptimizerResourceInfo optimizerResourceInfo = new OptimizerResourceInfo(); - optimizerResourceInfo.setResourceGroup( - optimizerManager.getResourceGroup(group.getName())); - optimizers.forEach( - optimizer -> { - optimizerResourceInfo.addOccupationCore(optimizer.getThreadCount()); - optimizerResourceInfo.addOccupationMemory(optimizer.getMemoryMb()); - }); - return optimizerResourceInfo; - }) - .collect(Collectors.toList()); - ctx.json(OkResponse.of(result)); - } - - /** - * create optimizeGroup: name, container, schedulePolicy, properties url = - * /optimize/resourceGroups/create - */ - public void createResourceGroup(Context ctx) { - Map map = ctx.bodyAsClass(Map.class); - String name = (String) map.get("name"); - String container = (String) map.get("container"); - Map properties = (Map) map.get("properties"); - if (optimizerManager.getResourceGroup(name) != null) { - throw new BadRequestException(String.format("Optimizer group:%s already existed.", name)); - } - ResourceGroup.Builder builder = new ResourceGroup.Builder(name, container); - builder.addProperties(properties); - 
optimizerManager.createResourceGroup(builder.build()); - ctx.json(OkResponse.of("The optimizer group has been successfully created.")); - } - - /** - * update optimizeGroup: name, container, schedulePolicy, properties url = - * /optimize/resourceGroups/update - */ - public void updateResourceGroup(Context ctx) { - Map map = ctx.bodyAsClass(Map.class); - String name = (String) map.get("name"); - String container = (String) map.get("container"); - Map properties = (Map) map.get("properties"); - ResourceGroup.Builder builder = new ResourceGroup.Builder(name, container); - builder.addProperties(properties); - optimizerManager.updateResourceGroup(builder.build()); - ctx.json(OkResponse.of("The optimizer group has been successfully updated.")); - } - - /** delete optimizeGroup url = /optimize/resourceGroups/{resourceGroupName} */ - public void deleteResourceGroup(Context ctx) { - String name = ctx.pathParam("resourceGroupName"); - optimizerManager.deleteResourceGroup(name); - ctx.json(OkResponse.of("The optimizer group has been successfully deleted.")); - } - - /** check if optimizerGroup can be deleted url = /optimize/resourceGroups/delete/check */ - public void deleteCheckResourceGroup(Context ctx) { - String name = ctx.pathParam("resourceGroupName"); - ctx.json(OkResponse.of(optimizerManager.canDeleteResourceGroup(name))); + ctx.json(OkResponse.of("success to create optimizer")); } /** check if optimizerGroup can be deleted url = /optimize/containers/get */ diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerGroupController.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerGroupController.java new file mode 100644 index 0000000000..8d8ac3fc79 --- /dev/null +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/OptimizerGroupController.java @@ -0,0 +1,278 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.amoro.server.dashboard.controller; + +import io.javalin.http.Context; +import org.apache.amoro.resource.Resource; +import org.apache.amoro.resource.ResourceGroup; +import org.apache.amoro.resource.ResourceType; +import org.apache.amoro.server.DefaultOptimizingService; +import org.apache.amoro.server.dashboard.model.OptimizerInstanceInfo; +import org.apache.amoro.server.dashboard.model.OptimizerResourceInfo; +import org.apache.amoro.server.dashboard.model.TableOptimizingInfo; +import org.apache.amoro.server.dashboard.response.OkResponse; +import org.apache.amoro.server.dashboard.response.PageResult; +import org.apache.amoro.server.dashboard.utils.OptimizingUtil; +import org.apache.amoro.server.persistence.TableRuntimeMeta; +import org.apache.amoro.server.resource.ContainerMetadata; +import org.apache.amoro.server.resource.OptimizerInstance; +import org.apache.amoro.server.resource.ResourceContainers; +import org.apache.amoro.server.table.TableRuntime; +import org.apache.amoro.server.table.TableService; +import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; + +import javax.ws.rs.BadRequestException; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; 
+import java.util.stream.Collectors; + +/** The controller that handles optimizer requests. */ +public class OptimizerGroupController { + private static final String ALL_GROUP = "all"; + private final TableService tableService; + private final DefaultOptimizingService optimizerManager; + + public OptimizerGroupController( + TableService tableService, DefaultOptimizingService optimizerManager) { + this.tableService = tableService; + this.optimizerManager = optimizerManager; + } + + /** Get optimize tables. * @return List of {@link TableOptimizingInfo} */ + public void getOptimizerTables(Context ctx) { + String optimizerGroup = ctx.pathParam("optimizerGroup"); + String dbFilterStr = ctx.queryParam("dbSearchInput"); + String tableFilterStr = ctx.queryParam("tableSearchInput"); + Integer page = ctx.queryParamAsClass("page", Integer.class).getOrDefault(1); + Integer pageSize = ctx.queryParamAsClass("pageSize", Integer.class).getOrDefault(20); + int offset = (page - 1) * pageSize; + + String optimizerGroupUsedInDbFilter = ALL_GROUP.equals(optimizerGroup) ? null : optimizerGroup; + // get all info from underlying table table_runtime + List tableRuntimeBeans = + tableService.getTableRuntimes( + optimizerGroupUsedInDbFilter, dbFilterStr, tableFilterStr, pageSize, offset); + + List tableRuntimes = + tableRuntimeBeans.stream() + .map(meta -> tableService.getRuntime(meta.getTableId())) + .collect(Collectors.toList()); + + PageResult amsPageResult = + PageResult.of(tableRuntimes, offset, pageSize, OptimizingUtil::buildTableOptimizeInfo); + ctx.json(OkResponse.of(amsPageResult)); + } + + /** get optimizers. 
*/ + public void getOptimizers(Context ctx) { + String optimizerGroup = ctx.pathParam("optimizerGroup"); + Integer page = ctx.queryParamAsClass("page", Integer.class).getOrDefault(1); + Integer pageSize = ctx.queryParamAsClass("pageSize", Integer.class).getOrDefault(20); + + int offset = (page - 1) * pageSize; + List optimizers; + if (optimizerGroup.equals("all")) { + optimizers = optimizerManager.listOptimizers(); + } else { + optimizers = optimizerManager.listOptimizers(optimizerGroup); + } + List optimizerList = new ArrayList<>(optimizers); + optimizerList.sort(Comparator.comparingLong(OptimizerInstance::getStartTime).reversed()); + List result = + optimizerList.stream() + .map( + e -> + OptimizerInstanceInfo.builder() + .token(e.getToken()) + .startTime(e.getStartTime()) + .touchTime(e.getTouchTime()) + .jobId(e.getResourceId()) + .groupName(e.getGroupName()) + .coreNumber(e.getThreadCount()) + .memory(e.getMemoryMb()) + .jobStatus("RUNNING") + .container(e.getContainerName()) + .build()) + .collect(Collectors.toList()); + + PageResult amsPageResult = PageResult.of(result, offset, pageSize); + ctx.json(OkResponse.of(amsPageResult)); + } + + /** get optimizerGroup: optimizerGroupId, optimizerGroupName url = /optimizerGroups. 
*/ + public void getOptimizerGroups(Context ctx) { + List> result = + optimizerManager.listResourceGroups().stream() + .filter( + resourceGroup -> + !ResourceContainers.EXTERNAL_CONTAINER_NAME.equals( + resourceGroup.getContainer())) + .map( + e -> { + Map mapObj = new HashMap<>(); + mapObj.put("optimizerGroupName", e.getName()); + return mapObj; + }) + .collect(Collectors.toList()); + ctx.json(OkResponse.of(result)); + } + + /** get optimizer info: occupationCore, occupationMemory */ + public void getOptimizerGroupInfo(Context ctx) { + String optimizerGroup = ctx.pathParam("optimizerGroup"); + List optimizers; + if (optimizerGroup.equals("all")) { + optimizers = optimizerManager.listOptimizers(); + } else { + optimizers = optimizerManager.listOptimizers(optimizerGroup); + } + OptimizerResourceInfo optimizerResourceInfo = new OptimizerResourceInfo(); + optimizers.forEach( + e -> { + optimizerResourceInfo.addOccupationCore(e.getThreadCount()); + optimizerResourceInfo.addOccupationMemory(e.getMemoryMb()); + }); + ctx.json(OkResponse.of(optimizerResourceInfo)); + } + + /** + * release optimizer. 
+ * + * @pathParam jobId + */ + public void releaseOptimizer(Context ctx) { + String resourceId = ctx.pathParam("jobId"); + Preconditions.checkArgument( + !resourceId.isEmpty(), "resource id can not be empty, maybe it's a external optimizer"); + + List optimizerInstances = + optimizerManager.listOptimizers().stream() + .filter(e -> resourceId.equals(e.getResourceId())) + .collect(Collectors.toList()); + Preconditions.checkState( + !optimizerInstances.isEmpty(), + String.format( + "The resource ID %s has not been indexed" + " to any optimizer.", resourceId)); + Resource resource = optimizerManager.getResource(resourceId); + resource.getProperties().putAll(optimizerInstances.get(0).getProperties()); + ResourceContainers.get(resource.getContainerName()).releaseOptimizer(resource); + optimizerManager.deleteResource(resourceId); + optimizerManager.deleteOptimizer(resource.getGroupName(), resourceId); + ctx.json(OkResponse.of("Success to release optimizer")); + } + + /** scale out optimizers, url:/optimizerGroups/{optimizerGroup}/optimizers. 
*/ + public void scaleOutOptimizer(Context ctx) { + String optimizerGroup = ctx.pathParam("optimizerGroup"); + Map map = ctx.bodyAsClass(Map.class); + int parallelism = map.get("parallelism"); + + ResourceGroup resourceGroup = optimizerManager.getResourceGroup(optimizerGroup); + Resource resource = + new Resource.Builder( + resourceGroup.getContainer(), resourceGroup.getName(), ResourceType.OPTIMIZER) + .setProperties(resourceGroup.getProperties()) + .setThreadCount(parallelism) + .build(); + ResourceContainers.get(resource.getContainerName()).requestResource(resource); + optimizerManager.createResource(resource); + ctx.json(OkResponse.of("success to scaleOut optimizer")); + } + + /** get {@link List} url = /optimize/resourceGroups */ + public void getResourceGroup(Context ctx) { + List result = + optimizerManager.listResourceGroups().stream() + .map( + group -> { + List optimizers = + optimizerManager.listOptimizers(group.getName()); + OptimizerResourceInfo optimizerResourceInfo = new OptimizerResourceInfo(); + optimizerResourceInfo.setResourceGroup( + optimizerManager.getResourceGroup(group.getName())); + optimizers.forEach( + optimizer -> { + optimizerResourceInfo.addOccupationCore(optimizer.getThreadCount()); + optimizerResourceInfo.addOccupationMemory(optimizer.getMemoryMb()); + }); + return optimizerResourceInfo; + }) + .collect(Collectors.toList()); + ctx.json(OkResponse.of(result)); + } + + /** + * create optimizeGroup: name, container, schedulePolicy, properties url = + * /optimize/resourceGroups/create + */ + public void createResourceGroup(Context ctx) { + Map map = ctx.bodyAsClass(Map.class); + String name = (String) map.get("name"); + String container = (String) map.get("container"); + Map properties = (Map) map.get("properties"); + if (optimizerManager.getResourceGroup(name) != null) { + throw new BadRequestException(String.format("Optimizer group:%s already existed.", name)); + } + ResourceGroup.Builder builder = new ResourceGroup.Builder(name, 
container); + builder.addProperties(properties); + optimizerManager.createResourceGroup(builder.build()); + ctx.json(OkResponse.of("The optimizer group has been successfully created.")); + } + + /** + * update optimizeGroup: name, container, schedulePolicy, properties url = + * /optimize/resourceGroups/update + */ + public void updateResourceGroup(Context ctx) { + Map map = ctx.bodyAsClass(Map.class); + String name = (String) map.get("name"); + String container = (String) map.get("container"); + Map properties = (Map) map.get("properties"); + ResourceGroup.Builder builder = new ResourceGroup.Builder(name, container); + builder.addProperties(properties); + optimizerManager.updateResourceGroup(builder.build()); + ctx.json(OkResponse.of("The optimizer group has been successfully updated.")); + } + + /** delete optimizeGroup url = /optimize/resourceGroups/{resourceGroupName} */ + public void deleteResourceGroup(Context ctx) { + String name = ctx.pathParam("resourceGroupName"); + optimizerManager.deleteResourceGroup(name); + ctx.json(OkResponse.of("The optimizer group has been successfully deleted.")); + } + + /** check if optimizerGroup can be deleted url = /optimize/resourceGroups/delete/check */ + public void deleteCheckResourceGroup(Context ctx) { + String name = ctx.pathParam("resourceGroupName"); + ctx.json(OkResponse.of(optimizerManager.canDeleteResourceGroup(name))); + } + + /** check if optimizerGroup can be deleted url = /optimize/containers/get */ + public void getContainers(Context ctx) { + ctx.json( + OkResponse.of( + ResourceContainers.getMetadataList().stream() + .map(ContainerMetadata::getName) + .collect(Collectors.toList()))); + } +} diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/TableController.java b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/TableController.java index 43a71e785d..1fd1f596fa 100644 --- 
a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/TableController.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/TableController.java @@ -32,6 +32,7 @@ import org.apache.amoro.hive.utils.HiveTableUtil; import org.apache.amoro.hive.utils.UpgradeHiveTableUtil; import org.apache.amoro.mixed.CatalogLoader; +import org.apache.amoro.process.ProcessStatus; import org.apache.amoro.properties.CatalogMetaProperties; import org.apache.amoro.properties.HiveTableProperties; import org.apache.amoro.server.catalog.ServerCatalog; @@ -148,7 +149,7 @@ public void getTableDetail(Context ctx) { tableService.getServerTableIdentifier( TableIdentifier.of(catalog, database, tableName).buildTableIdentifier())); if (serverTableIdentifier.isPresent()) { - TableRuntime tableRuntime = tableService.getRuntime(serverTableIdentifier.get()); + TableRuntime tableRuntime = tableService.getRuntime(serverTableIdentifier.get().getId()); tableSummary.setOptimizingStatus(tableRuntime.getOptimizingStatus().name()); OptimizingEvaluator.PendingInput tableRuntimeSummary = tableRuntime.getTableSummary(); if (tableRuntimeSummary != null) { @@ -308,6 +309,8 @@ public void getOptimizingProcesses(Context ctx) { String catalog = ctx.pathParam("catalog"); String db = ctx.pathParam("db"); String table = ctx.pathParam("table"); + String type = ctx.queryParam("type"); + String status = ctx.queryParam("status"); Integer page = ctx.queryParamAsClass("page", Integer.class).getOrDefault(1); Integer pageSize = ctx.queryParamAsClass("pageSize", Integer.class).getOrDefault(20); @@ -319,15 +322,30 @@ public void getOptimizingProcesses(Context ctx) { Preconditions.checkState(serverCatalog.tableExists(db, table), "no such table"); TableIdentifier tableIdentifier = TableIdentifier.of(catalog, db, table); + ProcessStatus processStatus = + StringUtils.isBlank(status) ? 
null : ProcessStatus.valueOf(status); Pair, Integer> optimizingProcessesInfo = tableDescriptor.getOptimizingProcessesInfo( - tableIdentifier.buildTableIdentifier(), limit, offset); + tableIdentifier.buildTableIdentifier(), type, processStatus, limit, offset); List result = optimizingProcessesInfo.getLeft(); int total = optimizingProcessesInfo.getRight(); ctx.json(OkResponse.of(PageResult.of(result, total))); } + public void getOptimizingTypes(Context ctx) { + String catalog = ctx.pathParam("catalog"); + String db = ctx.pathParam("db"); + String table = ctx.pathParam("table"); + TableIdentifier tableIdentifier = TableIdentifier.of(catalog, db, table); + ServerCatalog serverCatalog = tableService.getServerCatalog(catalog); + Preconditions.checkState(serverCatalog.tableExists(db, table), "no such table"); + + Map values = + tableDescriptor.getTableOptimizingTypes(tableIdentifier.buildTableIdentifier()); + ctx.json(OkResponse.of(values)); + } + /** * Get tasks of optimizing process. * @@ -497,18 +515,16 @@ public void getTableList(Context ctx) { ServerCatalog serverCatalog = tableService.getServerCatalog(catalog); Function formatToType = format -> { - switch (format) { - case MIXED_HIVE: - case MIXED_ICEBERG: - return TableMeta.TableType.ARCTIC.toString(); - case PAIMON: - return TableMeta.TableType.PAIMON.toString(); - case ICEBERG: - return TableMeta.TableType.ICEBERG.toString(); - case HUDI: - return TableMeta.TableType.HUDI.toString(); - default: - throw new IllegalStateException("Unknown format"); + if (format.equals(TableFormat.MIXED_HIVE) || format.equals(TableFormat.MIXED_ICEBERG)) { + return TableMeta.TableType.ARCTIC.toString(); + } else if (format.equals(TableFormat.PAIMON)) { + return TableMeta.TableType.PAIMON.toString(); + } else if (format.equals(TableFormat.ICEBERG)) { + return TableMeta.TableType.ICEBERG.toString(); + } else if (format.equals(TableFormat.HUDI)) { + return TableMeta.TableType.HUDI.toString(); + } else { + return format.toString(); } }; 
@@ -656,7 +672,9 @@ public void cancelOptimizingProcess(Context ctx) { tableService.getServerTableIdentifier( TableIdentifier.of(catalog, db, table).buildTableIdentifier()); TableRuntime tableRuntime = - serverTableIdentifier != null ? tableService.getRuntime(serverTableIdentifier) : null; + serverTableIdentifier != null + ? tableService.getRuntime(serverTableIdentifier.getId()) + : null; Preconditions.checkArgument( tableRuntime != null diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingQueue.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingQueue.java index dd192c1298..77ec96cfc1 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingQueue.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingQueue.java @@ -36,7 +36,6 @@ import org.apache.amoro.server.resource.QuotaProvider; import org.apache.amoro.server.table.TableManager; import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableRuntimeMeta; import org.apache.amoro.shade.guava32.com.google.common.annotations.VisibleForTesting; import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; import org.apache.amoro.shade.guava32.com.google.common.collect.Lists; @@ -93,7 +92,7 @@ public OptimizingQueue( ResourceGroup optimizerGroup, QuotaProvider quotaProvider, Executor planExecutor, - List tableRuntimeMetaList, + List tableRuntimeList, int maxPlanningParallelism) { Preconditions.checkNotNull(optimizerGroup, "Optimizer group can not be null"); this.planExecutor = planExecutor; @@ -106,14 +105,12 @@ public OptimizingQueue( new OptimizerGroupMetrics( optimizerGroup.getName(), MetricManager.getInstance().getGlobalRegistry(), this); this.metrics.register(); - tableRuntimeMetaList.forEach(this::initTableRuntime); + tableRuntimeList.forEach(this::initTableRuntime); } - private void initTableRuntime(TableRuntimeMeta tableRuntimeMeta) { - TableRuntime tableRuntime = 
tableRuntimeMeta.getTableRuntime(); - if (tableRuntime.getOptimizingStatus().isProcessing() - && tableRuntimeMeta.getOptimizingProcessId() != 0) { - tableRuntime.recover(new TableOptimizingProcess(tableRuntimeMeta)); + private void initTableRuntime(TableRuntime tableRuntime) { + if (tableRuntime.getOptimizingStatus().isProcessing() && tableRuntime.getProcessId() != 0) { + tableRuntime.recover(new TableOptimizingProcess(tableRuntime)); } if (tableRuntime.isOptimizingEnabled()) { @@ -122,7 +119,7 @@ private void initTableRuntime(TableRuntimeMeta tableRuntimeMeta) { if (!tableRuntime.getOptimizingStatus().isProcessing()) { scheduler.addTable(tableRuntime); } else if (tableRuntime.getOptimizingStatus() != OptimizingStatus.COMMITTING) { - tableQueue.offer(new TableOptimizingProcess(tableRuntimeMeta)); + tableQueue.offer(new TableOptimizingProcess(tableRuntime)); } } else { OptimizingProcess process = tableRuntime.getOptimizingProcess(); @@ -387,21 +384,21 @@ public TableOptimizingProcess(OptimizingPlanner planner) { beginAndPersistProcess(); } - public TableOptimizingProcess(TableRuntimeMeta tableRuntimeMeta) { - processId = tableRuntimeMeta.getOptimizingProcessId(); - tableRuntime = tableRuntimeMeta.getTableRuntime(); - optimizingType = tableRuntimeMeta.getOptimizingType(); - targetSnapshotId = tableRuntimeMeta.getTargetSnapshotId(); - targetChangeSnapshotId = tableRuntimeMeta.getTargetChangeSnapshotId(); - planTime = tableRuntimeMeta.getPlanTime(); - if (tableRuntimeMeta.getFromSequence() != null) { - fromSequence = tableRuntimeMeta.getFromSequence(); + public TableOptimizingProcess(TableRuntime tableRuntime) { + processId = tableRuntime.getProcessId(); + this.tableRuntime = tableRuntime; + optimizingType = tableRuntime.getOptimizingType(); + targetSnapshotId = tableRuntime.getTargetSnapshotId(); + targetChangeSnapshotId = tableRuntime.getTargetChangeSnapshotId(); + planTime = tableRuntime.getLastPlanTime(); + if (tableRuntime.getFromSequence() != null) { + 
fromSequence = tableRuntime.getFromSequence(); } - if (tableRuntimeMeta.getToSequence() != null) { - toSequence = tableRuntimeMeta.getToSequence(); + if (tableRuntime.getToSequence() != null) { + toSequence = tableRuntime.getToSequence(); } if (this.status != OptimizingProcess.Status.CLOSED) { - tableRuntimeMeta.getTableRuntime().recover(this); + tableRuntime.recover(this); } loadTaskRuntimes(this); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingStatus.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingStatus.java index 7faca66261..5e34be9ea9 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingStatus.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/OptimizingStatus.java @@ -19,20 +19,23 @@ package org.apache.amoro.server.optimizing; public enum OptimizingStatus { - FULL_OPTIMIZING("full", true), - MAJOR_OPTIMIZING("major", true), - MINOR_OPTIMIZING("minor", true), - COMMITTING("committing", true), - PLANNING("planning", false), - PENDING("pending", false), - IDLE("idle", false); + FULL_OPTIMIZING("full", true, 100), + MAJOR_OPTIMIZING("major", true, 200), + MINOR_OPTIMIZING("minor", true, 300), + COMMITTING("committing", true, 400), + PLANNING("planning", false, 500), + PENDING("pending", false, 600), + IDLE("idle", false, 700); private final String displayValue; private final boolean isProcessing; - OptimizingStatus(String displayValue, boolean isProcessing) { + private final int code; + + OptimizingStatus(String displayValue, boolean isProcessing, int code) { this.displayValue = displayValue; this.isProcessing = isProcessing; + this.code = code; } public boolean isProcessing() { @@ -42,4 +45,17 @@ public boolean isProcessing() { public String displayValue() { return displayValue; } + + public int getCode() { + return code; + } + + public static OptimizingStatus ofCode(int code) { + for (OptimizingStatus status : values()) { + if (status.getCode() == 
code) { + return status; + } + } + return null; + } } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/maintainer/TableMaintainer.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/maintainer/TableMaintainer.java index 73cd661f7c..a6ce202de5 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/maintainer/TableMaintainer.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/maintainer/TableMaintainer.java @@ -60,9 +60,9 @@ default void cleanDanglingDeleteFiles(TableRuntime tableRuntime) { static TableMaintainer ofTable(AmoroTable amoroTable) { TableFormat format = amoroTable.format(); - if (format == TableFormat.MIXED_HIVE || format == TableFormat.MIXED_ICEBERG) { + if (format.in(TableFormat.MIXED_HIVE, TableFormat.MIXED_ICEBERG)) { return new MixedTableMaintainer((MixedTable) amoroTable.originalTable()); - } else if (format == TableFormat.ICEBERG) { + } else if (TableFormat.ICEBERG.equals(format)) { return new IcebergTableMaintainer((Table) amoroTable.originalTable()); } else { throw new RuntimeException("Unsupported table type" + amoroTable.originalTable().getClass()); diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/CommonPartitionEvaluator.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/CommonPartitionEvaluator.java index 013f11f87a..8038e97618 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/CommonPartitionEvaluator.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/CommonPartitionEvaluator.java @@ -24,6 +24,7 @@ import org.apache.amoro.shade.guava32.com.google.common.base.MoreObjects; import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; import org.apache.amoro.shade.guava32.com.google.common.collect.Sets; +import org.apache.amoro.utils.TableFileUtil; import org.apache.iceberg.ContentFile; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileContent; @@ 
-226,8 +227,12 @@ public boolean fileShouldRewrite(DataFile dataFile, List> deletes public boolean segmentShouldRewritePos(DataFile dataFile, List> deletes) { Preconditions.checkArgument(!isFragmentFile(dataFile), "Unsupported fragment file."); - if (deletes.stream().filter(delete -> delete.content() == FileContent.POSITION_DELETES).count() - >= 2) { + long posDeleteFileCount = + deletes.stream().filter(delete -> delete.content() == FileContent.POSITION_DELETES).count(); + if (posDeleteFileCount == 1) { + return !TableFileUtil.isOptimizingPosDeleteFile( + dataFile.path().toString(), deletes.get(0).path().toString()); + } else if (posDeleteFileCount > 1) { combinePosSegmentFileCount++; return true; } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingEvaluator.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingEvaluator.java index 765bdf3bb1..9be5be7974 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingEvaluator.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingEvaluator.java @@ -37,11 +37,14 @@ import org.apache.amoro.utils.MixedTableUtil; import org.apache.amoro.utils.TablePropertyUtil; import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Snapshot; +import org.apache.iceberg.SnapshotSummary; import org.apache.iceberg.StructLike; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.util.Pair; +import org.apache.iceberg.util.PropertyUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,7 +80,7 @@ public TableRuntime getTableRuntime() { protected void initEvaluator() { long startTime = System.currentTimeMillis(); TableFileScanHelper tableFileScanHelper; - if (TableFormat.ICEBERG == mixedTable.format()) { + if (TableFormat.ICEBERG.equals(mixedTable.format())) { tableFileScanHelper = 
new IcebergTableFileScanHelper(mixedTable.asUnkeyedTable(), currentSnapshot.snapshotId()); } else { @@ -142,7 +145,7 @@ private Map partitionProperties(Pair partit } protected PartitionEvaluator buildEvaluator(Pair partition) { - if (TableFormat.ICEBERG == mixedTable.format()) { + if (TableFormat.ICEBERG.equals(mixedTable.format())) { return new CommonPartitionEvaluator(tableRuntime, partition, System.currentTimeMillis()); } else { Map partitionProperties = partitionProperties(partition); @@ -177,6 +180,13 @@ public PendingInput getPendingInput() { if (!isInitialized) { initEvaluator(); } + // Dangling delete files will cause the data scanned by TableScan + // to be inconsistent with the snapshot summary of iceberg + if (TableFormat.ICEBERG == mixedTable.format()) { + Snapshot snapshot = mixedTable.asUnkeyedTable().snapshot(currentSnapshot.snapshotId()); + return new PendingInput(partitionPlanMap.values(), snapshot); + } + return new PendingInput(partitionPlanMap.values()); } @@ -191,37 +201,73 @@ public static class PendingInput { @JsonIgnore private final Map> partitions = Maps.newHashMap(); + private int totalFileCount = 0; + private long totalFileSize = 0L; + private long totalFileRecords = 0L; private int dataFileCount = 0; - private long dataFileSize = 0; - private long dataFileRecords = 0; + private long dataFileSize = 0L; + private long dataFileRecords = 0L; private int equalityDeleteFileCount = 0; private int positionalDeleteFileCount = 0; private long positionalDeleteBytes = 0L; private long equalityDeleteBytes = 0L; private long equalityDeleteFileRecords = 0L; private long positionalDeleteFileRecords = 0L; + private int danglingDeleteFileCount = 0; private int healthScore = -1; // -1 means not calculated public PendingInput() {} public PendingInput(Collection evaluators) { + initialize(evaluators); + totalFileCount = dataFileCount + positionalDeleteFileCount + equalityDeleteFileCount; + totalFileSize = dataFileSize + positionalDeleteBytes + 
equalityDeleteBytes; + totalFileRecords = dataFileRecords + positionalDeleteFileRecords + equalityDeleteFileRecords; + } + + public PendingInput(Collection evaluators, Snapshot snapshot) { + initialize(evaluators); + Map summary = snapshot.summary(); + int totalDeleteFiles = + PropertyUtil.propertyAsInt(summary, SnapshotSummary.TOTAL_DELETE_FILES_PROP, 0); + int totalDataFiles = + PropertyUtil.propertyAsInt(summary, SnapshotSummary.TOTAL_DATA_FILES_PROP, 0); + totalFileRecords = PropertyUtil.propertyAsInt(summary, SnapshotSummary.TOTAL_RECORDS_PROP, 0); + totalFileSize = PropertyUtil.propertyAsLong(summary, SnapshotSummary.TOTAL_FILE_SIZE_PROP, 0); + totalFileCount = totalDeleteFiles + totalDataFiles; + danglingDeleteFileCount = + totalDeleteFiles - equalityDeleteFileCount - positionalDeleteFileCount; + } + + private void initialize(Collection evaluators) { double totalHealthScore = 0; for (PartitionEvaluator evaluator : evaluators) { - partitions - .computeIfAbsent(evaluator.getPartition().first(), ignore -> Sets.newHashSet()) - .add(evaluator.getPartition().second()); - dataFileCount += evaluator.getFragmentFileCount() + evaluator.getSegmentFileCount(); - dataFileSize += evaluator.getFragmentFileSize() + evaluator.getSegmentFileSize(); - dataFileRecords += evaluator.getFragmentFileRecords() + evaluator.getSegmentFileRecords(); - positionalDeleteBytes += evaluator.getPosDeleteFileSize(); - positionalDeleteFileRecords += evaluator.getPosDeleteFileRecords(); - positionalDeleteFileCount += evaluator.getPosDeleteFileCount(); - equalityDeleteBytes += evaluator.getEqualityDeleteFileSize(); - equalityDeleteFileRecords += evaluator.getEqualityDeleteFileRecords(); - equalityDeleteFileCount += evaluator.getEqualityDeleteFileCount(); + addPartitionData(evaluator); totalHealthScore += evaluator.getHealthScore(); } - healthScore = (int) Math.ceil(totalHealthScore / evaluators.size()); + healthScore = avgHealthScore(totalHealthScore, evaluators.size()); + } + + private void 
addPartitionData(PartitionEvaluator evaluator) { + partitions + .computeIfAbsent(evaluator.getPartition().first(), ignore -> Sets.newHashSet()) + .add(evaluator.getPartition().second()); + dataFileCount += evaluator.getFragmentFileCount() + evaluator.getSegmentFileCount(); + dataFileSize += evaluator.getFragmentFileSize() + evaluator.getSegmentFileSize(); + dataFileRecords += evaluator.getFragmentFileRecords() + evaluator.getSegmentFileRecords(); + positionalDeleteBytes += evaluator.getPosDeleteFileSize(); + positionalDeleteFileRecords += evaluator.getPosDeleteFileRecords(); + positionalDeleteFileCount += evaluator.getPosDeleteFileCount(); + equalityDeleteBytes += evaluator.getEqualityDeleteFileSize(); + equalityDeleteFileRecords += evaluator.getEqualityDeleteFileRecords(); + equalityDeleteFileCount += evaluator.getEqualityDeleteFileCount(); + } + + private int avgHealthScore(double totalHealthScore, int partitionCount) { + if (partitionCount == 0) { + return 0; + } + return (int) Math.ceil(totalHealthScore / partitionCount); } public Map> getPartitions() { @@ -268,9 +314,28 @@ public int getHealthScore() { return healthScore; } + public int getTotalFileCount() { + return totalFileCount; + } + + public long getTotalFileSize() { + return totalFileSize; + } + + public long getTotalFileRecords() { + return totalFileRecords; + } + + public int getDanglingDeleteFileCount() { + return danglingDeleteFileCount; + } + @Override public String toString() { return MoreObjects.toStringHelper(this) + .add("totalFileCount", totalFileCount) + .add("totalFileSize", totalFileSize) + .add("totalFileRecords", totalFileRecords) .add("partitions", partitions) .add("dataFileCount", dataFileCount) .add("dataFileSize", dataFileSize) @@ -282,6 +347,7 @@ public String toString() { .add("equalityDeleteFileRecords", equalityDeleteFileRecords) .add("positionalDeleteFileRecords", positionalDeleteFileRecords) .add("healthScore", healthScore) + .add("danglingDeleteFileCount", 
danglingDeleteFileCount) .toString(); } } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingPlanner.java b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingPlanner.java index f0c30eb43c..5985a5a342 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingPlanner.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/optimizing/plan/OptimizingPlanner.java @@ -233,7 +233,7 @@ public PartitionPlannerFactory( } public PartitionEvaluator buildPartitionPlanner(Pair partition) { - if (TableFormat.ICEBERG == mixedTable.format()) { + if (TableFormat.ICEBERG.equals(mixedTable.format())) { return new IcebergPartitionPlan(tableRuntime, mixedTable, partition, planTime); } else { if (TableTypeUtil.isHive(mixedTable)) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/SqlSessionFactoryProvider.java b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/SqlSessionFactoryProvider.java index 6a59d34a8b..4560e5a4e7 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/SqlSessionFactoryProvider.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/SqlSessionFactoryProvider.java @@ -33,7 +33,9 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.pool2.impl.BaseObjectPoolConfig; import org.apache.ibatis.jdbc.ScriptRunner; +import org.apache.ibatis.mapping.DatabaseIdProvider; import org.apache.ibatis.mapping.Environment; +import org.apache.ibatis.mapping.VendorDatabaseIdProvider; import org.apache.ibatis.session.Configuration; import org.apache.ibatis.session.SqlSession; import org.apache.ibatis.session.SqlSessionFactory; @@ -53,8 +55,10 @@ import java.nio.file.Paths; import java.sql.Connection; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import java.time.Duration; +import java.util.Properties; public class SqlSessionFactoryProvider { private static final Logger LOG 
= LoggerFactory.getLogger(SqlSessionFactoryProvider.class); @@ -73,7 +77,7 @@ public static SqlSessionFactoryProvider getInstance() { private volatile SqlSessionFactory sqlSessionFactory; - public void init(Configurations config) { + public void init(Configurations config) throws SQLException { BasicDataSource dataSource = new BasicDataSource(); dataSource.setUrl(config.getString(AmoroManagementConf.DB_CONNECTION_URL)); dataSource.setDriverClassName(config.getString(AmoroManagementConf.DB_DRIVER_CLASS_NAME)); @@ -111,6 +115,14 @@ public void init(Configurations config) { configuration.addMapper(PlatformFileMapper.class); configuration.addMapper(ResourceMapper.class); configuration.addMapper(TableBlockerMapper.class); + + DatabaseIdProvider provider = new VendorDatabaseIdProvider(); + Properties properties = new Properties(); + properties.setProperty("MySQL", "mysql"); + properties.setProperty("PostgreSQL", "postgres"); + properties.setProperty("Derby", "derby"); + provider.setProperties(properties); + configuration.setDatabaseId(provider.getDatabaseId(dataSource)); if (sqlSessionFactory == null) { synchronized (this) { if (sqlSessionFactory == null) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntimeMeta.java b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/TableRuntimeMeta.java similarity index 93% rename from amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntimeMeta.java rename to amoro-ams/src/main/java/org/apache/amoro/server/persistence/TableRuntimeMeta.java index 9fd8c05ea5..d94db0a25d 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntimeMeta.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/TableRuntimeMeta.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.amoro.server.table; +package org.apache.amoro.server.persistence; import org.apache.amoro.TableFormat; import org.apache.amoro.config.TableConfiguration; @@ -27,6 +27,7 @@ import java.util.Map; +/** The class for table used when transfer data from/to database. */ public class TableRuntimeMeta { private long tableId; private String catalogName; @@ -58,24 +59,8 @@ public class TableRuntimeMeta { private Map fromSequence; private Map toSequence; - private TableRuntime tableRuntime; - public TableRuntimeMeta() {} - public TableRuntime constructTableRuntime(TableManager initializer) { - if (tableRuntime == null) { - tableRuntime = new TableRuntime(this, initializer); - } - return tableRuntime; - } - - public TableRuntime getTableRuntime() { - if (tableRuntime == null) { - throw new IllegalStateException("TableRuntime is not constructed yet."); - } - return tableRuntime; - } - public long getTargetSnapshotId() { return targetSnapshotId; } @@ -212,10 +197,6 @@ public void setTableSummary(OptimizingEvaluator.PendingInput tableSummary) { this.tableSummary = tableSummary; } - public void setTableRuntime(TableRuntime tableRuntime) { - this.tableRuntime = tableRuntime; - } - public void setLastOptimizedChangeSnapshotId(long lastOptimizedChangeSnapshotId) { this.lastOptimizedChangeSnapshotId = lastOptimizedChangeSnapshotId; } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/OptimizingStatusConverter.java b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/OptimizingStatusConverter.java new file mode 100644 index 0000000000..d314bb41bc --- /dev/null +++ b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/OptimizingStatusConverter.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.amoro.server.persistence.converter; + +import org.apache.amoro.server.optimizing.OptimizingStatus; +import org.apache.ibatis.type.BaseTypeHandler; +import org.apache.ibatis.type.JdbcType; +import org.apache.ibatis.type.MappedJdbcTypes; +import org.apache.ibatis.type.MappedTypes; + +import java.sql.CallableStatement; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +@MappedJdbcTypes(JdbcType.INTEGER) +@MappedTypes(Enum.class) +public class OptimizingStatusConverter extends BaseTypeHandler { + + @Override + public void setNonNullParameter( + PreparedStatement ps, int i, OptimizingStatus parameter, JdbcType jdbcType) + throws SQLException { + ps.setInt(i, parameter.getCode()); + } + + @Override + public OptimizingStatus getNullableResult(ResultSet rs, String columnName) throws SQLException { + String s = rs.getString(columnName); + return s == null ? null : OptimizingStatus.ofCode(Integer.parseInt(s)); + } + + @Override + public OptimizingStatus getNullableResult(ResultSet rs, int columnIndex) throws SQLException { + String s = rs.getString(columnIndex); + return s == null ? null : OptimizingStatus.ofCode(Integer.parseInt(s)); + } + + @Override + public OptimizingStatus getNullableResult(CallableStatement cs, int columnIndex) + throws SQLException { + String s = cs.getString(columnIndex); + return s == null ? 
null : OptimizingStatus.ofCode(Integer.parseInt(s)); + } +} diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/TableFormatConverter.java b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/TableFormatConverter.java new file mode 100644 index 0000000000..ada463cd47 --- /dev/null +++ b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/converter/TableFormatConverter.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.amoro.server.persistence.converter; + +import org.apache.amoro.TableFormat; +import org.apache.ibatis.type.JdbcType; +import org.apache.ibatis.type.MappedJdbcTypes; +import org.apache.ibatis.type.MappedTypes; +import org.apache.ibatis.type.TypeHandler; + +import java.sql.CallableStatement; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +@MappedTypes(TableFormat.class) +@MappedJdbcTypes(JdbcType.VARCHAR) +public class TableFormatConverter implements TypeHandler { + + @Override + public void setParameter(PreparedStatement ps, int i, TableFormat parameter, JdbcType jdbcType) + throws SQLException { + if (parameter == null) { + ps.setString(i, ""); + } else { + ps.setString(i, parameter.name()); + } + } + + @Override + public TableFormat getResult(ResultSet rs, String columnName) throws SQLException { + String res = rs.getString(columnName); + if (res == null) { + return null; + } + return TableFormat.valueOf(res); + } + + @Override + public TableFormat getResult(ResultSet rs, int columnIndex) throws SQLException { + String res = rs.getString(columnIndex); + if (res == null) { + return null; + } + return TableFormat.valueOf(res); + } + + @Override + public TableFormat getResult(CallableStatement cs, int columnIndex) throws SQLException { + String res = cs.getString(columnIndex); + if (res == null) { + return null; + } + return TableFormat.valueOf(res); + } +} diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/TableMetaMapper.java b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/TableMetaMapper.java index f056dcb5e8..ead0b70066 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/TableMetaMapper.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/persistence/mapper/TableMetaMapper.java @@ -19,13 +19,15 @@ package org.apache.amoro.server.persistence.mapper; import org.apache.amoro.ServerTableIdentifier; +import 
org.apache.amoro.server.persistence.TableRuntimeMeta; import org.apache.amoro.server.persistence.converter.JsonObjectConverter; import org.apache.amoro.server.persistence.converter.Long2TsConverter; import org.apache.amoro.server.persistence.converter.Map2StringConverter; import org.apache.amoro.server.persistence.converter.MapLong2StringConverter; +import org.apache.amoro.server.persistence.converter.OptimizingStatusConverter; +import org.apache.amoro.server.persistence.converter.TableFormatConverter; import org.apache.amoro.server.table.TableMetadata; import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableRuntimeMeta; import org.apache.ibatis.annotations.Delete; import org.apache.ibatis.annotations.Insert; import org.apache.ibatis.annotations.Options; @@ -77,7 +79,10 @@ Integer decTableCount( @Result(property = "tableIdentifier.tableName", column = "table_name"), @Result(property = "tableIdentifier.database", column = "db_name"), @Result(property = "tableIdentifier.catalog", column = "catalog_name"), - @Result(property = "tableIdentifier.format", column = "format"), + @Result( + property = "tableIdentifier.format", + column = "format", + typeHandler = TableFormatConverter.class), @Result(property = "primaryKey", column = "primary_key"), @Result(property = "tableLocation", column = "table_location"), @Result(property = "baseLocation", column = "base_location"), @@ -112,7 +117,10 @@ Integer decTableCount( @Result(property = "tableIdentifier.catalog", column = "catalog_name"), @Result(property = "tableIdentifier.database", column = "db_name"), @Result(property = "tableIdentifier.tableName", column = "table_name"), - @Result(property = "tableIdentifier.format", column = "format"), + @Result( + property = "tableIdentifier.format", + column = "format", + typeHandler = TableFormatConverter.class), @Result(property = "primaryKey", column = "primary_key"), @Result(property = "tableLocation", column = "table_location"), 
@Result(property = "baseLocation", column = "base_location"), @@ -182,7 +190,10 @@ int commitTableChange( @Result(property = "tableIdentifier.catalog", column = "catalog_name"), @Result(property = "tableIdentifier.database", column = "db_name"), @Result(property = "tableIdentifier.tableName", column = "table_name"), - @Result(property = "tableIdentifier.format", column = "format"), + @Result( + property = "tableIdentifier.format", + column = "format", + typeHandler = TableFormatConverter.class), @Result(property = "primaryKey", column = "primary_key"), @Result(property = "tableLocation", column = "table_location"), @Result(property = "baseLocation", column = "base_location"), @@ -217,7 +228,10 @@ int commitTableChange( @Result(property = "tableIdentifier.catalog", column = "catalog_name"), @Result(property = "tableIdentifier.database", column = "db_name"), @Result(property = "tableIdentifier.tableName", column = "table_name"), - @Result(property = "tableIdentifier.format", column = "format"), + @Result( + property = "tableIdentifier.format", + column = "format", + typeHandler = TableFormatConverter.class), @Result(property = "primaryKey", column = "primary_key"), @Result(property = "tableLocation", column = "table_location"), @Result(property = "baseLocation", column = "base_location"), @@ -244,7 +258,7 @@ TableMetadata selectTableMetaByName( @Insert( "INSERT INTO table_identifier(catalog_name, db_name, table_name, format) VALUES(" + " #{tableIdentifier.catalog}, #{tableIdentifier.database}, #{tableIdentifier.tableName}, " - + " #{tableIdentifier.format})") + + " #{tableIdentifier.format, typeHandler=org.apache.amoro.server.persistence.converter.TableFormatConverter})") @Options(useGeneratedKeys = true, keyProperty = "tableIdentifier.id") void insertTable(@Param("tableIdentifier") ServerTableIdentifier tableIdentifier); @@ -267,7 +281,7 @@ Integer deleteTableIdByName( @Result(property = "tableName", column = "table_name"), @Result(property = "database", column = 
"db_name"), @Result(property = "catalog", column = "catalog_name"), - @Result(property = "format", column = "format"), + @Result(property = "format", column = "format", typeHandler = TableFormatConverter.class) }) ServerTableIdentifier selectTableIdentifier( @Param("catalogName") String catalogName, @@ -282,7 +296,7 @@ ServerTableIdentifier selectTableIdentifier( @Result(property = "catalog", column = "catalog_name"), @Result(property = "database", column = "db_name"), @Result(property = "tableName", column = "table_name"), - @Result(property = "format", column = "format") + @Result(property = "format", column = "format", typeHandler = TableFormatConverter.class) }) List selectTableIdentifiersByDb( @Param("catalogName") String catalogName, @Param("databaseName") String databaseName); @@ -295,7 +309,7 @@ List selectTableIdentifiersByDb( @Result(property = "catalog", column = "catalog_name"), @Result(property = "database", column = "db_name"), @Result(property = "tableName", column = "table_name"), - @Result(property = "format", column = "format") + @Result(property = "format", column = "format", typeHandler = TableFormatConverter.class) }) List selectTableIdentifiersByCatalog( @Param("catalogName") String catalogName); @@ -306,7 +320,7 @@ List selectTableIdentifiersByCatalog( @Result(property = "catalog", column = "catalog_name"), @Result(property = "database", column = "db_name"), @Result(property = "tableName", column = "table_name"), - @Result(property = "format", column = "format") + @Result(property = "format", column = "format", typeHandler = TableFormatConverter.class) }) List selectAllTableIdentifiers(); @@ -321,7 +335,8 @@ List selectTableIdentifiersByCatalog( + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," + " last_full_optimizing_time = #{runtime.lastFullOptimizingTime," + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," - + " optimizing_status = #{runtime.optimizingStatus}," + + " 
optimizing_status_code = #{runtime.optimizingStatus," + + "typeHandler=org.apache.amoro.server.persistence.converter.OptimizingStatusConverter}," + " optimizing_status_start_time = #{runtime.currentStatusStartTime," + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," + " optimizing_process_id = #{runtime.processId}," @@ -342,7 +357,7 @@ List selectTableIdentifiersByCatalog( "INSERT INTO table_runtime (table_id, catalog_name, db_name, table_name, current_snapshot_id," + " current_change_snapshotId, last_optimized_snapshotId, last_optimized_change_snapshotId," + " last_major_optimizing_time, last_minor_optimizing_time," - + " last_full_optimizing_time, optimizing_status, optimizing_status_start_time, optimizing_process_id," + + " last_full_optimizing_time, optimizing_status_code, optimizing_status_start_time, optimizing_process_id," + " optimizer_group, table_config, pending_input, table_summary) VALUES" + " (#{runtime.tableIdentifier.id}, #{runtime.tableIdentifier.catalog}," + " #{runtime.tableIdentifier.database}, #{runtime.tableIdentifier.tableName}, #{runtime" @@ -354,7 +369,8 @@ List selectTableIdentifiersByCatalog( + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," + " #{runtime.lastFullOptimizingTime," + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," - + " #{runtime.optimizingStatus}," + + " #{runtime.optimizingStatus," + + " typeHandler=org.apache.amoro.server.persistence.converter.OptimizingStatusConverter}," + " #{runtime.currentStatusStartTime, " + " typeHandler=org.apache.amoro.server.persistence.converter.Long2TsConverter}," + " #{runtime.processId}, #{runtime.optimizerGroup}," @@ -367,10 +383,10 @@ List selectTableIdentifiersByCatalog( void insertTableRuntime(@Param("runtime") TableRuntime runtime); @Select( - "SELECT a.table_id, a.catalog_name, a.db_name, a.table_name, i.format, a.current_snapshot_id, a" - + ".current_change_snapshotId, 
a.last_optimized_snapshotId, a.last_optimized_change_snapshotId," - + " a.last_major_optimizing_time, a.last_minor_optimizing_time, a.last_full_optimizing_time, a.optimizing_status," - + " a.optimizing_status_start_time, a.optimizing_process_id," + "SELECT a.table_id, a.catalog_name, a.db_name, a.table_name, i.format, a.current_snapshot_id," + + " a.current_change_snapshotId, a.last_optimized_snapshotId, a.last_optimized_change_snapshotId," + + " a.last_major_optimizing_time, a.last_minor_optimizing_time, a.last_full_optimizing_time," + + " a.optimizing_status_code, a.optimizing_status_start_time, a.optimizing_process_id," + " a.optimizer_group, a.table_config, a.pending_input, a.table_summary, b.optimizing_type, b.target_snapshot_id," + " b.target_change_snapshot_id, b.plan_time, b.from_sequence, b.to_sequence FROM table_runtime a" + " INNER JOIN table_identifier i ON a.table_id = i.table_id " @@ -380,7 +396,7 @@ List selectTableIdentifiersByCatalog( @Result(property = "catalogName", column = "catalog_name"), @Result(property = "dbName", column = "db_name"), @Result(property = "tableName", column = "table_name"), - @Result(property = "format", column = "format"), + @Result(property = "format", column = "format", typeHandler = TableFormatConverter.class), @Result(property = "currentSnapshotId", column = "current_snapshot_id"), @Result(property = "currentChangeSnapshotId", column = "current_change_snapshotId"), @Result(property = "lastOptimizedSnapshotId", column = "last_optimized_snapshotId"), @@ -399,7 +415,10 @@ List selectTableIdentifiersByCatalog( property = "lastFullOptimizingTime", column = "last_full_optimizing_time", typeHandler = Long2TsConverter.class), - @Result(property = "tableStatus", column = "optimizing_status"), + @Result( + property = "tableStatus", + column = "optimizing_status_code", + typeHandler = OptimizingStatusConverter.class), @Result( property = "currentStatusStartTime", column = "optimizing_status_start_time", @@ -420,7 +439,7 @@ List 
selectTableIdentifiersByCatalog( typeHandler = JsonObjectConverter.class), @Result(property = "optimizingType", column = "optimizing_type"), @Result(property = "targetSnapshotId", column = "target_snapshot_id"), - @Result(property = "targetChangeSnapshotId", column = "target_change_napshot_id"), + @Result(property = "targetChangeSnapshotId", column = "target_change_snapshot_id"), @Result(property = "planTime", column = "plan_time", typeHandler = Long2TsConverter.class), @Result( property = "fromSequence", @@ -432,4 +451,78 @@ List selectTableIdentifiersByCatalog( typeHandler = MapLong2StringConverter.class) }) List selectTableRuntimeMetas(); + + @Select( + "") + @Results({ + @Result(property = "tableId", column = "table_id"), + @Result(property = "catalogName", column = "catalog_name"), + @Result(property = "dbName", column = "db_name"), + @Result(property = "tableName", column = "table_name"), + @Result(property = "currentSnapshotId", column = "current_snapshot_id"), + @Result(property = "currentChangeSnapshotId", column = "current_change_snapshotId"), + @Result(property = "lastOptimizedSnapshotId", column = "last_optimized_snapshotId"), + @Result( + property = "lastOptimizedChangeSnapshotId", + column = "last_optimized_change_snapshotId"), + @Result( + property = "lastMajorOptimizingTime", + column = "last_major_optimizing_time", + typeHandler = Long2TsConverter.class), + @Result( + property = "lastMinorOptimizingTime", + column = "last_minor_optimizing_time", + typeHandler = Long2TsConverter.class), + @Result( + property = "lastFullOptimizingTime", + column = "last_full_optimizing_time", + typeHandler = Long2TsConverter.class), + @Result( + property = "tableStatus", + column = "optimizing_status_code", + typeHandler = OptimizingStatusConverter.class), + @Result( + property = "currentStatusStartTime", + column = "optimizing_status_start_time", + typeHandler = Long2TsConverter.class), + @Result(property = "optimizingProcessId", column = "optimizing_process_id"), + 
@Result(property = "optimizerGroup", column = "optimizer_group"), + @Result( + property = "pendingInput", + column = "pending_input", + typeHandler = JsonObjectConverter.class), + @Result( + property = "tableSummary", + column = "table_summary", + typeHandler = JsonObjectConverter.class), + @Result( + property = "tableConfig", + column = "table_config", + typeHandler = JsonObjectConverter.class), + }) + List selectTableRuntimesForOptimizerGroup( + @Param("optimizerGroup") String optimizerGroup, + @Param("fuzzyDbName") String fuzzyDbName, + @Param("fuzzyTableName") String fuzzyTableName, + @Param("limitCount") int limitCount, + @Param("offsetNum") int offset); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableService.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableService.java index d872f3ce3c..2ce56a069c 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableService.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/DefaultTableService.java @@ -42,6 +42,7 @@ import org.apache.amoro.server.manager.MetricManager; import org.apache.amoro.server.optimizing.OptimizingStatus; import org.apache.amoro.server.persistence.StatedPersistentBase; +import org.apache.amoro.server.persistence.TableRuntimeMeta; import org.apache.amoro.server.persistence.mapper.CatalogMetaMapper; import org.apache.amoro.server.persistence.mapper.TableBlockerMapper; import org.apache.amoro.server.persistence.mapper.TableMetaMapper; @@ -58,6 +59,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; + +import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -85,8 +89,7 @@ public class DefaultTableService extends StatedPersistentBase implements TableSe private final Map internalCatalogMap = new ConcurrentHashMap<>(); private final Map externalCatalogMap = new ConcurrentHashMap<>(); - private final Map tableRuntimeMap 
= - new ConcurrentHashMap<>(); + private final Map tableRuntimeMap = new ConcurrentHashMap<>(); private final ScheduledExecutorService tableExplorerScheduler = Executors.newSingleThreadScheduledExecutor( @@ -143,12 +146,6 @@ public ServerCatalog getServerCatalog(String catalogName) { .orElseThrow(() -> new ObjectNotExistsException("Catalog " + catalogName)); } - @Override - public InternalCatalog getInternalCatalog(String catalogName) { - return Optional.ofNullable(internalCatalogMap.get(catalogName)) - .orElseThrow(() -> new ObjectNotExistsException("Catalog " + catalogName)); - } - @Override public void createCatalog(CatalogMeta catalogMeta) { checkStarted(); @@ -213,7 +210,7 @@ public void dropTableMetadata(TableIdentifier tableIdentifier, boolean deleteDat } ServerTableIdentifier serverTableIdentifier = internalCatalog.dropTable(database, table); - Optional.ofNullable(tableRuntimeMap.remove(serverTableIdentifier)) + Optional.ofNullable(tableRuntimeMap.remove(serverTableIdentifier.getId())) .ifPresent( tableRuntime -> { if (headHandler != null) { @@ -344,6 +341,26 @@ public List getBlockers(TableIdentifier tableIdentifier) { .collect(Collectors.toList()); } + @Override + public List getTableRuntimes( + String optimizerGroup, + @Nullable String fuzzyDbName, + @Nullable String fuzzyTableName, + int limit, + int offset) { + checkStarted(); + return getAs( + TableMetaMapper.class, + mapper -> + mapper.selectTableRuntimesForOptimizerGroup( + optimizerGroup, fuzzyDbName, fuzzyTableName, limit, offset)); + } + + public InternalCatalog getInternalCatalog(String catalogName) { + return Optional.ofNullable(internalCatalogMap.get(catalogName)) + .orElseThrow(() -> new ObjectNotExistsException("Catalog " + catalogName)); + } + @Override public void addHandlerChain(RuntimeHandlerChain handler) { checkNotStarted(); @@ -376,15 +393,17 @@ public void initialize() { List tableRuntimeMetaList = getAs(TableMetaMapper.class, TableMetaMapper::selectTableRuntimeMetas); + List 
tableRuntimes = new ArrayList<>(tableRuntimeMetaList.size()); tableRuntimeMetaList.forEach( tableRuntimeMeta -> { - TableRuntime tableRuntime = tableRuntimeMeta.constructTableRuntime(this); - tableRuntimeMap.put(tableRuntime.getTableIdentifier(), tableRuntime); + TableRuntime tableRuntime = new TableRuntime(tableRuntimeMeta, this); + tableRuntimeMap.put(tableRuntimeMeta.getTableId(), tableRuntime); tableRuntime.registerMetric(MetricManager.getInstance().getGlobalRegistry()); + tableRuntimes.add(tableRuntime); }); if (headHandler != null) { - headHandler.initialize(tableRuntimeMetaList); + headHandler.initialize(tableRuntimes); } if (tableExplorerExecutors == null) { int threadCount = @@ -411,7 +430,7 @@ public void initialize() { private TableRuntime getAndCheckExist(ServerTableIdentifier tableIdentifier) { Preconditions.checkArgument(tableIdentifier != null, "tableIdentifier cannot be null"); - TableRuntime tableRuntime = getRuntime(tableIdentifier); + TableRuntime tableRuntime = getRuntime(tableIdentifier.getId()); if (tableRuntime == null) { throw new ObjectNotExistsException(tableIdentifier); } @@ -447,15 +466,15 @@ private ServerTableIdentifier getOrSyncServerTableIdentifier(TableIdentifier id) } @Override - public TableRuntime getRuntime(ServerTableIdentifier tableIdentifier) { + public TableRuntime getRuntime(Long tableId) { checkStarted(); - return tableRuntimeMap.get(tableIdentifier); + return tableRuntimeMap.get(tableId); } @Override - public boolean contains(ServerTableIdentifier tableIdentifier) { + public boolean contains(Long tableId) { checkStarted(); - return tableRuntimeMap.containsKey(tableIdentifier); + return tableRuntimeMap.containsKey(tableId); } public void dispose() { @@ -639,13 +658,13 @@ private boolean triggerTableAdded( AmoroTable table = catalog.loadTable( serverTableIdentifier.getDatabase(), serverTableIdentifier.getTableName()); - if (TableFormat.ICEBERG == table.format()) { + if (TableFormat.ICEBERG.equals(table.format())) { if 
(TablePropertyUtil.isMixedTableStore(table.properties())) { return false; } } TableRuntime tableRuntime = new TableRuntime(serverTableIdentifier, this, table.properties()); - tableRuntimeMap.put(serverTableIdentifier, tableRuntime); + tableRuntimeMap.put(serverTableIdentifier.getId(), tableRuntime); tableRuntime.registerMetric(MetricManager.getInstance().getGlobalRegistry()); if (headHandler != null) { headHandler.fireTableAdded(table, tableRuntime); @@ -659,7 +678,7 @@ private void revertTableRuntimeAdded( externalCatalog.getServerTableIdentifier( tableIdentity.getDatabase(), tableIdentity.getTableName()); if (tableIdentifier != null) { - tableRuntimeMap.remove(tableIdentifier); + tableRuntimeMap.remove(tableIdentifier.getId()); } } @@ -671,7 +690,7 @@ private void disposeTable(ServerTableIdentifier tableIdentifier) { tableIdentifier.getCatalog(), tableIdentifier.getDatabase(), tableIdentifier.getTableName())); - Optional.ofNullable(tableRuntimeMap.remove(tableIdentifier)) + Optional.ofNullable(tableRuntimeMap.remove(tableIdentifier.getId())) .ifPresent( tableRuntime -> { if (headHandler != null) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/RuntimeHandlerChain.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/RuntimeHandlerChain.java index 51419878d6..7554eca0ae 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/RuntimeHandlerChain.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/RuntimeHandlerChain.java @@ -51,16 +51,15 @@ protected void appendNext(RuntimeHandlerChain handler) { } } - public final void initialize(List tableRuntimeMetaList) { - List supportedtableRuntimeMetaList = - tableRuntimeMetaList.stream() - .filter( - tableRuntimeMeta -> formatSupported(tableRuntimeMeta.getTableRuntime().getFormat())) + public final void initialize(List tableRuntimes) { + List supportedtableRuntimeList = + tableRuntimes.stream() + .filter(runtime -> formatSupported(runtime.getFormat())) 
.collect(Collectors.toList()); - initHandler(supportedtableRuntimeMetaList); + initHandler(supportedtableRuntimeList); initialized = true; if (next != null) { - next.initialize(tableRuntimeMetaList); + next.initialize(tableRuntimes); } } @@ -147,7 +146,7 @@ protected abstract void handleConfigChanged( protected abstract void handleTableRemoved(TableRuntime tableRuntime); - protected abstract void initHandler(List tableRuntimeMetaList); + protected abstract void initHandler(List tableRuntimeList); protected abstract void doDispose(); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableManager.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableManager.java index 4c8c3a8d01..4735bca2d7 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableManager.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableManager.java @@ -31,9 +31,9 @@ public interface TableManager extends TableRuntimeHandler { */ AmoroTable loadTable(ServerTableIdentifier tableIdentifier); - TableRuntime getRuntime(ServerTableIdentifier tableIdentifier); + TableRuntime getRuntime(Long tableId); - default boolean contains(ServerTableIdentifier tableIdentifier) { - return getRuntime(tableIdentifier) != null; + default boolean contains(Long tableId) { + return getRuntime(tableId) != null; } } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntime.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntime.java index 475ab28f1d..3499b9306f 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntime.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableRuntime.java @@ -33,6 +33,7 @@ import org.apache.amoro.server.optimizing.TaskRuntime; import org.apache.amoro.server.optimizing.plan.OptimizingEvaluator; import org.apache.amoro.server.persistence.StatedPersistentBase; +import org.apache.amoro.server.persistence.TableRuntimeMeta; import 
org.apache.amoro.server.persistence.mapper.OptimizingMapper; import org.apache.amoro.server.persistence.mapper.TableBlockerMapper; import org.apache.amoro.server.persistence.mapper.TableMetaMapper; @@ -94,7 +95,16 @@ public class TableRuntime extends StatedPersistentBase { private final TableOrphanFilesCleaningMetrics orphanFilesCleaningMetrics; private final TableSummaryMetrics tableSummaryMetrics; - protected TableRuntime( + private long targetSnapshotId; + + private long targetChangeSnapshotId; + + private Map fromSequence; + private Map toSequence; + + private OptimizingType optimizingType; + + public TableRuntime( ServerTableIdentifier tableIdentifier, TableRuntimeHandler tableHandler, Map properties) { @@ -110,9 +120,10 @@ protected TableRuntime( tableSummaryMetrics = new TableSummaryMetrics(tableIdentifier); } - protected TableRuntime(TableRuntimeMeta tableRuntimeMeta, TableRuntimeHandler tableHandler) { + public TableRuntime(TableRuntimeMeta tableRuntimeMeta, TableRuntimeHandler tableHandler) { Preconditions.checkNotNull(tableRuntimeMeta, "TableRuntimeMeta must not be null."); Preconditions.checkNotNull(tableHandler, "TableRuntimeHandler must not be null."); + this.tableHandler = tableHandler; this.tableIdentifier = ServerTableIdentifier.of( @@ -146,6 +157,12 @@ protected TableRuntime(TableRuntimeMeta tableRuntimeMeta, TableRuntimeHandler ta orphanFilesCleaningMetrics = new TableOrphanFilesCleaningMetrics(tableIdentifier); tableSummaryMetrics = new TableSummaryMetrics(tableIdentifier); tableSummaryMetrics.refresh(tableSummary); + + this.targetSnapshotId = tableRuntimeMeta.getTargetSnapshotId(); + this.targetChangeSnapshotId = tableRuntimeMeta.getTargetChangeSnapshotId(); + this.fromSequence = tableRuntimeMeta.getFromSequence(); + this.toSequence = tableRuntimeMeta.getToSequence(); + this.optimizingType = tableRuntimeMeta.getOptimizingType(); } public void recover(OptimizingProcess optimizingProcess) { @@ -531,6 +548,46 @@ public void setLastPlanTime(long 
lastPlanTime) { this.lastPlanTime = lastPlanTime; } + public long getTargetSnapshotId() { + return targetSnapshotId; + } + + public void setTargetSnapshotId(long targetSnapshotId) { + this.targetSnapshotId = targetSnapshotId; + } + + public long getTargetChangeSnapshotId() { + return targetChangeSnapshotId; + } + + public void setTargetChangeSnapshotId(long targetChangeSnapshotId) { + this.targetChangeSnapshotId = targetChangeSnapshotId; + } + + public Map getFromSequence() { + return fromSequence; + } + + public void setFromSequence(Map fromSequence) { + this.fromSequence = fromSequence; + } + + public Map getToSequence() { + return toSequence; + } + + public void setToSequence(Map toSequence) { + this.toSequence = toSequence; + } + + public long getProcessId() { + return processId; + } + + public OptimizingType getOptimizingType() { + return optimizingType; + } + @Override public String toString() { return MoreObjects.toStringHelper(this) @@ -544,6 +601,10 @@ public String toString() { .add("lastFullOptimizingTime", lastFullOptimizingTime) .add("lastMinorOptimizingTime", lastMinorOptimizingTime) .add("tableConfiguration", tableConfiguration) + .add("targetSnapshotId", targetSnapshotId) + .add("targetChangeSnapshotId", targetChangeSnapshotId) + .add("fromSequence", fromSequence) + .add("toSequence", toSequence) .toString(); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableService.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableService.java index 1cb564ac18..5cbd257018 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableService.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableService.java @@ -23,6 +23,9 @@ import org.apache.amoro.api.Blocker; import org.apache.amoro.api.TableIdentifier; import org.apache.amoro.server.catalog.CatalogService; +import org.apache.amoro.server.persistence.TableRuntimeMeta; + +import javax.annotation.Nullable; import java.util.List; import java.util.Map; 
@@ -88,4 +91,22 @@ Blocker block( * @return block list */ List getBlockers(TableIdentifier tableIdentifier); + + /** + * Get the table info from database for given parameters. + * + * @param optimizerGroup The optimizer group of the table associated to. Will be null if we want + * the info for all groups. + * @param fuzzyDbName the fuzzy db name used to filter the result, will be null if no filter set. + * @param fuzzyTableName the fuzzy table name used to filter the result, will be null if no filter + * set. + * @param limit How many entries we want to retrieve. + * @param offset The entries we'll skip when retrieving the entries. + */ + List getTableRuntimes( + String optimizerGroup, + @Nullable String fuzzyDbName, + @Nullable String fuzzyTableName, + int limit, + int offset); } diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableSummaryMetrics.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableSummaryMetrics.java index 748f400eee..4ad4f55d17 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/TableSummaryMetrics.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/TableSummaryMetrics.java @@ -62,6 +62,12 @@ public class TableSummaryMetrics { + public static final MetricDefine TABLE_SUMMARY_DANGLING_DELETE_FILES = + defineGauge("table_summary_dangling_delete_files") + .withDescription("Number of dangling delete files in the table") + .withTags("catalog", "database", "table") + .build(); + // table summary files size metrics public static final MetricDefine TABLE_SUMMARY_TOTAL_FILES_SIZE = defineGauge("table_summary_total_files_size") @@ -128,22 +134,10 @@ public class TableSummaryMetrics { private final ServerTableIdentifier identifier; private final List registeredMetricKeys = Lists.newArrayList(); + private OptimizingEvaluator.PendingInput tableSummary = new OptimizingEvaluator.PendingInput(); private MetricRegistry globalRegistry; - private long 
totalFiles = 0L; - private long dataFiles = 0L; - private long positionDeleteFiles = 0L; - private long equalityDeleteFiles = 0L; - private long totalFilesSize = 0L; - private long positionDeleteFilesSize = 0L; - private long dataFilesSize = 0L; - private long equalityDeleteFilesSize = 0L; - private long positionDeleteFilesRecords = 0L; - private long totalRecords = 0L; - private long dataFilesRecords = 0L; - private long equalityDeleteFilesRecords = 0L; private long snapshots = 0L; - private long healthScore = -1L; // -1 means not calculated public TableSummaryMetrics(ServerTableIdentifier identifier) { this.identifier = identifier; @@ -167,44 +161,72 @@ private void registerMetric(MetricRegistry registry, MetricDefine define, Metric public void register(MetricRegistry registry) { if (globalRegistry == null) { // register files number metrics - registerMetric(registry, TABLE_SUMMARY_TOTAL_FILES, (Gauge) () -> totalFiles); - registerMetric(registry, TABLE_SUMMARY_DATA_FILES, (Gauge) () -> dataFiles); registerMetric( - registry, TABLE_SUMMARY_POSITION_DELETE_FILES, (Gauge) () -> positionDeleteFiles); + registry, + TABLE_SUMMARY_TOTAL_FILES, + (Gauge) () -> (long) tableSummary.getTotalFileCount()); + registerMetric( + registry, + TABLE_SUMMARY_DATA_FILES, + (Gauge) () -> (long) tableSummary.getDataFileCount()); + registerMetric( + registry, + TABLE_SUMMARY_POSITION_DELETE_FILES, + (Gauge) () -> (long) tableSummary.getPositionalDeleteFileCount()); + registerMetric( + registry, + TABLE_SUMMARY_EQUALITY_DELETE_FILES, + (Gauge) () -> (long) tableSummary.getEqualityDeleteFileCount()); registerMetric( - registry, TABLE_SUMMARY_EQUALITY_DELETE_FILES, (Gauge) () -> equalityDeleteFiles); + registry, + TABLE_SUMMARY_DANGLING_DELETE_FILES, + (Gauge) () -> (long) tableSummary.getDanglingDeleteFileCount()); // register files size metrics - registerMetric(registry, TABLE_SUMMARY_TOTAL_FILES_SIZE, (Gauge) () -> totalFilesSize); - registerMetric(registry, 
TABLE_SUMMARY_DATA_FILES_SIZE, (Gauge) () -> dataFilesSize); + registerMetric( + registry, + TABLE_SUMMARY_TOTAL_FILES_SIZE, + (Gauge) () -> tableSummary.getTotalFileSize()); + registerMetric( + registry, + TABLE_SUMMARY_DATA_FILES_SIZE, + (Gauge) () -> tableSummary.getDataFileSize()); registerMetric( registry, TABLE_SUMMARY_POSITION_DELETE_FILES_SIZE, - (Gauge) () -> positionDeleteFilesSize); + (Gauge) () -> tableSummary.getPositionalDeleteBytes()); registerMetric( registry, TABLE_SUMMARY_EQUALITY_DELETE_FILES_SIZE, - (Gauge) () -> equalityDeleteFilesSize); + (Gauge) () -> tableSummary.getEqualityDeleteBytes()); // register files records metrics - registerMetric(registry, TABLE_SUMMARY_TOTAL_RECORDS, (Gauge) () -> totalRecords); registerMetric( - registry, TABLE_SUMMARY_DATA_FILES_RECORDS, (Gauge) () -> dataFilesRecords); + registry, + TABLE_SUMMARY_TOTAL_RECORDS, + (Gauge) () -> tableSummary.getTotalFileRecords()); + registerMetric( + registry, + TABLE_SUMMARY_DATA_FILES_RECORDS, + (Gauge) () -> tableSummary.getDataFileRecords()); registerMetric( registry, TABLE_SUMMARY_POSITION_DELETE_FILES_RECORDS, - (Gauge) () -> positionDeleteFilesRecords); + (Gauge) () -> tableSummary.getPositionalDeleteFileRecords()); registerMetric( registry, TABLE_SUMMARY_EQUALITY_DELETE_FILES_RECORDS, - (Gauge) () -> equalityDeleteFilesRecords); + (Gauge) () -> tableSummary.getEqualityDeleteFileRecords()); + + // register health score metric + registerMetric( + registry, + TABLE_SUMMARY_HEALTH_SCORE, + (Gauge) () -> (long) tableSummary.getHealthScore()); // register snapshots number metric registerMetric(registry, TABLE_SUMMARY_SNAPSHOTS, (Gauge) () -> snapshots); - // register health score metric - registerMetric(registry, TABLE_SUMMARY_HEALTH_SCORE, (Gauge) () -> healthScore); - globalRegistry = registry; } } @@ -219,31 +241,7 @@ public void refresh(OptimizingEvaluator.PendingInput tableSummary) { if (tableSummary == null) { return; } - totalFiles = - tableSummary.getDataFileCount() - 
+ tableSummary.getEqualityDeleteFileCount() - + tableSummary.getPositionalDeleteFileCount(); - dataFiles = tableSummary.getDataFileCount(); - positionDeleteFiles = tableSummary.getPositionalDeleteFileCount(); - equalityDeleteFiles = tableSummary.getEqualityDeleteFileCount(); - - totalFilesSize = - tableSummary.getDataFileSize() - + tableSummary.getEqualityDeleteBytes() - + tableSummary.getPositionalDeleteBytes(); - positionDeleteFilesSize = tableSummary.getPositionalDeleteBytes(); - dataFilesSize = tableSummary.getDataFileSize(); - equalityDeleteFilesSize = tableSummary.getEqualityDeleteBytes(); - - totalRecords = - tableSummary.getDataFileRecords() - + tableSummary.getEqualityDeleteFileRecords() - + tableSummary.getPositionalDeleteFileRecords(); - positionDeleteFilesRecords = tableSummary.getPositionalDeleteFileRecords(); - dataFilesRecords = tableSummary.getDataFileRecords(); - equalityDeleteFilesRecords = tableSummary.getEqualityDeleteFileRecords(); - - healthScore = tableSummary.getHealthScore(); + this.tableSummary = tableSummary; } public void refreshSnapshots(MixedTable table) { diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/table/executor/BaseTableExecutor.java b/amoro-ams/src/main/java/org/apache/amoro/server/table/executor/BaseTableExecutor.java index e2125fbfb4..ae70cadaa5 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/table/executor/BaseTableExecutor.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/table/executor/BaseTableExecutor.java @@ -25,7 +25,6 @@ import org.apache.amoro.server.table.RuntimeHandlerChain; import org.apache.amoro.server.table.TableManager; import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableRuntimeMeta; import org.apache.amoro.shade.guava32.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -63,10 +62,9 @@ protected BaseTableExecutor(TableManager tableManager, int poolSize) { } 
@Override - protected void initHandler(List tableRuntimeMetaList) { - tableRuntimeMetaList.stream() - .map(tableRuntimeMeta -> tableRuntimeMeta.getTableRuntime()) - .filter(tableRuntime -> enabled(tableRuntime)) + protected void initHandler(List tableRuntimeList) { + tableRuntimeList.stream() + .filter(this::enabled) .forEach( tableRuntime -> { if (scheduledTables.add(tableRuntime.getTableIdentifier())) { @@ -109,7 +107,8 @@ protected String getThreadName() { } private boolean isExecutable(TableRuntime tableRuntime) { - return tableManager.contains(tableRuntime.getTableIdentifier()) && enabled(tableRuntime); + return tableManager.contains(tableRuntime.getTableIdentifier().getId()) + && enabled(tableRuntime); } @Override diff --git a/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java b/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java index 928fbce979..5d5ab39d0b 100644 --- a/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java +++ b/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java @@ -57,7 +57,7 @@ public class InternalTableUtil { */ public static boolean isLegacyMixedIceberg( org.apache.amoro.server.table.TableMetadata internalTableMetadata) { - return TableFormat.MIXED_ICEBERG == internalTableMetadata.getFormat() + return TableFormat.MIXED_ICEBERG.equals(internalTableMetadata.getFormat()) && !Boolean.parseBoolean( internalTableMetadata.getProperties().get(MIXED_ICEBERG_BASED_REST)); } diff --git a/amoro-ams/src/main/resources/derby/ams-derby-init.sql b/amoro-ams/src/main/resources/derby/ams-derby-init.sql index f41cdaf276..ca874b317e 100644 --- a/amoro-ams/src/main/resources/derby/ams-derby-init.sql +++ b/amoro-ams/src/main/resources/derby/ams-derby-init.sql @@ -111,7 +111,7 @@ CREATE TABLE table_runtime ( last_major_optimizing_time TIMESTAMP, last_minor_optimizing_time TIMESTAMP, last_full_optimizing_time TIMESTAMP, - optimizing_status VARCHAR(20) DEFAULT 
'IDLE', + optimizing_status_code INT DEFAULT 700, optimizing_status_start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, optimizing_process_id BIGINT NOT NULL, optimizer_group VARCHAR(64) NOT NULL, diff --git a/amoro-ams/src/main/resources/mysql/ams-mysql-init.sql b/amoro-ams/src/main/resources/mysql/ams-mysql-init.sql index 76e16147a1..3db61746b7 100644 --- a/amoro-ams/src/main/resources/mysql/ams-mysql-init.sql +++ b/amoro-ams/src/main/resources/mysql/ams-mysql-init.sql @@ -122,7 +122,7 @@ CREATE TABLE `table_runtime` `last_major_optimizing_time` timestamp NULL DEFAULT NULL COMMENT 'Latest Major Optimize time for all partitions', `last_minor_optimizing_time` timestamp NULL DEFAULT NULL COMMENT 'Latest Minor Optimize time for all partitions', `last_full_optimizing_time` timestamp NULL DEFAULT NULL COMMENT 'Latest Full Optimize time for all partitions', - `optimizing_status` varchar(20) DEFAULT 'IDLE' COMMENT 'Table optimize status: FULL_OPTIMIZING, MAJOR_OPTIMIZING, MINOR_OPTIMIZING, COMMITTING, PENDING, IDLE', + `optimizing_status_code` int DEFAULT 700 COMMENT 'Table optimize status code: 100(FULL_OPTIMIZING), 200(MAJOR_OPTIMIZING), 300(MINOR_OPTIMIZING), 400(COMMITTING), 500(PLANING), 600(PENDING), 700(IDLE)', `optimizing_status_start_time` timestamp default CURRENT_TIMESTAMP COMMENT 'Table optimize status start time', `optimizing_process_id` bigint(20) NOT NULL COMMENT 'optimizing_procedure UUID', `optimizer_group` varchar(64) NOT NULL, @@ -131,7 +131,8 @@ CREATE TABLE `table_runtime` `pending_input` mediumtext, `table_summary` mediumtext, PRIMARY KEY (`table_id`), - UNIQUE KEY `table_index` (`catalog_name`,`db_name`,`table_name`) + UNIQUE KEY `table_index` (`catalog_name`,`db_name`,`table_name`), + INDEX idx_optimizer_status_and_time (optimizing_status_code, optimizing_status_start_time DESC) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT 'Optimize running information of each table' ROW_FORMAT=DYNAMIC; CREATE TABLE `table_optimizing_process` diff --git 
a/amoro-ams/src/main/resources/mysql/upgrade.sql b/amoro-ams/src/main/resources/mysql/upgrade.sql index 31ab7604da..d075e0f7ca 100644 --- a/amoro-ams/src/main/resources/mysql/upgrade.sql +++ b/amoro-ams/src/main/resources/mysql/upgrade.sql @@ -23,4 +23,29 @@ ALTER TABLE `table_blocker` ADD COLUMN `prev_blocker_id` bigint(20) NOT NULL DEF ALTER TABLE `table_blocker` ADD UNIQUE KEY `uq_prev` (`catalog_name`,`db_name`,`table_name`, `prev_blocker_id`); -- ADD COLUMN table_summary FOR TABLE_RUNTIME -ALTER TABLE `table_runtime` ADD COLUMN `table_summary` mediumtext AFTER `pending_input`; \ No newline at end of file +ALTER TABLE `table_runtime` ADD COLUMN `table_summary` mediumtext AFTER `pending_input`; + +RENAME TABLE table_runtime TO table_runtime_backup; +CREATE TABLE table_runtime LIKE table_runtime_backup; + +ALTER TABLE table_runtime CHANGE COLUMN optimizing_status optimizing_status_code INT DEFAULT 700; +CREATE INDEX idx_optimizer_status_and_time ON table_runtime(optimizing_status_code, optimizing_status_start_time DESC); + +INSERT INTO table_runtime( + `table_id`,`catalog_name`, `db_name`, `table_name`, `current_snapshot_id`,`current_change_snapshotId`, `last_optimized_snapshotId`, + `last_optimized_change_snapshotId`, `last_major_optimizing_time`, `last_minor_optimizing_time`, `last_full_optimizing_time`, + `optimizing_status_code`, `optimizing_status_start_time`, `optimizing_process_id`, `optimizer_group`, `table_config`, + `optimizing_config`, `pending_input`, `table_summary`) +SELECT `table_id`,`catalog_name`, `db_name`, `table_name`, `current_snapshot_id`,`current_change_snapshotId`, `last_optimized_snapshotId`, + `last_optimized_change_snapshotId`, `last_major_optimizing_time`, `last_minor_optimizing_time`, `last_full_optimizing_time`, + CASE + WHEN `optimizing_status` = 'IDLE' THEN 700 + WHEN `optimizing_status` = 'PENDING' THEN 600 + WHEN `optimizing_status` = 'PLANNING' THEN 500 + WHEN `optimizing_status` = 'COMMITTING' THEN 400 + WHEN 
`optimizing_status` = 'MINOR_OPTIMIZING' THEN 300 + WHEN `optimizing_status` = 'MAJOR_OPTIMIZING' THEN 200 + WHEN `optimizing_status` = 'FULL_OPTIMIZING' THEN 100 + END, + `optimizing_status_start_time`, `optimizing_process_id`, `optimizer_group`, `table_config`, `optimizing_config`, `pending_input`, `table_summary` +FROM table_runtime_backup; \ No newline at end of file diff --git a/amoro-ams/src/main/resources/postgres/ams-postgres-init.sql b/amoro-ams/src/main/resources/postgres/ams-postgres-init.sql index 83c3f6d0fe..a7d0ccf6cf 100644 --- a/amoro-ams/src/main/resources/postgres/ams-postgres-init.sql +++ b/amoro-ams/src/main/resources/postgres/ams-postgres-init.sql @@ -182,7 +182,7 @@ CREATE TABLE table_runtime last_major_optimizing_time TIMESTAMP, last_minor_optimizing_time TIMESTAMP, last_full_optimizing_time TIMESTAMP, - optimizing_status VARCHAR(20) DEFAULT 'IDLE', + optimizing_status_code INT DEFAULT 700, optimizing_status_start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, optimizing_process_id BIGINT NOT NULL, optimizer_group VARCHAR(64) NOT NULL, @@ -205,7 +205,8 @@ COMMENT ON COLUMN table_runtime.last_optimized_change_snapshotId IS 'Last optimi COMMENT ON COLUMN table_runtime.last_major_optimizing_time IS 'Latest Major Optimize time for all partitions'; COMMENT ON COLUMN table_runtime.last_minor_optimizing_time IS 'Latest Minor Optimize time for all partitions'; COMMENT ON COLUMN table_runtime.last_full_optimizing_time IS 'Latest Full Optimize time for all partitions'; -COMMENT ON COLUMN table_runtime.optimizing_status IS 'Table optimize status: FULL_OPTIMIZING, MAJOR_OPTIMIZING, MINOR_OPTIMIZING, COMMITTING, PENDING, IDLE'; +COMMENT ON COLUMN table_runtime.optimizing_status_code IS 'Table optimize status code: 100(FULL_OPTIMIZING),' || + ' 200(MAJOR_OPTIMIZING), 300(MINOR_OPTIMIZING), 400(COMMITTING), 500(PLANING), 600(PENDING), 700(IDLE)'; COMMENT ON COLUMN table_runtime.optimizing_status_start_time IS 'Table optimize status start time'; COMMENT ON COLUMN 
table_runtime.optimizing_process_id IS 'Optimizing procedure UUID'; COMMENT ON COLUMN table_runtime.optimizer_group IS 'Optimizer group'; @@ -213,6 +214,7 @@ COMMENT ON COLUMN table_runtime.table_config IS 'Table-specific configuration'; COMMENT ON COLUMN table_runtime.optimizing_config IS 'Optimizing configuration'; COMMENT ON COLUMN table_runtime.pending_input IS 'Pending input data'; COMMENT ON COLUMN table_runtime.table_summary IS 'Table summary data'; +CREATE INDEX idx_optimizer_status_and_time ON table_runtime(optimizing_status_code, optimizing_status_start_time DESC); CREATE TABLE table_optimizing_process ( diff --git a/amoro-ams/src/main/resources/postgres/upgrade-0.7.0-to-0.7.1.sql b/amoro-ams/src/main/resources/postgres/upgrade-0.7.0-to-0.7.1.sql new file mode 100644 index 0000000000..96ef432160 --- /dev/null +++ b/amoro-ams/src/main/resources/postgres/upgrade-0.7.0-to-0.7.1.sql @@ -0,0 +1,39 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+ +ALTER TABLE table_runtime RENAME TO table_runtime_backup; +CREATE TABLE table_runtime (LIKE table_runtime_backup INCLUDING ALL); + +ALTER TABLE table_runtime DROP COLUMN optimizing_status, ADD COLUMN optimizing_status_code INT DEFAULT 700; +CREATE INDEX idx_optimizer_status_and_time ON table_runtime(optimizing_status_code, optimizing_status_start_time DESC); + +INSERT INTO table_runtime( + table_id, catalog_name, db_name, table_name, current_snapshot_id, current_change_snapshotId, last_optimized_snapshotId, + last_optimized_change_snapshotId, last_major_optimizing_time, last_minor_optimizing_time, last_full_optimizing_time, + optimizing_status_code, optimizing_status_start_time, optimizing_process_id, optimizer_group, table_config, + optimizing_config, pending_input) +SELECT table_id, catalog_name, db_name, table_name, current_snapshot_id, current_change_snapshotId, last_optimized_snapshotId, + last_optimized_change_snapshotId, last_major_optimizing_time, last_minor_optimizing_time, last_full_optimizing_time, + CASE + WHEN optimizing_status = 'IDLE' THEN 700 + WHEN optimizing_status = 'PENDING' THEN 600 + WHEN optimizing_status = 'PLANNING' THEN 500 + WHEN optimizing_status = 'COMMITTING' THEN 400 + WHEN optimizing_status = 'MINOR_OPTIMIZING' THEN 300 + WHEN optimizing_status = 'MAJOR_OPTIMIZING' THEN 200 + WHEN optimizing_status = 'FULL_OPTIMIZING' THEN 100 + ELSE 700 END, + optimizing_status_start_time, optimizing_process_id, optimizer_group, table_config, optimizing_config, pending_input +FROM table_runtime_backup; \ No newline at end of file diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/RestCatalogServiceTestBase.java b/amoro-ams/src/test/java/org/apache/amoro/server/RestCatalogServiceTestBase.java index 63b2073aaf..99b1853fa7 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/RestCatalogServiceTestBase.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/RestCatalogServiceTestBase.java @@ 
-120,7 +120,7 @@ protected TableMetadata getTableMetadata(TableIdentifier identifier) { protected TableRuntime getTableRuntime(TableIdentifier identifier) { ServerTableIdentifier serverTableIdentifier = getServerTableIdentifier(identifier); - return tableService.getRuntime(serverTableIdentifier); + return tableService.getRuntime(serverTableIdentifier.getId()); } protected void assertTableRuntime(TableIdentifier identifier, TableFormat format) { diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/TestDefaultOptimizingService.java b/amoro-ams/src/test/java/org/apache/amoro/server/TestDefaultOptimizingService.java index 776a7a72cb..5a237700d7 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/TestDefaultOptimizingService.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/TestDefaultOptimizingService.java @@ -114,7 +114,7 @@ private void initTableWithFiles() { (MixedTable) tableService().loadTable(serverTableIdentifier()).originalTable(); appendData(mixedTable.asUnkeyedTable(), 1); appendData(mixedTable.asUnkeyedTable(), 2); - TableRuntime runtime = tableService().getRuntime(serverTableIdentifier()); + TableRuntime runtime = tableService().getRuntime(serverTableIdentifier().getId()); runtime.refresh(tableService().loadTable(serverTableIdentifier())); } @@ -387,10 +387,13 @@ private void assertTaskCompleted(TaskRuntime taskRuntime) { 0, optimizingService().listTasks(defaultResourceGroup().getName()).size()); Assertions.assertEquals( OptimizingProcess.Status.RUNNING, - tableService().getRuntime(serverTableIdentifier()).getOptimizingProcess().getStatus()); + tableService() + .getRuntime(serverTableIdentifier().getId()) + .getOptimizingProcess() + .getStatus()); Assertions.assertEquals( OptimizingStatus.COMMITTING, - tableService().getRuntime(serverTableIdentifier()).getOptimizingStatus()); + tableService().getRuntime(serverTableIdentifier().getId()).getOptimizingStatus()); } protected void reload() { @@ -415,7 +418,7 @@ public 
TableRuntimeRefresher() { } void refreshPending() { - execute(tableService().getRuntime(serverTableIdentifier())); + execute(tableService().getRuntime(serverTableIdentifier().getId())); } } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestOverviewCache.java b/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestOverviewCache.java index 96a8d789c1..1b59308b9d 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestOverviewCache.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/dashboard/TestOverviewCache.java @@ -92,7 +92,7 @@ private void initTableWithFiles() { .asUnkeyedTable(); appendData(table, 1); appendData(table, 2); - TableRuntime runtime = tableService().getRuntime(serverTableIdentifier()); + TableRuntime runtime = tableService().getRuntime(serverTableIdentifier().getId()); runtime.refresh(tableService().loadTable(serverTableIdentifier())); } @@ -110,7 +110,7 @@ private void appendData(UnkeyedTable table, int id) { void refreshPending() { TableRuntimeRefreshExecutor refresher = new TableRuntimeRefreshExecutor(tableService(), 1, Integer.MAX_VALUE); - refresher.execute(tableService().getRuntime(serverTableIdentifier())); + refresher.execute(tableService().getRuntime(serverTableIdentifier().getId())); refresher.dispose(); } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/OptimizingStatusTest.java b/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/OptimizingStatusTest.java new file mode 100644 index 0000000000..cadce03273 --- /dev/null +++ b/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/OptimizingStatusTest.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.amoro.server.optimizing; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class OptimizingStatusTest { + @Test + public void testOptimizingStatusCodeValue() { + assertEquals(7, OptimizingStatus.values().length); + + assertEquals(OptimizingStatus.FULL_OPTIMIZING, OptimizingStatus.ofCode(100)); + assertEquals(OptimizingStatus.MAJOR_OPTIMIZING, OptimizingStatus.ofCode(200)); + assertEquals(OptimizingStatus.MINOR_OPTIMIZING, OptimizingStatus.ofCode(300)); + assertEquals(OptimizingStatus.COMMITTING, OptimizingStatus.ofCode(400)); + assertEquals(OptimizingStatus.PLANNING, OptimizingStatus.ofCode(500)); + assertEquals(OptimizingStatus.PENDING, OptimizingStatus.ofCode(600)); + assertEquals(OptimizingStatus.IDLE, OptimizingStatus.ofCode(700)); + } +} diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/TestOptimizingQueue.java b/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/TestOptimizingQueue.java index 0032af9bc1..24ce7c0366 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/TestOptimizingQueue.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/optimizing/TestOptimizingQueue.java @@ -46,13 +46,13 @@ import org.apache.amoro.resource.ResourceGroup; import org.apache.amoro.server.manager.MetricManager; import org.apache.amoro.server.metrics.MetricRegistry; +import 
org.apache.amoro.server.persistence.TableRuntimeMeta; import org.apache.amoro.server.resource.OptimizerInstance; import org.apache.amoro.server.resource.OptimizerThread; import org.apache.amoro.server.resource.QuotaProvider; import org.apache.amoro.server.table.AMSTableTestBase; import org.apache.amoro.server.table.TableConfigurations; import org.apache.amoro.server.table.TableRuntime; -import org.apache.amoro.server.table.TableRuntimeMeta; import org.apache.amoro.shade.guava32.com.google.common.collect.ImmutableMap; import org.apache.amoro.shade.guava32.com.google.common.collect.Lists; import org.apache.amoro.table.MixedTable; @@ -105,13 +105,13 @@ protected static ResourceGroup testResourceGroup() { return new ResourceGroup.Builder("test", "local").build(); } - protected OptimizingQueue buildOptimizingGroupService(TableRuntimeMeta tableRuntimeMeta) { + protected OptimizingQueue buildOptimizingGroupService(TableRuntime tableRuntime) { return new OptimizingQueue( tableService(), testResourceGroup(), quotaProvider, planExecutor, - Collections.singletonList(tableRuntimeMeta), + Collections.singletonList(tableRuntime), 1); } @@ -127,7 +127,7 @@ private OptimizingQueue buildOptimizingGroupService() { @Test public void testPollNoTask() { - TableRuntimeMeta tableRuntimeMeta = + TableRuntime tableRuntimeMeta = buildTableRuntimeMeta(OptimizingStatus.PENDING, defaultResourceGroup()); OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); Assert.assertNull(queue.pollTask(0)); @@ -138,25 +138,25 @@ public void testPollNoTask() { public void testRefreshAndReleaseTable() { OptimizingQueue queue = buildOptimizingGroupService(); Assert.assertEquals(0, queue.getSchedulingPolicy().getTableRuntimeMap().size()); - TableRuntimeMeta tableRuntimeMeta = + TableRuntime tableRuntime = buildTableRuntimeMeta(OptimizingStatus.IDLE, defaultResourceGroup()); - queue.refreshTable(tableRuntimeMeta.getTableRuntime()); + queue.refreshTable(tableRuntime); Assert.assertEquals(1, 
queue.getSchedulingPolicy().getTableRuntimeMap().size()); Assert.assertTrue( queue.getSchedulingPolicy().getTableRuntimeMap().containsKey(serverTableIdentifier())); - queue.releaseTable(tableRuntimeMeta.getTableRuntime()); + queue.releaseTable(tableRuntime); Assert.assertEquals(0, queue.getSchedulingPolicy().getTableRuntimeMap().size()); - queue.refreshTable(tableRuntimeMeta.getTableRuntime()); + queue.refreshTable(tableRuntime); Assert.assertEquals(1, queue.getSchedulingPolicy().getTableRuntimeMap().size()); queue.dispose(); } @Test public void testPollTask() { - TableRuntimeMeta tableRuntimeMeta = initTableWithFiles(); - OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); + TableRuntime tableRuntime = initTableWithFiles(); + OptimizingQueue queue = buildOptimizingGroupService(tableRuntime); // 1.poll task TaskRuntime task = queue.pollTask(MAX_POLLING_TIME); @@ -169,7 +169,7 @@ public void testPollTask() { @Test public void testRetryTask() { - TableRuntimeMeta tableRuntimeMeta = initTableWithFiles(); + TableRuntime tableRuntimeMeta = initTableWithFiles(); OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); // 1.poll task @@ -202,8 +202,8 @@ public void testRetryTask() { @Test public void testCommitTask() { - TableRuntimeMeta tableRuntimeMeta = initTableWithFiles(); - OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); + TableRuntime tableRuntime = initTableWithFiles(); + OptimizingQueue queue = buildOptimizingGroupService(tableRuntime); Assert.assertEquals(0, queue.collectTasks().size()); TaskRuntime task = queue.pollTask(MAX_POLLING_TIME); @@ -218,11 +218,11 @@ public void testCommitTask() { Assert.assertEquals(TaskRuntime.Status.SUCCESS, task.getStatus()); // 7.commit - OptimizingProcess optimizingProcess = tableRuntimeMeta.getTableRuntime().getOptimizingProcess(); + OptimizingProcess optimizingProcess = tableRuntime.getOptimizingProcess(); Assert.assertEquals(OptimizingProcess.Status.RUNNING, 
optimizingProcess.getStatus()); optimizingProcess.commit(); Assert.assertEquals(OptimizingProcess.Status.SUCCESS, optimizingProcess.getStatus()); - Assert.assertNull(tableRuntimeMeta.getTableRuntime().getOptimizingProcess()); + Assert.assertNull(tableRuntime.getOptimizingProcess()); // 8.commit again, throw exceptions, and status not changed. Assert.assertThrows(IllegalStateException.class, optimizingProcess::commit); @@ -234,8 +234,8 @@ public void testCommitTask() { @Test public void testCollectingTasks() { - TableRuntimeMeta tableRuntimeMeta = initTableWithFiles(); - OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); + TableRuntime tableRuntime = initTableWithFiles(); + OptimizingQueue queue = buildOptimizingGroupService(tableRuntime); Assert.assertEquals(0, queue.collectTasks().size()); TaskRuntime task = queue.pollTask(MAX_POLLING_TIME); @@ -249,8 +249,8 @@ public void testCollectingTasks() { @Test public void testTaskAndTableMetrics() { - TableRuntimeMeta tableRuntimeMeta = initTableWithFiles(); - OptimizingQueue queue = buildOptimizingGroupService(tableRuntimeMeta); + TableRuntime tableRuntime = initTableWithFiles(); + OptimizingQueue queue = buildOptimizingGroupService(tableRuntime); MetricRegistry registry = MetricManager.getInstance().getGlobalRegistry(); Map tagValues = ImmutableMap.of(GROUP_TAG, testResourceGroup().getName()); @@ -315,7 +315,7 @@ public void testTaskAndTableMetrics() { Assert.assertEquals(0, idleTablesGauge.getValue().longValue()); Assert.assertEquals(1, committingTablesGauge.getValue().longValue()); - OptimizingProcess optimizingProcess = tableRuntimeMeta.getTableRuntime().getOptimizingProcess(); + OptimizingProcess optimizingProcess = tableRuntime.getOptimizingProcess(); optimizingProcess.commit(); Assert.assertEquals(0, queueTasksGauge.getValue().longValue()); Assert.assertEquals(0, executingTasksGauge.getValue().longValue()); @@ -363,21 +363,19 @@ public void testAddAndRemoveOptimizers() { queue.dispose(); } - 
protected TableRuntimeMeta initTableWithFiles() { + protected TableRuntime initTableWithFiles() { MixedTable mixedTable = (MixedTable) tableService().loadTable(serverTableIdentifier()).originalTable(); appendData(mixedTable.asUnkeyedTable(), 1); appendData(mixedTable.asUnkeyedTable(), 2); - TableRuntimeMeta tableRuntimeMeta = + TableRuntime tableRuntime = buildTableRuntimeMeta(OptimizingStatus.PENDING, defaultResourceGroup()); - TableRuntime runtime = tableRuntimeMeta.getTableRuntime(); - runtime.refresh(tableService().loadTable(serverTableIdentifier())); - return tableRuntimeMeta; + tableRuntime.refresh(tableService().loadTable(serverTableIdentifier())); + return tableRuntime; } - private TableRuntimeMeta buildTableRuntimeMeta( - OptimizingStatus status, ResourceGroup resourceGroup) { + private TableRuntime buildTableRuntimeMeta(OptimizingStatus status, ResourceGroup resourceGroup) { MixedTable mixedTable = (MixedTable) tableService().loadTable(serverTableIdentifier()).originalTable(); TableRuntimeMeta tableRuntimeMeta = new TableRuntimeMeta(); @@ -389,8 +387,7 @@ private TableRuntimeMeta buildTableRuntimeMeta( tableRuntimeMeta.setTableStatus(status); tableRuntimeMeta.setTableConfig(TableConfigurations.parseTableConfig(mixedTable.properties())); tableRuntimeMeta.setOptimizerGroup(resourceGroup.getName()); - tableRuntimeMeta.constructTableRuntime(tableService()); - return tableRuntimeMeta; + return new TableRuntime(tableRuntimeMeta, tableService()); } private void appendData(UnkeyedTable table, int id) { diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/table/AMSTableTestBase.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/AMSTableTestBase.java index f9a8a11d20..647a2dbc87 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/table/AMSTableTestBase.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/table/AMSTableTestBase.java @@ -174,18 +174,14 @@ protected void createTable() { TableMetadata tableMetadata = tableMetadata(); 
tableService().createTable(catalogMeta.getCatalogName(), tableMetadata); } else { - switch (catalogTestHelper.tableFormat()) { - case ICEBERG: - createIcebergTable(); - break; - case MIXED_ICEBERG: - createMixedIcebergTable(); - break; - case MIXED_HIVE: - createMixedHiveTable(); - break; - default: - throw new IllegalStateException("un-support format"); + if (catalogTestHelper.tableFormat().equals(TableFormat.ICEBERG)) { + createIcebergTable(); + } else if (catalogTestHelper.tableFormat().equals(TableFormat.MIXED_ICEBERG)) { + createMixedIcebergTable(); + } else if (catalogTestHelper.tableFormat().equals(TableFormat.MIXED_HIVE)) { + createMixedHiveTable(); + } else { + throw new IllegalStateException("un-support format"); } tableService().exploreExternalCatalog(); } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/table/DerbyPersistence.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/DerbyPersistence.java index 7c88fbf8b7..96ca0b9aed 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/table/DerbyPersistence.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/table/DerbyPersistence.java @@ -28,8 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.UncheckedIOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -62,8 +60,8 @@ public class DerbyPersistence extends ExternalResource { LOG.info("Deleted resources in derby persistent."); })); truncateAllTables(); - } catch (IOException e) { - throw new UncheckedIOException(e); + } catch (Exception e) { + throw new RuntimeException(e); } } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeHandler.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeHandler.java index f629012981..641ac0e866 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeHandler.java +++ 
b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeHandler.java @@ -92,14 +92,15 @@ public void testInitialize() throws Exception { tableService.initialize(); Assert.assertEquals(1, handler.getInitTables().size()); Assert.assertEquals( - createTableId.getId().longValue(), handler.getInitTables().get(0).getTableId()); + (Long) createTableId.getId().longValue(), + handler.getInitTables().get(0).getTableIdentifier().getId()); // test change properties MixedTable mixedTable = (MixedTable) tableService().loadTable(createTableId).originalTable(); mixedTable.updateProperties().set(TableProperties.ENABLE_ORPHAN_CLEAN, "true").commit(); tableService() - .getRuntime(createTableId) + .getRuntime(createTableId.getId()) .refresh(tableService.loadTable(serverTableIdentifier())); Assert.assertEquals(1, handler.getConfigChangedTables().size()); validateTableRuntime(handler.getConfigChangedTables().get(0).first()); @@ -131,7 +132,7 @@ protected DefaultTableService tableService() { static class TestHandler extends RuntimeHandlerChain { - private final List initTables = Lists.newArrayList(); + private final List initTables = Lists.newArrayList(); private final List> statusChangedTables = Lists.newArrayList(); private final List> configChangedTables = @@ -162,8 +163,8 @@ protected void handleTableRemoved(TableRuntime tableRuntime) { } @Override - protected void initHandler(List tableRuntimeMetaList) { - initTables.addAll(tableRuntimeMetaList); + protected void initHandler(List tableRuntimeList) { + initTables.addAll(tableRuntimeList); } @Override @@ -171,7 +172,7 @@ protected void doDispose() { disposed = true; } - public List getInitTables() { + public List getInitTables() { return initTables; } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeManager.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeManager.java index 277e8e0e34..b6b8ce1e77 100644 --- 
a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeManager.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableRuntimeManager.java @@ -69,30 +69,9 @@ public void testLoadTable() { "unknown", "unknown", "unknown", serverTableIdentifier().getFormat()))); } - @Test - public void testTableContains() { - Assert.assertTrue(tableService().contains(serverTableIdentifier())); - ServerTableIdentifier copyId = - ServerTableIdentifier.of( - null, - serverTableIdentifier().getCatalog(), - serverTableIdentifier().getDatabase(), - serverTableIdentifier().getTableName(), - serverTableIdentifier().getFormat()); - Assert.assertFalse(tableService().contains(copyId)); - copyId = - ServerTableIdentifier.of( - serverTableIdentifier().getId(), - serverTableIdentifier().getCatalog(), - serverTableIdentifier().getDatabase(), - "unknown", - serverTableIdentifier().getFormat()); - Assert.assertFalse(tableService().contains(copyId)); - } - @Test public void testTableRuntime() { - TableRuntime tableRuntime = tableService().getRuntime(serverTableIdentifier()); + TableRuntime tableRuntime = tableService().getRuntime(serverTableIdentifier().getId()); validateTableRuntime(tableRuntime); } } diff --git a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableService.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableService.java index dea85f1a73..cbf8010c6e 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableService.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableService.java @@ -341,7 +341,7 @@ private boolean isBlocked(BlockableOperation operation) { } private boolean isTableRuntimeBlocked(BlockableOperation operation) { - return tableService().getRuntime(serverTableIdentifier()).isBlocked(operation); + return tableService().getRuntime(serverTableIdentifier().getId()).isBlocked(operation); } private void assertBlockerCnt(int i) { diff --git 
a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableSummaryMetrics.java b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableSummaryMetrics.java index 9bc914ef78..d5d42186c9 100644 --- a/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableSummaryMetrics.java +++ b/amoro-ams/src/test/java/org/apache/amoro/server/table/TestTableSummaryMetrics.java @@ -18,6 +18,7 @@ package org.apache.amoro.server.table; +import static org.apache.amoro.server.table.TableSummaryMetrics.TABLE_SUMMARY_DANGLING_DELETE_FILES; import static org.apache.amoro.server.table.TableSummaryMetrics.TABLE_SUMMARY_DATA_FILES; import static org.apache.amoro.server.table.TableSummaryMetrics.TABLE_SUMMARY_DATA_FILES_RECORDS; import static org.apache.amoro.server.table.TableSummaryMetrics.TABLE_SUMMARY_DATA_FILES_SIZE; @@ -104,7 +105,7 @@ private void initTableWithFiles() { .asUnkeyedTable(); appendData(table); appendPosDelete(table); - TableRuntime runtime = tableService().getRuntime(serverTableIdentifier()); + TableRuntime runtime = tableService().getRuntime(serverTableIdentifier().getId()); runtime.refresh(tableService().loadTable(serverTableIdentifier())); } @@ -142,7 +143,7 @@ private void appendPosDelete(UnkeyedTable table) { void refreshPending() { TableRuntimeRefreshExecutor refresher = new TableRuntimeRefreshExecutor(tableService(), 1, Integer.MAX_VALUE); - refresher.execute(tableService().getRuntime(serverTableIdentifier())); + refresher.execute(tableService().getRuntime(serverTableIdentifier().getId())); refresher.dispose(); } @@ -154,6 +155,8 @@ public void testTableSummaryMetrics() { Gauge dataFiles = getMetric(metrics, identifier, TABLE_SUMMARY_DATA_FILES); Gauge posDelFiles = getMetric(metrics, identifier, TABLE_SUMMARY_POSITION_DELETE_FILES); Gauge eqDelFiles = getMetric(metrics, identifier, TABLE_SUMMARY_EQUALITY_DELETE_FILES); + Gauge danglingDelFiles = + getMetric(metrics, identifier, TABLE_SUMMARY_DANGLING_DELETE_FILES); Gauge totalSize = 
getMetric(metrics, identifier, TABLE_SUMMARY_TOTAL_FILES_SIZE); Gauge dataSize = getMetric(metrics, identifier, TABLE_SUMMARY_DATA_FILES_SIZE); @@ -177,6 +180,7 @@ public void testTableSummaryMetrics() { Assertions.assertEquals(0, dataFiles.getValue()); Assertions.assertEquals(0, posDelFiles.getValue()); Assertions.assertEquals(0, eqDelFiles.getValue()); + Assertions.assertEquals(0, danglingDelFiles.getValue()); Assertions.assertEquals(0, totalSize.getValue()); Assertions.assertEquals(0, dataSize.getValue()); diff --git a/amoro-common/src/main/java/org/apache/amoro/TableFormat.java b/amoro-common/src/main/java/org/apache/amoro/TableFormat.java index 31b94b146a..62400267a8 100644 --- a/amoro-common/src/main/java/org/apache/amoro/TableFormat.java +++ b/amoro-common/src/main/java/org/apache/amoro/TableFormat.java @@ -18,24 +18,128 @@ package org.apache.amoro; +import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; +import org.apache.amoro.shade.guava32.com.google.common.collect.Maps; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.core.JsonGenerator; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.core.JsonParser; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.core.JsonProcessingException; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.core.TreeNode; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.DeserializationContext; +import org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; +import java.io.Serializable; +import java.util.Map; + /** * Table formats Amoro supported * * @since 0.4.0 */ -public enum TableFormat { - ICEBERG, - MIXED_ICEBERG, - MIXED_HIVE, - PAIMON, - HUDI; +public final class TableFormat implements Serializable { + private static final Map registeredFormats = Maps.newConcurrentMap(); + + /** Open-source table formats */ + public static final TableFormat ICEBERG = register("ICEBERG"); + + 
public static final TableFormat MIXED_ICEBERG = register("MIXED_ICEBERG"); + public static final TableFormat MIXED_HIVE = register("MIXED_HIVE"); + public static final TableFormat PAIMON = register("PAIMON"); + public static final TableFormat HUDI = register("HUDI"); + + /** + * Get all registered formats + * + * @return registered formats + */ + public static TableFormat[] values() { + return registeredFormats.values().toArray(new TableFormat[0]); + } + + /** + * Register a new TableFormat + * + * @param name table format name + * @return TableFormat. + */ + public static TableFormat register(String name) { + return registeredFormats.computeIfAbsent(name, s -> new TableFormat(name)); + } + + /** + * Get TableFormat by name + * + * @param name name + * @return TableFormat + */ + public static TableFormat valueOf(String name) { + return registeredFormats.get(name); + } + + private final String name; + + private TableFormat(String name) { + Preconditions.checkNotNull(name, "TableFormat name should not be null"); + this.name = name; + } + + public String name() { + return name; + } public boolean in(TableFormat... 
tableFormats) { for (TableFormat tableFormat : tableFormats) { - if (this == tableFormat) { + if (this.equals(tableFormat)) { return true; } } return false; } + + @Override + public String toString() { + return this.name; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other == null || getClass() != other.getClass()) { + return false; + } + return this.name.equals(((TableFormat) other).name); + } + + @Override + public int hashCode() { + return this.name.hashCode(); + } + + /** Json deserializer for TableFormat */ + public static class JsonDeserializer + extends org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.JsonDeserializer< + TableFormat> { + + @Override + public TableFormat deserialize( + JsonParser jsonParser, DeserializationContext deserializationContext) + throws IOException, JsonProcessingException { + TreeNode node = jsonParser.getCodec().readTree(jsonParser); + return TableFormat.valueOf(node.toString()); + } + } + + /** Json serializer for TableFormat */ + public static class JsonSerializer + extends org.apache.amoro.shade.jackson2.com.fasterxml.jackson.databind.JsonSerializer< + TableFormat> { + + @Override + public void serialize( + TableFormat tableFormat, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeString(tableFormat.name()); + } + } } diff --git a/amoro-common/src/main/java/org/apache/amoro/config/OptimizingConfig.java b/amoro-common/src/main/java/org/apache/amoro/config/OptimizingConfig.java index 53b8c4476e..ef92ac5fb1 100644 --- a/amoro-common/src/main/java/org/apache/amoro/config/OptimizingConfig.java +++ b/amoro-common/src/main/java/org/apache/amoro/config/OptimizingConfig.java @@ -294,7 +294,8 @@ public boolean equals(Object o) { && baseHashBucket == that.baseHashBucket && baseRefreshInterval == that.baseRefreshInterval && hiveRefreshInterval == that.hiveRefreshInterval - && 
Objects.equal(optimizerGroup, that.optimizerGroup); + && Objects.equal(optimizerGroup, that.optimizerGroup) + && Objects.equal(minPlanInterval, that.minPlanInterval); } @Override @@ -318,7 +319,8 @@ public int hashCode() { fullRewriteAllFiles, baseHashBucket, baseRefreshInterval, - hiveRefreshInterval); + hiveRefreshInterval, + minPlanInterval); } @Override diff --git a/amoro-common/src/main/java/org/apache/amoro/process/ProcessStatus.java b/amoro-common/src/main/java/org/apache/amoro/process/ProcessStatus.java index 2e5d8df825..952c7e4f13 100644 --- a/amoro-common/src/main/java/org/apache/amoro/process/ProcessStatus.java +++ b/amoro-common/src/main/java/org/apache/amoro/process/ProcessStatus.java @@ -19,11 +19,11 @@ package org.apache.amoro.process; /** - * Status of any {@link AmoroProcess}. Only UNKNOWN, RUNNING, FINISHED(SUCCESS, CLOSED, FAILED) are - * necessary Stage classes are used to define multiple phases of one process such as OptimizingStage + * Status of any {@link AmoroProcess}. 
*/ public enum ProcessStatus { UNKNOWN, + PENDING, RUNNING, SUBMITTED, SUCCESS, diff --git a/amoro-common/src/main/java/org/apache/amoro/table/TableMetaStore.java b/amoro-common/src/main/java/org/apache/amoro/table/TableMetaStore.java index db6827bca5..4bf9b732fc 100644 --- a/amoro-common/src/main/java/org/apache/amoro/table/TableMetaStore.java +++ b/amoro-common/src/main/java/org/apache/amoro/table/TableMetaStore.java @@ -767,7 +767,7 @@ public TableMetaStore build() { return new TableMetaStore(configuration); } else { readProperties(); - if (!AUTH_METHOD_AK_SK.equals(authMethod)) { + if (AUTH_METHOD_SIMPLE.equals(authMethod) || AUTH_METHOD_KERBEROS.equals(authMethod)) { Preconditions.checkNotNull(hdfsSite); Preconditions.checkNotNull(coreSite); } diff --git a/amoro-common/src/main/java/org/apache/amoro/table/descriptor/FormatTableDescriptor.java b/amoro-common/src/main/java/org/apache/amoro/table/descriptor/FormatTableDescriptor.java index baee06a366..3c1126a8f1 100644 --- a/amoro-common/src/main/java/org/apache/amoro/table/descriptor/FormatTableDescriptor.java +++ b/amoro-common/src/main/java/org/apache/amoro/table/descriptor/FormatTableDescriptor.java @@ -20,9 +20,11 @@ import org.apache.amoro.AmoroTable; import org.apache.amoro.TableFormat; +import org.apache.amoro.process.ProcessStatus; import org.apache.commons.lang3.tuple.Pair; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutorService; /** API for obtaining metadata information of various formats. */ @@ -60,7 +62,10 @@ List getTableFiles( /** Get the paged optimizing process information of the {@link AmoroTable} and total size. */ Pair, Integer> getOptimizingProcessesInfo( - AmoroTable amoroTable, int limit, int offset); + AmoroTable amoroTable, String type, ProcessStatus status, int limit, int offset); + + /** Return the optimizing types of the {@link AmoroTable} is supported. 
*/ + Map getTableOptimizingTypes(AmoroTable amoroTable); /** Get the paged optimizing process tasks information of the {@link AmoroTable}. */ List getOptimizingTaskInfos(AmoroTable amoroTable, String processId); diff --git a/amoro-common/src/test/java/org/apache/amoro/catalog/CatalogTestHelpers.java b/amoro-common/src/test/java/org/apache/amoro/catalog/CatalogTestHelpers.java index 18d8dec5f3..e0b6cbb813 100644 --- a/amoro-common/src/test/java/org/apache/amoro/catalog/CatalogTestHelpers.java +++ b/amoro-common/src/test/java/org/apache/amoro/catalog/CatalogTestHelpers.java @@ -51,6 +51,10 @@ public static CatalogMeta buildCatalogMeta( CatalogMetaProperties.STORAGE_CONFIGS_KEY_CORE_SITE, HADOOP_EMPTY_CONFIG_BASE64); storageConfig.put( CatalogMetaProperties.STORAGE_CONFIGS_KEY_HDFS_SITE, HADOOP_EMPTY_CONFIG_BASE64); + if (CatalogMetaProperties.CATALOG_TYPE_HIVE.equalsIgnoreCase(type)) { + storageConfig.put( + CatalogMetaProperties.STORAGE_CONFIGS_KEY_HIVE_SITE, HADOOP_EMPTY_CONFIG_BASE64); + } Map authConfig = new HashMap<>(); authConfig.put( diff --git a/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiTableDescriptor.java b/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiTableDescriptor.java index 508b2bf449..8720958b0b 100644 --- a/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiTableDescriptor.java +++ b/amoro-format-hudi/src/main/java/org/apache/amoro/formats/hudi/HudiTableDescriptor.java @@ -93,6 +93,8 @@ public class HudiTableDescriptor implements FormatTableDescriptor { private static final Logger LOG = LoggerFactory.getLogger(HudiTableDescriptor.class); + private static final String COMPACTION = "compaction"; + private static final String CLUSTERING = "clustering"; private ExecutorService ioExecutors; @@ -328,7 +330,7 @@ private Stream fileSliceToFileStream(String partition, Fi @Override public Pair, Integer> getOptimizingProcessesInfo( - AmoroTable amoroTable, int limit, int offset) { + AmoroTable 
amoroTable, String type, ProcessStatus status, int limit, int offset) { HoodieJavaTable hoodieTable = (HoodieJavaTable) amoroTable.originalTable(); HoodieDefaultTimeline timeline = new HoodieActiveTimeline(hoodieTable.getMetaClient(), false); List instants = timeline.getInstants(); @@ -369,7 +371,24 @@ public Pair, Integer> getOptimizingProcessesInfo( }) .filter(Objects::nonNull) .collect(Collectors.toList()); - return Pair.of(infos, infos.size()); + infos = + infos.stream() + .filter( + i -> + StringUtils.isNullOrEmpty(type) || type.equalsIgnoreCase(i.getOptimizingType())) + .filter(i -> status == null || status == i.getStatus()) + .collect(Collectors.toList()); + int total = infos.size(); + infos = infos.stream().skip(offset).limit(limit).collect(Collectors.toList()); + return Pair.of(infos, total); + } + + @Override + public Map getTableOptimizingTypes(AmoroTable amoroTable) { + Map types = Maps.newHashMap(); + types.put(COMPACTION, COMPACTION); + types.put(CLUSTERING, CLUSTERING); + return types; } protected OptimizingProcessInfo getOptimizingInfo( @@ -455,7 +474,7 @@ private void fillCompactProcessInfo(OptimizingProcessInfo processInfo, byte[] re processInfo.getSummary().put("strategy", strategy.getCompactorClassName()); processInfo.getSummary().putAll(strategy.getStrategyParams()); } - processInfo.setOptimizingType("Compact"); + processInfo.setOptimizingType(COMPACTION); } private OptimizingProcessInfo fillClusterProcessInfo( @@ -481,7 +500,7 @@ private OptimizingProcessInfo fillClusterProcessInfo( processInfo.setInputFiles(FilesStatistics.build(inputFileCount, inputFileSize)); int tasks = plan.getInputGroups().size(); processInfo.setTotalTasks(tasks); - processInfo.setOptimizingType("Cluster"); + processInfo.setOptimizingType(CLUSTERING); HoodieClusteringStrategy strategy = plan.getStrategy(); if (strategy != null) { diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/io/writer/IcebergFanoutPosDeleteWriter.java 
b/amoro-format-iceberg/src/main/java/org/apache/amoro/io/writer/IcebergFanoutPosDeleteWriter.java index b709e18f21..ea3b7ef89d 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/io/writer/IcebergFanoutPosDeleteWriter.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/io/writer/IcebergFanoutPosDeleteWriter.java @@ -157,7 +157,10 @@ private void flushDeletes() { String fileDir = TableFileUtil.getFileDir(filePath.get().toString()); String deleteFilePath = format.addExtension( - String.format("%s/%s-delete-%s", fileDir, fileName, fileNameSuffix)); + String.format( + "%s/%s", + fileDir, + TableFileUtil.optimizingPosDeleteFileName(fileName, fileNameSuffix))); EncryptedOutputFile outputFile = encryptionManager.encrypt(fileIO.newOutputFile(deleteFilePath)); diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedTableUtil.java b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedTableUtil.java index e94998c78b..19947678bf 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedTableUtil.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/MixedTableUtil.java @@ -53,7 +53,7 @@ public static UnkeyedTable baseStore(MixedTable mixedTable) { /** Return the table root location of the mixed-format table. */ public static String tableRootLocation(MixedTable mixedTable) { String tableRootLocation; - if (TableFormat.ICEBERG != mixedTable.format() && mixedTable.isUnkeyedTable()) { + if (!TableFormat.ICEBERG.equals(mixedTable.format()) && mixedTable.isUnkeyedTable()) { tableRootLocation = TableFileUtil.getFileDir(mixedTable.location()); } else { tableRootLocation = mixedTable.location(); @@ -179,7 +179,7 @@ private static StructLikeMap readLegacyPartitionProperties( * Mix format table will return directly after checking}. 
*/ public static PartitionSpec getMixedTablePartitionSpecById(MixedTable mixedTable, int specId) { - if (mixedTable.format() == TableFormat.ICEBERG) { + if (TableFormat.ICEBERG.equals(mixedTable.format())) { return mixedTable.asUnkeyedTable().specs().get(specId); } else { PartitionSpec spec = mixedTable.spec(); diff --git a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/TableFileUtil.java b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/TableFileUtil.java index e1b57e45dd..e140c79ebf 100644 --- a/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/TableFileUtil.java +++ b/amoro-format-iceberg/src/main/java/org/apache/amoro/utils/TableFileUtil.java @@ -33,6 +33,7 @@ public class TableFileUtil { private static final Logger LOG = LoggerFactory.getLogger(TableFileUtil.class); + private static final String POS_DELETE_FILE_IDENTIFIER = "delete"; /** * Parse file name form file path @@ -192,4 +193,13 @@ public static String getParent(String path) { Path p = new Path(path); return p.getParent().toString(); } + + public static String optimizingPosDeleteFileName(String dataFileName, String suffix) { + return String.format("%s-%s-%s", dataFileName, POS_DELETE_FILE_IDENTIFIER, suffix); + } + + public static boolean isOptimizingPosDeleteFile(String dataFilePath, String posDeleteFilePath) { + return getFileName(posDeleteFilePath) + .startsWith(String.format("%s-%s", getFileName(dataFilePath), POS_DELETE_FILE_IDENTIFIER)); + } } diff --git a/amoro-format-iceberg/src/test/java/org/apache/amoro/catalog/TableTestBase.java b/amoro-format-iceberg/src/test/java/org/apache/amoro/catalog/TableTestBase.java index 7e64192562..ae433b2089 100644 --- a/amoro-format-iceberg/src/test/java/org/apache/amoro/catalog/TableTestBase.java +++ b/amoro-format-iceberg/src/test/java/org/apache/amoro/catalog/TableTestBase.java @@ -18,6 +18,7 @@ package org.apache.amoro.catalog; +import org.apache.amoro.TableFormat; import org.apache.amoro.TableTestHelper; import 
org.apache.amoro.table.MixedTable; import org.apache.amoro.table.TableBuilder; @@ -44,14 +45,11 @@ public void setupTable() { this.tableMetaStore = CatalogUtil.buildMetaStore(getCatalogMeta()); getUnifiedCatalog().createDatabase(TableTestHelper.TEST_DB_NAME); - switch (getTestFormat()) { - case MIXED_HIVE: - case MIXED_ICEBERG: - createMixedFormatTable(); - break; - case ICEBERG: - createIcebergFormatTable(); - break; + TableFormat format = getTestFormat(); + if (format.in(TableFormat.MIXED_HIVE, TableFormat.MIXED_ICEBERG)) { + createMixedFormatTable(); + } else if (TableFormat.ICEBERG.equals(format)) { + createIcebergFormatTable(); } } diff --git a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/catalog/FlinkUnifiedCatalog.java b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/catalog/FlinkUnifiedCatalog.java index 3d944adb72..2c5c61f7c7 100644 --- a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/catalog/FlinkUnifiedCatalog.java +++ b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/catalog/FlinkUnifiedCatalog.java @@ -220,7 +220,7 @@ public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ig throws TableAlreadyExistException, DatabaseNotExistException, CatalogException { Configuration configuration = new Configuration(); table.getOptions().forEach(configuration::setString); - TableFormat format = configuration.get(TABLE_FORMAT); + TableFormat format = TableFormat.valueOf(configuration.get(TABLE_FORMAT)); TableIdentifier tableIdentifier = TableIdentifier.of( unifiedCatalog.name(), tablePath.getDatabaseName(), tablePath.getObjectName()); @@ -461,25 +461,19 @@ private AmoroTable loadAmoroTable(ObjectPath tablePath) { private AbstractCatalog createOriginalCatalog( TableIdentifier tableIdentifier, TableFormat tableFormat) { CatalogFactory 
catalogFactory; - - switch (tableFormat) { - case MIXED_ICEBERG: - case MIXED_HIVE: - catalogFactory = new MixedCatalogFactory(); - break; - case ICEBERG: - catalogFactory = new IcebergFlinkCatalogFactory(hadoopConf); - break; - case PAIMON: - catalogFactory = - new PaimonFlinkCatalogFactory( - unifiedCatalog.properties(), unifiedCatalog.metastoreType()); - break; - default: - throw new UnsupportedOperationException( - String.format( - "Unsupported table format: [%s] in the unified catalog, table identifier is [%s], the supported table formats are [%s].", - tableFormat, tableIdentifier, FlinkUnifiedCatalogFactory.SUPPORTED_FORMATS)); + if (tableFormat.in(TableFormat.MIXED_HIVE, TableFormat.MIXED_ICEBERG)) { + catalogFactory = new MixedCatalogFactory(); + } else if (tableFormat.equals(TableFormat.ICEBERG)) { + catalogFactory = new IcebergFlinkCatalogFactory(hadoopConf); + } else if (tableFormat.equals(TableFormat.PAIMON)) { + catalogFactory = + new PaimonFlinkCatalogFactory( + unifiedCatalog.properties(), unifiedCatalog.metastoreType()); + } else { + throw new UnsupportedOperationException( + String.format( + "Unsupported table format: [%s] in the unified catalog, table identifier is [%s], the supported table formats are [%s].", + tableFormat, tableIdentifier, FlinkUnifiedCatalogFactory.SUPPORTED_FORMATS)); } AbstractCatalog originalCatalog; diff --git a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/UnifiedDynamicTableFactory.java b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/UnifiedDynamicTableFactory.java index 203a823f76..71f30995f9 100644 --- a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/UnifiedDynamicTableFactory.java +++ b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/UnifiedDynamicTableFactory.java @@ -57,7 +57,7 @@ public 
DynamicTableSink createDynamicTableSink(Context context) { ObjectIdentifier identifier = context.getObjectIdentifier(); FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); Configuration options = (Configuration) helper.getOptions(); - TableFormat tableFormat = options.get(MixedFormatValidator.TABLE_FORMAT); + TableFormat tableFormat = TableFormat.valueOf(options.get(MixedFormatValidator.TABLE_FORMAT)); return getOriginalCatalog(tableFormat) .flatMap(AbstractCatalog::getFactory) @@ -76,7 +76,7 @@ public DynamicTableSource createDynamicTableSource(Context context) { ObjectIdentifier identifier = context.getObjectIdentifier(); FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); Configuration options = (Configuration) helper.getOptions(); - TableFormat tableFormat = options.get(MixedFormatValidator.TABLE_FORMAT); + TableFormat tableFormat = TableFormat.valueOf(options.get(MixedFormatValidator.TABLE_FORMAT)); return getOriginalCatalog(tableFormat) .flatMap(AbstractCatalog::getFactory) diff --git a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/descriptors/MixedFormatValidator.java b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/descriptors/MixedFormatValidator.java index 51831749e9..662da35be3 100644 --- a/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/descriptors/MixedFormatValidator.java +++ b/amoro-format-mixed/amoro-mixed-flink/amoro-mixed-flink-common/src/main/java/org/apache/amoro/flink/table/descriptors/MixedFormatValidator.java @@ -283,18 +283,18 @@ public class MixedFormatValidator extends ConnectorDescriptorValidator { + " of the key. 
Default is -1, means it is automatically determined: every shard will be at least 512KB and" + " number of shard bits will not exceed 6."); - public static final ConfigOption TABLE_FORMAT = + public static final ConfigOption TABLE_FORMAT = ConfigOptions.key("table.format") - .enumType(TableFormat.class) - .defaultValue(TableFormat.MIXED_ICEBERG) + .stringType() + .defaultValue(TableFormat.MIXED_ICEBERG.name()) .withDescription( String.format( "The format of the table, valid values are %s, %s, %s or %s, and Flink choose '%s' as default format.", - TableFormat.ICEBERG, - TableFormat.MIXED_ICEBERG, - TableFormat.MIXED_HIVE, - TableFormat.PAIMON, - TableFormat.MIXED_ICEBERG)); + TableFormat.ICEBERG.name(), + TableFormat.MIXED_ICEBERG.name(), + TableFormat.MIXED_HIVE.name(), + TableFormat.PAIMON.name(), + TableFormat.MIXED_ICEBERG.name())); public static final ConfigOption SCAN_PARALLELISM = ConfigOptions.key("source.parallelism") diff --git a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/SparkTestContext.java b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/SparkTestContext.java index 277225eaa4..351264ecef 100644 --- a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/SparkTestContext.java +++ b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/SparkTestContext.java @@ -121,7 +121,7 @@ private void setupCatalogs() { } HiveConf hiveConf = hms.getHiveConf(); for (TableFormat format : TableFormat.values()) { - if (format == TableFormat.HUDI) { + if (TableFormat.HUDI.equals(format)) { continue; } // create catalog for all formats in AMS with hive metastore. 
@@ -140,7 +140,7 @@ private void setupCatalogs() { Joiner.on(',') .join( Arrays.stream(TableFormat.values()) - .filter(f -> TableFormat.HUDI != f) + .filter(f -> !TableFormat.HUDI.equals(f)) .collect(Collectors.toList())); allFormats.putToCatalogProperties(CatalogMetaProperties.TABLE_FORMATS, formats); allFormats.setCatalogName(AMS_ALL_FORMAT_CATALOG_NAME); diff --git a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/extensions/EnableCatalogSelectExtension.java b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/extensions/EnableCatalogSelectExtension.java index b518e2479f..021d770be2 100644 --- a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/extensions/EnableCatalogSelectExtension.java +++ b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/extensions/EnableCatalogSelectExtension.java @@ -93,30 +93,28 @@ private String selectMixedCatalogByFormat(ExtensionContext context, ExtensionReg Preconditions.condition( format == TableFormat.MIXED_ICEBERG || format == TableFormat.MIXED_HIVE, "must be a mixed-format"); - switch (format) { - case MIXED_ICEBERG: - return SparkTestContext.SparkCatalogNames.MIXED_ICEBERG; - case MIXED_HIVE: - return SparkTestContext.SparkCatalogNames.MIXED_HIVE; - default: - throw new IllegalArgumentException("must be a mixed-format"); + if (TableFormat.MIXED_HIVE.equals(format)) { + return SparkTestContext.SparkCatalogNames.MIXED_HIVE; + } else if (TableFormat.MIXED_ICEBERG.equals(format)) { + return SparkTestContext.SparkCatalogNames.MIXED_ICEBERG; + } else { + throw new IllegalArgumentException("must be a mixed-format"); } } private String selectUnifiedCatalogByFormat( ExtensionContext context, ExtensionRegistry registry) { TableFormat format = formatFromMethodArgs(context, registry); - switch (format) { - case 
MIXED_ICEBERG: - return SparkTestContext.SparkCatalogNames.UNIFIED_MIXED_ICEBERG; - case MIXED_HIVE: - return SparkTestContext.SparkCatalogNames.UNIFIED_MIXED_HIVE; - case ICEBERG: - return SparkTestContext.SparkCatalogNames.UNIFIED_ICEBERG; - case PAIMON: - return SparkTestContext.SparkCatalogNames.UNIFIED_PAIMON; - default: - throw new IllegalArgumentException("unknown format"); + if (TableFormat.MIXED_HIVE.equals(format)) { + return SparkTestContext.SparkCatalogNames.UNIFIED_MIXED_HIVE; + } else if (TableFormat.MIXED_ICEBERG.equals(format)) { + return SparkTestContext.SparkCatalogNames.UNIFIED_MIXED_ICEBERG; + } else if (TableFormat.ICEBERG.equals(format)) { + return SparkTestContext.SparkCatalogNames.UNIFIED_ICEBERG; + } else if (TableFormat.PAIMON.equals(format)) { + return SparkTestContext.SparkCatalogNames.UNIFIED_PAIMON; + } else { + throw new IllegalArgumentException("unknown format"); } } diff --git a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/unified/UnifiedCatalogTestSuites.java b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/unified/UnifiedCatalogTestSuites.java index b356842977..480cd54e19 100644 --- a/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/unified/UnifiedCatalogTestSuites.java +++ b/amoro-format-mixed/amoro-mixed-spark/amoro-mixed-spark-3-common/src/test/java/org/apache/amoro/spark/test/unified/UnifiedCatalogTestSuites.java @@ -76,7 +76,7 @@ public void testTableFormats(TableFormat format, boolean sessionCatalog) { + " PARTITIONED BY (pt) "; sql(sqlText); int expect = 0; - if (TableFormat.PAIMON != format || !spark().version().startsWith("3.1")) { + if (!TableFormat.PAIMON.equals(format) || !spark().version().startsWith("3.1")) { // write is not supported in spark3-1 sqlText = "INSERT INTO " @@ -109,7 +109,7 @@ public void testTableFormats(TableFormat
format, boolean sessionCatalog) { } private String pkDDL(TableFormat format) { - if (TableFormat.MIXED_HIVE == format || TableFormat.MIXED_ICEBERG == format) { + if (TableFormat.MIXED_HIVE.equals(format) || TableFormat.MIXED_ICEBERG.equals(format)) { return ", primary key(id)"; } return ""; @@ -147,14 +147,10 @@ private void testVisitSubTable(TableFormat format, boolean sessionCatalog) { } List subTableNames = Lists.newArrayList(); - switch (format) { - case ICEBERG: - subTableNames = icebergInspectTableNames(); - break; - case MIXED_ICEBERG: - case MIXED_HIVE: - subTableNames = mixedFormatSubTableNames(); - break; + if (TableFormat.ICEBERG.equals(format)) { + subTableNames = icebergInspectTableNames(); + } else if (format.in(TableFormat.MIXED_HIVE, TableFormat.MIXED_ICEBERG)) { + subTableNames = mixedFormatSubTableNames(); } for (String inspectTableName : subTableNames) { diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java index 6ef07a002c..52a4b90b3b 100644 --- a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java @@ -22,6 +22,7 @@ import static org.apache.amoro.table.TableProperties.WRITE_DISTRIBUTION_MODE_DEFAULT; import static org.apache.iceberg.spark.Spark3Util.toTransforms; +import org.apache.amoro.TableFormat; import org.apache.amoro.shade.guava32.com.google.common.base.Joiner; import org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; import org.apache.amoro.spark.table.MixedSparkTable; @@ -186,12 +187,9 @@ public static Object convertConstant(Type type, Object value) { } public static String 
mixedTableProvider(MixedTable table) { - switch (table.format()) { - case MIXED_ICEBERG: - case MIXED_HIVE: - return table.format().name().toLowerCase(Locale.ROOT); - default: - throw new IllegalArgumentException("Not a mixed-format table:" + table.format()); + if (table.format().in(TableFormat.MIXED_ICEBERG, TableFormat.MIXED_HIVE)) { + return table.format().name().toLowerCase(Locale.ROOT); } + throw new IllegalArgumentException("Not a mixed-format table:" + table.format()); } } diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java index 8953f3267d..04b61a76f3 100644 --- a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java @@ -233,7 +233,7 @@ public void testSchemaAndData( TableFiles files = TestTableUtil.files(table); Asserts.assertAllFilesInBaseStore(files); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHiveColumns(expectSchema, ptSpec, hiveTable.getSd().getCols()); Asserts.assertHivePartition(ptSpec, hiveTable.getPartitionKeys()); diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java index eb75451215..99af2f5d7e 100644 --- 
a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java @@ -218,7 +218,7 @@ public void testPartitionSpec(TableFormat format, String partitionDDL, Partition MixedTable actualTable = loadTable(); Asserts.assertPartition(expectSpec, actualTable.spec()); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHivePartition(expectSpec, hiveTable.getPartitionKeys()); } @@ -303,7 +303,7 @@ public void testSchemaAndProperties( Asserts.assertType(expectSchema.asStruct(), tbl.schema().asStruct()); Asserts.assertHashMapContainExpect(expectProperties, tbl.properties()); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHiveColumns( expectSchema, PartitionSpec.unpartitioned(), hiveTable.getSd().getCols()); diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java index 6ef07a002c..925a56c82c 100644 --- a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/util/MixedFormatSparkUtils.java @@ -22,6 +22,7 @@ import static org.apache.amoro.table.TableProperties.WRITE_DISTRIBUTION_MODE_DEFAULT; import static org.apache.iceberg.spark.Spark3Util.toTransforms; +import org.apache.amoro.TableFormat; import org.apache.amoro.shade.guava32.com.google.common.base.Joiner; import 
org.apache.amoro.shade.guava32.com.google.common.base.Preconditions; import org.apache.amoro.spark.table.MixedSparkTable; @@ -186,12 +187,10 @@ public static Object convertConstant(Type type, Object value) { } public static String mixedTableProvider(MixedTable table) { - switch (table.format()) { - case MIXED_ICEBERG: - case MIXED_HIVE: - return table.format().name().toLowerCase(Locale.ROOT); - default: - throw new IllegalArgumentException("Not a mixed-format table:" + table.format()); + if (table.format().in(TableFormat.MIXED_HIVE, TableFormat.MIXED_ICEBERG)) { + return table.format().name().toLowerCase(Locale.ROOT); + } else { + throw new IllegalArgumentException("Not a mixed-format table:" + table.format()); } } } diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java index aeb0de686d..bdc773e9f1 100644 --- a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableAsSelect.java @@ -234,7 +234,7 @@ public void testSchemaAndData( TableFiles files = TestTableUtil.files(table); Asserts.assertAllFilesInBaseStore(files); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHiveColumns(expectSchema, ptSpec, hiveTable.getSd().getCols()); Asserts.assertHivePartition(ptSpec, hiveTable.getPartitionKeys()); diff --git a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java 
b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java index f13f645c9a..6142def5aa 100644 --- a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java +++ b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/test/java/org/apache/amoro/spark/test/suites/sql/TestCreateTableSQL.java @@ -218,7 +218,7 @@ public void testPartitionSpec(TableFormat format, String partitionDDL, Partition MixedTable actualTable = loadTable(); Asserts.assertPartition(expectSpec, actualTable.spec()); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHivePartition(expectSpec, hiveTable.getPartitionKeys()); } @@ -303,7 +303,7 @@ public void testSchemaAndProperties( Asserts.assertType(expectSchema.asStruct(), tbl.schema().asStruct()); Asserts.assertHashMapContainExpect(expectProperties, tbl.properties()); - if (TableFormat.MIXED_HIVE == format) { + if (TableFormat.MIXED_HIVE.equals(format)) { Table hiveTable = loadHiveTable(); Asserts.assertHiveColumns( expectSchema, PartitionSpec.unpartitioned(), hiveTable.getSd().getCols()); diff --git a/amoro-format-paimon/src/main/java/org/apache/amoro/formats/paimon/PaimonTableDescriptor.java b/amoro-format-paimon/src/main/java/org/apache/amoro/formats/paimon/PaimonTableDescriptor.java index 473488f471..681b7e0951 100644 --- a/amoro-format-paimon/src/main/java/org/apache/amoro/formats/paimon/PaimonTableDescriptor.java +++ b/amoro-format-paimon/src/main/java/org/apache/amoro/formats/paimon/PaimonTableDescriptor.java @@ -26,6 +26,7 @@ import org.apache.amoro.process.ProcessStatus; import org.apache.amoro.shade.guava32.com.google.common.collect.ImmutableList; import org.apache.amoro.shade.guava32.com.google.common.collect.Lists; +import 
org.apache.amoro.shade.guava32.com.google.common.collect.Maps; import org.apache.amoro.shade.guava32.com.google.common.collect.Streams; import org.apache.amoro.table.TableIdentifier; import org.apache.amoro.table.descriptor.AMSColumnInfo; @@ -45,6 +46,7 @@ import org.apache.amoro.table.descriptor.TableSummary; import org.apache.amoro.table.descriptor.TagOrBranchInfo; import org.apache.amoro.utils.CommonUtil; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.paimon.CoreOptions; import org.apache.paimon.FileStore; @@ -359,7 +361,7 @@ public List getTableFiles( @Override public Pair, Integer> getOptimizingProcessesInfo( - AmoroTable amoroTable, int limit, int offset) { + AmoroTable amoroTable, String type, ProcessStatus status, int limit, int offset) { // Temporary solution for Paimon. TODO: Get compaction info from Paimon compaction task List processInfoList = new ArrayList<>(); TableIdentifier tableIdentifier = amoroTable.id(); @@ -367,18 +369,14 @@ public Pair, Integer> getOptimizingProcessesInfo( FileStore store = fileStoreTable.store(); boolean isPrimaryTable = !fileStoreTable.primaryKeys().isEmpty(); int maxLevel = CoreOptions.fromMap(fileStoreTable.options()).numLevels() - 1; - int total; try { List compactSnapshots = Streams.stream(store.snapshotManager().snapshots()) .filter(s -> s.commitKind() == Snapshot.CommitKind.COMPACT) .collect(Collectors.toList()); - total = compactSnapshots.size(); processInfoList = compactSnapshots.stream() .sorted(Comparator.comparing(Snapshot::id).reversed()) - .skip(offset) - .limit(limit) .map( s -> { OptimizingProcessInfo optimizingProcessInfo = new OptimizingProcessInfo(); @@ -438,9 +436,25 @@ public Pair, Integer> getOptimizingProcessesInfo( } catch (IOException e) { throw new RuntimeException(e); } + processInfoList = + processInfoList.stream() + .filter(p -> StringUtils.isBlank(type) || type.equalsIgnoreCase(p.getOptimizingType())) + .filter(p -> status == null || 
status == p.getStatus()) + .collect(Collectors.toList()); + int total = processInfoList.size(); + processInfoList = + processInfoList.stream().skip(offset).limit(limit).collect(Collectors.toList()); return Pair.of(processInfoList, total); } + @Override + public Map getTableOptimizingTypes(AmoroTable amoroTable) { + Map types = Maps.newHashMap(); + types.put("FULL", "full"); + types.put("MINOR", "minor"); + return types; + } + @Override public List getOptimizingTaskInfos( AmoroTable amoroTable, String processId) { diff --git a/amoro-web/mock/modules/table.js b/amoro-web/mock/modules/table.js index a3e61a14d4..fc654349fe 100644 --- a/amoro-web/mock/modules/table.js +++ b/amoro-web/mock/modules/table.js @@ -306,6 +306,19 @@ export default [ } }), }, + { + url: '/mock/ams/v1/tables/catalogs/test_catalog/dbs/db/tables/user/optimizing-types', + method: 'get', + response: () => ({ + "message": "success", + "code": 200, + "result": { + "MINOR": "minor", + "MAJOR": "major", + "FULL": "full", + } + }), + }, { url: '/mock/ams/v1/tables/catalogs/test_catalog/dbs/db/tables/user/operations', method: 'get', diff --git a/amoro-web/src/language/en.ts b/amoro-web/src/language/en.ts index d78bbc6e67..7a8deeabca 100644 --- a/amoro-web/src/language/en.ts +++ b/amoro-web/src/language/en.ts @@ -194,6 +194,7 @@ export default { search: 'Search', reset: 'Reset', invalidInput: 'Invalid input', + createOptimizer: 'Create Optimizer', addgroup: 'Add Group', editgroup: 'Edit Group', cannotDeleteGroupModalTitle: 'Can\'t remove this group.', diff --git a/amoro-web/src/language/zh.ts b/amoro-web/src/language/zh.ts index 036420b786..a794732c5f 100644 --- a/amoro-web/src/language/zh.ts +++ b/amoro-web/src/language/zh.ts @@ -194,6 +194,7 @@ export default { search: '搜索', reset: '重置', invalidInput: '非法输入', + createOptimizer: '创建优化器', addgroup: '添加组', editgroup: '编辑组', cannotDeleteGroupModalTitle: '不能移除这个组', diff --git a/amoro-web/src/services/optimize.service.ts 
b/amoro-web/src/services/optimize.service.ts index 75846dc01a..cd1c368078 100644 --- a/amoro-web/src/services/optimize.service.ts +++ b/amoro-web/src/services/optimize.service.ts @@ -62,14 +62,24 @@ export function scaleoutResource( return request.post(`ams/v1/optimize/optimizerGroups/${optimizerGroup}/optimizers`, { parallelism }) } +export function createOptimizerResource( + params: { + optimizerGroup: string + parallelism: number + }, +) { + const { optimizerGroup, parallelism } = params + return request.post(`ams/v1/optimize/optimizers`, { optimizerGroup, parallelism }) +} + export function releaseResource( params: { optimizerGroup: string jobId: string }, ) { - const { optimizerGroup, jobId } = params - return request.delete(`ams/v1/optimize/optimizerGroups/${optimizerGroup}/optimizers/${jobId}`) + const { jobId } = params + return request.delete(`ams/v1/optimize/optimizers/${jobId}`) } export async function getResourceGroupsListAPI() { diff --git a/amoro-web/src/services/table.service.ts b/amoro-web/src/services/table.service.ts index 18d7f5b790..03ccc88d1d 100644 --- a/amoro-web/src/services/table.service.ts +++ b/amoro-web/src/services/table.service.ts @@ -156,13 +156,28 @@ export function getOptimizingProcesses( catalog: string db: string table: string + type: string + status: string page: number pageSize: number token?: string }, ) { - const { catalog, db, table, page, pageSize, token } = params - return request.get(`ams/v1/tables/catalogs/${catalog}/dbs/${db}/tables/${table}/optimizing-processes`, { params: { page, pageSize, token } }) + const { catalog, db, table, type, status, page, pageSize, token } = params + return request.get(`ams/v1/tables/catalogs/${catalog}/dbs/${db}/tables/${table}/optimizing-processes`, { params: { page, pageSize, token, type, status } }) +} + +// get optimizing process types +export function getTableOptimizingTypes( + params: { + catalog: string + db: string + table: string + token?: string + }, +) { + const { catalog, db, 
table, token } = params + return request.get(`ams/v1/tables/catalogs/${catalog}/dbs/${db}/tables/${table}/optimizing-types`, { params: { token } }) } // get optimizing taskes diff --git a/amoro-web/src/views/resource/components/CreateOptimizerModal.vue b/amoro-web/src/views/resource/components/CreateOptimizerModal.vue new file mode 100644 index 0000000000..be668be9bd --- /dev/null +++ b/amoro-web/src/views/resource/components/CreateOptimizerModal.vue @@ -0,0 +1,126 @@ + + + + + + \ No newline at end of file diff --git a/amoro-web/src/views/resource/components/List.vue b/amoro-web/src/views/resource/components/List.vue index 6e9a206b30..3693d97118 100644 --- a/amoro-web/src/views/resource/components/List.vue +++ b/amoro-web/src/views/resource/components/List.vue @@ -25,8 +25,6 @@ import { getOptimizerResourceList, getResourceGroupsListAPI, groupDeleteAPI, gro import { usePagination } from '@/hooks/usePagination' import { dateFormat, mbToSize } from '@/utils' -import ScaleOut from '@/views/resource/components/ScaleOut.vue' - const props = defineProps<{ curGroupName?: string, type: string }>() const emit = defineEmits<{ @@ -249,12 +247,6 @@ onMounted(() => { - diff --git a/amoro-web/src/views/resource/index.vue b/amoro-web/src/views/resource/index.vue index 20b2858fca..24f6c80d52 100644 --- a/amoro-web/src/views/resource/index.vue +++ b/amoro-web/src/views/resource/index.vue @@ -24,6 +24,7 @@ import { shallowReactive, toRefs, watch, + ref, } from 'vue' import { useI18n } from 'vue-i18n' import { useRoute, useRouter } from 'vue-router' @@ -33,6 +34,7 @@ import { usePlaceholder } from '@/hooks/usePlaceholder' import { usePagination } from '@/hooks/usePagination' import type { IIOptimizeGroupItem, ILableAndValue } from '@/types/common.type' import GroupModal from '@/views/resource/components/GroupModal.vue' +import CreateOptimizerModal from '@/views/resource/components/CreateOptimizerModal.vue' export default defineComponent({ name: 'Resource', @@ -40,6 +42,7 @@ export 
default defineComponent({ List, GroupModal, TableList, + CreateOptimizerModal, }, setup() { const { t } = useI18n() @@ -69,6 +72,9 @@ export default defineComponent({ }, groupKeyCount: 1, showTab: false as boolean, + showCreateOptimizer: false as boolean, + optimizerEdit: false, + optimizerEditRecord: null, }) watch( @@ -92,6 +98,16 @@ export default defineComponent({ state.showGroupModal = true } + const createOptimizer = (editRecord: any | null) => { + if (editRecord) { + state.optimizerEdit = true + state.optimizerEditRecord = { ...editRecord } + } else { + state.optimizerEdit = false + } + state.showCreateOptimizer = true + } + const onChangeTab = (key: string) => { const query = { ...route.query } query.tab = key @@ -102,6 +118,6 @@ export default defineComponent({ state.showTab = true }) return { placeholder, pagination, @@ -109,6 +126,7 @@ export default defineComponent({ tabConfig, onChangeTab, editGroup, + createOptimizer, t, } }, @@ -136,6 +154,9 @@ export default defineComponent({ :tab="t('optimizers')" :class="[activeTab === 'optimizers' ? 
'active' : '']" > + + {{ t("createOptimizer") }} + + diff --git a/amoro-web/src/views/tables/components/Optimizing.vue b/amoro-web/src/views/tables/components/Optimizing.vue index f40558f684..17ea8d3ba0 100644 --- a/amoro-web/src/views/tables/components/Optimizing.vue +++ b/amoro-web/src/views/tables/components/Optimizing.vue @@ -22,19 +22,20 @@ import { useI18n } from 'vue-i18n' import { useRoute } from 'vue-router' import { Modal } from 'ant-design-vue' import { usePagination } from '@/hooks/usePagination' -import type { BreadcrumbOptimizingItem, IColumns } from '@/types/common.type' -import { cancelOptimizingProcess, getOptimizingProcesses, getTasksByOptimizingProcessId } from '@/services/table.service' +import type { BreadcrumbOptimizingItem, IColumns, ILableAndValue } from '@/types/common.type' +import { cancelOptimizingProcess, getOptimizingProcesses, getTasksByOptimizingProcessId, getTableOptimizingTypes } from '@/services/table.service' import { bytesToSize, dateFormat, formatMS2Time } from '@/utils/index' const hasBreadcrumb = ref(false) -// const statusMap = { RUNNING: 'RUNNING', CLOSED: 'CLOSED', SUCCESS: 'SUCCESS', FAILED: 'FAILED' } -const STATUS_CONFIG = shallowReactive({ - RUNNING: { title: 'RUNNING', color: '#1890ff' }, +const statusMap = { + PENDING: { title: 'PENDING', color: '#ffcc00'}, + ACTIVE: { title: 'ACTIVE', color: '#1890ff' }, CLOSED: { title: 'CLOSED', color: '#c9cdd4' }, SUCCESS: { title: 'SUCCESS', color: '#0ad787' }, FAILED: { title: 'FAILED', color: '#f5222d' }, -}) +} +const STATUS_CONFIG = shallowReactive(statusMap) const TASK_STATUS_CONFIG = shallowReactive({ PLANNED: { title: 'PLANNED', color: '#ffcc00' }, @@ -85,6 +86,19 @@ const sourceData = reactive({ table: '', ...query, }) +const actionType = ref() +const actionTypeList = ref([]) +const statusType = ref() +const statusTypeList = ref([]) + +async function getQueryDataDictList() { + const tableProcessTypes = await getTableOptimizingTypes({...sourceData}) + const typesList = 
Object.entries(tableProcessTypes).map(([typeName, displayName]) => ({ label: displayName as string, value: typeName})) + const status = Object.entries(statusMap).map(([key, value]) => ({ label: value.title , value: key })); + + actionTypeList.value = typesList + statusTypeList.value = status +} async function refreshOptimizingProcesses() { try { @@ -92,9 +106,11 @@ async function refreshOptimizingProcesses() { dataSource.length = 0 const result = await getOptimizingProcesses({ ...sourceData, + type: actionType.value || '', + status: statusType.value || '', page: pagination.current, pageSize: pagination.pageSize, - }) + } as any) const { list, total = 0 } = result pagination.total = total dataSource.push(...[...list || []].map((item) => { @@ -214,12 +230,23 @@ function toggleBreadcrumb(rowProcessId: number, status: string) { onMounted(() => { hasBreadcrumb.value = false refresh() + getQueryDataDictList() })