diff --git a/src/main/java/org/opensearch/ad/AnomalyDetectorJobRunner.java b/src/main/java/org/opensearch/ad/AnomalyDetectorJobRunner.java index 59e27330a..ceb39914e 100644 --- a/src/main/java/org/opensearch/ad/AnomalyDetectorJobRunner.java +++ b/src/main/java/org/opensearch/ad/AnomalyDetectorJobRunner.java @@ -112,7 +112,7 @@ public void setThreadPool(ThreadPool threadPool) { public void setSettings(Settings settings) { this.settings = settings; - this.maxRetryForEndRunException = AnomalyDetectorSettings.MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings); + this.maxRetryForEndRunException = AnomalyDetectorSettings.AD_MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings); } public void setAdTaskManager(ADTaskManager adTaskManager) { diff --git a/src/main/java/org/opensearch/ad/AnomalyDetectorProfileRunner.java b/src/main/java/org/opensearch/ad/AnomalyDetectorProfileRunner.java index 2fece82e4..0fb2fe7fb 100644 --- a/src/main/java/org/opensearch/ad/AnomalyDetectorProfileRunner.java +++ b/src/main/java/org/opensearch/ad/AnomalyDetectorProfileRunner.java @@ -39,7 +39,6 @@ import org.opensearch.ad.model.DetectorState; import org.opensearch.ad.model.InitProgressProfile; import org.opensearch.ad.settings.ADNumericSetting; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.ad.task.ADTaskManager; import org.opensearch.ad.transport.ProfileAction; import org.opensearch.ad.transport.ProfileRequest; @@ -70,6 +69,7 @@ import org.opensearch.timeseries.constant.CommonName; import org.opensearch.timeseries.model.IntervalTimeConfiguration; import org.opensearch.timeseries.model.Job; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.DiscoveryNodeFilterer; import org.opensearch.timeseries.util.ExceptionUtil; import org.opensearch.timeseries.util.MultiResponsesDelegateActionListener; @@ -105,7 +105,7 @@ public AnomalyDetectorProfileRunner( } this.transportService = transportService; this.adTaskManager = 
adTaskManager; - this.maxTotalEntitiesToTrack = AnomalyDetectorSettings.MAX_TOTAL_ENTITIES_TO_TRACK; + this.maxTotalEntitiesToTrack = TimeSeriesSettings.MAX_TOTAL_ENTITIES_TO_TRACK; } public void profile(String detectorId, ActionListener listener, Set profilesToCollect) { diff --git a/src/main/java/org/opensearch/ad/caching/CacheBuffer.java b/src/main/java/org/opensearch/ad/caching/CacheBuffer.java index 234a72932..fb48fd273 100644 --- a/src/main/java/org/opensearch/ad/caching/CacheBuffer.java +++ b/src/main/java/org/opensearch/ad/caching/CacheBuffer.java @@ -25,8 +25,6 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.MemoryTracker.Origin; import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelState; import org.opensearch.ad.model.InitProgressProfile; @@ -36,6 +34,8 @@ import org.opensearch.ad.ratelimit.RequestPriority; import org.opensearch.ad.util.DateUtils; import org.opensearch.timeseries.ExpiringState; +import org.opensearch.timeseries.MemoryTracker; +import org.opensearch.timeseries.MemoryTracker.Origin; /** * We use a layered cache to manage active entities’ states. We have a two-level @@ -159,7 +159,7 @@ private void put(String entityModelId, ModelState value, float prio // Since we have already considered them while allocating CacheBuffer, // skip bookkeeping. 
if (!sharedCacheEmpty()) { - memoryTracker.consumeMemory(memoryConsumptionPerEntity, false, Origin.HC_DETECTOR); + memoryTracker.consumeMemory(memoryConsumptionPerEntity, false, Origin.REAL_TIME_DETECTOR); } } else { update(entityModelId); @@ -267,7 +267,7 @@ public ModelState remove(String keyToRemove, boolean saveCheckpoint if (valueRemoved != null) { if (!reserved) { // release in shared memory - memoryTracker.releaseMemory(memoryConsumptionPerEntity, false, Origin.HC_DETECTOR); + memoryTracker.releaseMemory(memoryConsumptionPerEntity, false, Origin.REAL_TIME_DETECTOR); } EntityModel modelRemoved = valueRemoved.getModel(); @@ -460,9 +460,9 @@ public void clear() { // not a problem as we are releasing memory in MemoryTracker. // The newly added one loses references and soon GC will collect it. // We have memory tracking correction to fix incorrect memory usage record. - memoryTracker.releaseMemory(getReservedBytes(), true, Origin.HC_DETECTOR); + memoryTracker.releaseMemory(getReservedBytes(), true, Origin.REAL_TIME_DETECTOR); if (!sharedCacheEmpty()) { - memoryTracker.releaseMemory(getBytesInSharedCache(), false, Origin.HC_DETECTOR); + memoryTracker.releaseMemory(getBytesInSharedCache(), false, Origin.REAL_TIME_DETECTOR); } items.clear(); priorityTracker.clearPriority(); diff --git a/src/main/java/org/opensearch/ad/caching/PriorityCache.java b/src/main/java/org/opensearch/ad/caching/PriorityCache.java index 701a7ba13..175d77e64 100644 --- a/src/main/java/org/opensearch/ad/caching/PriorityCache.java +++ b/src/main/java/org/opensearch/ad/caching/PriorityCache.java @@ -11,8 +11,8 @@ package org.opensearch.ad.caching; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.DEDICATED_CACHE_SIZE; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_DEDICATED_CACHE_SIZE; +import static 
org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE; import java.time.Clock; import java.time.Duration; @@ -38,8 +38,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.MemoryTracker.Origin; import org.opensearch.ad.ml.CheckpointDao; import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelManager.ModelType; @@ -57,6 +55,8 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.timeseries.MemoryTracker; +import org.opensearch.timeseries.MemoryTracker.Origin; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.common.exception.TimeSeriesException; @@ -116,12 +116,12 @@ public PriorityCache( this.activeEnities = new ConcurrentHashMap<>(); this.dedicatedCacheSize = dedicatedCacheSize; - clusterService.getClusterSettings().addSettingsUpdateConsumer(DEDICATED_CACHE_SIZE, (it) -> { + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_DEDICATED_CACHE_SIZE, (it) -> { this.dedicatedCacheSize = it; this.setDedicatedCacheSizeListener(); this.tryClearUpMemory(); }, this::validateDedicatedCacheSize); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MODEL_MAX_SIZE_PERCENTAGE, it -> this.tryClearUpMemory()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_MODEL_MAX_SIZE_PERCENTAGE, it -> this.tryClearUpMemory()); this.memoryTracker = memoryTracker; this.maintenanceLock = new ReentrantLock(); @@ -461,7 +461,7 @@ private CacheBuffer computeBufferIfAbsent(AnomalyDetector detector, String detec if (buffer == null) { long requiredBytes = getRequiredMemory(detector, dedicatedCacheSize); if 
(memoryTracker.canAllocateReserved(requiredBytes)) { - memoryTracker.consumeMemory(requiredBytes, true, Origin.HC_DETECTOR); + memoryTracker.consumeMemory(requiredBytes, true, Origin.REAL_TIME_DETECTOR); long intervalSecs = detector.getIntervalInSeconds(); buffer = new CacheBuffer( @@ -621,7 +621,7 @@ private void recalculateUsedMemory() { reserved += buffer.getReservedBytes(); shared += buffer.getBytesInSharedCache(); } - memoryTracker.syncMemoryState(Origin.HC_DETECTOR, reserved + shared, reserved); + memoryTracker.syncMemoryState(Origin.REAL_TIME_DETECTOR, reserved + shared, reserved); } /** diff --git a/src/main/java/org/opensearch/ad/ml/ModelManager.java b/src/main/java/org/opensearch/ad/ml/ModelManager.java index f77eecb16..5cc2949a5 100644 --- a/src/main/java/org/opensearch/ad/ml/ModelManager.java +++ b/src/main/java/org/opensearch/ad/ml/ModelManager.java @@ -33,17 +33,16 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ad.DetectorModelSize; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.constant.ADCommonMessages; import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.ad.util.DateUtils; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.common.exception.ResourceNotFoundException; import org.opensearch.timeseries.ml.SingleStreamModelIdMapper; import org.opensearch.timeseries.model.Entity; @@ -624,7 +623,7 @@ public List getPreviewResults(double[][] dataPoints, int shi .parallelExecutionEnabled(false) .compact(true) .precision(Precision.FLOAT_32) - 
.boundingBoxCacheFraction(AnomalyDetectorSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) .shingleSize(shingleSize) .anomalyRate(1 - this.thresholdMinPvalue) .transformMethod(TransformMethod.NORMALIZE) diff --git a/src/main/java/org/opensearch/ad/ml/TRCFMemoryAwareConcurrentHashmap.java b/src/main/java/org/opensearch/ad/ml/TRCFMemoryAwareConcurrentHashmap.java index 7b7b1fe7d..2380173b0 100644 --- a/src/main/java/org/opensearch/ad/ml/TRCFMemoryAwareConcurrentHashmap.java +++ b/src/main/java/org/opensearch/ad/ml/TRCFMemoryAwareConcurrentHashmap.java @@ -13,8 +13,8 @@ import java.util.concurrent.ConcurrentHashMap; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.MemoryTracker.Origin; +import org.opensearch.timeseries.MemoryTracker; +import org.opensearch.timeseries.MemoryTracker.Origin; import com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; @@ -37,7 +37,7 @@ public ModelState remove(Object key) { ModelState deletedModelState = super.remove(key); if (deletedModelState != null && deletedModelState.getModel() != null) { long memoryToRelease = memoryTracker.estimateTRCFModelSize(deletedModelState.getModel()); - memoryTracker.releaseMemory(memoryToRelease, true, Origin.SINGLE_ENTITY_DETECTOR); + memoryTracker.releaseMemory(memoryToRelease, true, Origin.REAL_TIME_DETECTOR); } return deletedModelState; } @@ -47,7 +47,7 @@ public ModelState put(K key, ModelState previousAssociatedState = super.put(key, value); if (value != null && value.getModel() != null) { long memoryToConsume = memoryTracker.estimateTRCFModelSize(value.getModel()); - memoryTracker.consumeMemory(memoryToConsume, true, Origin.SINGLE_ENTITY_DETECTOR); + memoryTracker.consumeMemory(memoryToConsume, true, Origin.REAL_TIME_DETECTOR); } return previousAssociatedState; } diff --git a/src/main/java/org/opensearch/ad/ratelimit/BatchWorker.java b/src/main/java/org/opensearch/ad/ratelimit/BatchWorker.java 
index f15296dfb..7ba8b4383 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/BatchWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/BatchWorker.java @@ -19,7 +19,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.support.ThreadedActionListener; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -27,6 +26,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; /** * @@ -46,7 +46,7 @@ public BatchWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorker.java b/src/main/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorker.java index 8f9a543f7..05f9480a7 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorker.java @@ -23,13 +23,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; public class CheckpointMaintainWorker extends 
ScheduledWorker { private static final Logger LOG = LogManager.getLogger(CheckpointMaintainWorker.class); @@ -43,7 +43,7 @@ public CheckpointMaintainWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/CheckpointReadWorker.java b/src/main/java/org/opensearch/ad/ratelimit/CheckpointReadWorker.java index 448604285..d4f1f99af 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/CheckpointReadWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/CheckpointReadWorker.java @@ -32,7 +32,6 @@ import org.opensearch.action.get.MultiGetItemResponse; import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.get.MultiGetResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.constant.ADCommonName; import org.opensearch.ad.indices.ADIndex; @@ -53,6 +52,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.constant.CommonMessages; import org.opensearch.timeseries.model.Config; @@ -91,7 +91,7 @@ public CheckpointReadWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/CheckpointWriteWorker.java b/src/main/java/org/opensearch/ad/ratelimit/CheckpointWriteWorker.java index 6b294d7a0..a26cb8b94 100644 
--- a/src/main/java/org/opensearch/ad/ratelimit/CheckpointWriteWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/CheckpointWriteWorker.java @@ -30,7 +30,6 @@ import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.update.UpdateRequest; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.ml.CheckpointDao; import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelState; @@ -43,6 +42,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.model.Config; import org.opensearch.timeseries.util.ExceptionUtil; @@ -60,7 +60,7 @@ public CheckpointWriteWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/ColdEntityWorker.java b/src/main/java/org/opensearch/ad/ratelimit/ColdEntityWorker.java index 1c114217f..701fc25d4 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/ColdEntityWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/ColdEntityWorker.java @@ -12,7 +12,7 @@ package org.opensearch.ad.ratelimit; import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS; import java.time.Clock; import java.time.Duration; @@ -20,13 +20,13 @@ import java.util.Random; import java.util.stream.Collectors; -import 
org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; /** * A queue slowly releasing low-priority requests to CheckpointReadQueue @@ -52,7 +52,7 @@ public ColdEntityWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, @@ -87,12 +87,12 @@ public ColdEntityWorker( this.batchSize = AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE, it -> this.batchSize = it); - this.expectedExecutionTimeInMilliSecsPerRequest = AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS + this.expectedExecutionTimeInMilliSecsPerRequest = AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS .get(settings); clusterService .getClusterSettings() .addSettingsUpdateConsumer( - EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, + AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, it -> this.expectedExecutionTimeInMilliSecsPerRequest = it ); } diff --git a/src/main/java/org/opensearch/ad/ratelimit/ConcurrentWorker.java b/src/main/java/org/opensearch/ad/ratelimit/ConcurrentWorker.java index f34944391..3df70c935 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/ConcurrentWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/ConcurrentWorker.java @@ -19,13 +19,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import 
org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; /** * A queue to run concurrent requests (either batch or single request). @@ -74,7 +74,7 @@ public ConcurrentWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/EntityColdStartWorker.java b/src/main/java/org/opensearch/ad/ratelimit/EntityColdStartWorker.java index 3d3b60328..72011e156 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/EntityColdStartWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/EntityColdStartWorker.java @@ -11,7 +11,7 @@ package org.opensearch.ad.ratelimit; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_CONCURRENCY; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_CONCURRENCY; import java.time.Clock; import java.time.Duration; @@ -23,7 +23,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.ml.EntityColdStarter; import org.opensearch.ad.ml.EntityModel; @@ -37,6 +36,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; +import 
org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.util.ExceptionUtil; /** @@ -61,7 +61,7 @@ public EntityColdStartWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, @@ -90,7 +90,7 @@ public EntityColdStartWorker( mediumSegmentPruneRatio, lowSegmentPruneRatio, maintenanceFreqConstant, - ENTITY_COLD_START_QUEUE_CONCURRENCY, + AD_ENTITY_COLD_START_QUEUE_CONCURRENCY, executionTtl, stateTtl, nodeStateManager diff --git a/src/main/java/org/opensearch/ad/ratelimit/RateLimitedRequestWorker.java b/src/main/java/org/opensearch/ad/ratelimit/RateLimitedRequestWorker.java index 15af1fc6e..911ae43a5 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/RateLimitedRequestWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/RateLimitedRequestWorker.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -44,6 +43,7 @@ import org.opensearch.timeseries.MaintenanceState; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.TimeSeriesException; /** @@ -175,7 +175,7 @@ public int clearExpiredRequests() { protected final ConcurrentSkipListMap requestQueues; private String lastSelectedRequestQueueId; protected Random random; - private ADCircuitBreakerService adCircuitBreakerService; + private CircuitBreakerService adCircuitBreakerService; 
protected ThreadPool threadPool; protected Instant cooldownStart; protected int coolDownMinutes; @@ -194,7 +194,7 @@ public RateLimitedRequestWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/ResultWriteWorker.java b/src/main/java/org/opensearch/ad/ratelimit/ResultWriteWorker.java index 5c2ffd233..02152b086 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/ResultWriteWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/ResultWriteWorker.java @@ -25,7 +25,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.model.AnomalyDetector; import org.opensearch.ad.model.AnomalyResult; import org.opensearch.ad.transport.ADResultBulkRequest; @@ -44,6 +43,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.model.Config; import org.opensearch.timeseries.util.ExceptionUtil; @@ -60,7 +60,7 @@ public ResultWriteWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/ScheduledWorker.java b/src/main/java/org/opensearch/ad/ratelimit/ScheduledWorker.java index 62edc017f..115d79882 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/ScheduledWorker.java +++ 
b/src/main/java/org/opensearch/ad/ratelimit/ScheduledWorker.java @@ -18,7 +18,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -26,6 +25,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; public abstract class ScheduledWorker extends RateLimitedRequestWorker { @@ -45,7 +45,7 @@ public ScheduledWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/ratelimit/SingleRequestWorker.java b/src/main/java/org/opensearch/ad/ratelimit/SingleRequestWorker.java index 9820bc20b..e789e36fa 100644 --- a/src/main/java/org/opensearch/ad/ratelimit/SingleRequestWorker.java +++ b/src/main/java/org/opensearch/ad/ratelimit/SingleRequestWorker.java @@ -19,13 +19,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.NodeStateManager; +import org.opensearch.timeseries.breaker.CircuitBreakerService; public abstract class SingleRequestWorker extends ConcurrentWorker { private static final Logger LOG = LogManager.getLogger(SingleRequestWorker.class); @@ -37,7 
+37,7 @@ public SingleRequestWorker( Setting maxHeapPercentForQueueSetting, ClusterService clusterService, Random random, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, ThreadPool threadPool, Settings settings, float maxQueuedTaskRatio, diff --git a/src/main/java/org/opensearch/ad/rest/AbstractAnomalyDetectorAction.java b/src/main/java/org/opensearch/ad/rest/AbstractAnomalyDetectorAction.java index 4d06e5ab2..ee0d410f5 100644 --- a/src/main/java/org/opensearch/ad/rest/AbstractAnomalyDetectorAction.java +++ b/src/main/java/org/opensearch/ad/rest/AbstractAnomalyDetectorAction.java @@ -11,12 +11,12 @@ package org.opensearch.ad.rest; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS; import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_REQUEST_TIMEOUT; import static org.opensearch.ad.settings.AnomalyDetectorSettings.DETECTION_INTERVAL; import static org.opensearch.ad.settings.AnomalyDetectorSettings.DETECTION_WINDOW_DELAY; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_ANOMALY_FEATURES; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -36,8 +36,8 @@ public AbstractAnomalyDetectorAction(Settings settings, ClusterService clusterSe this.requestTimeout = AD_REQUEST_TIMEOUT.get(settings); this.detectionInterval = DETECTION_INTERVAL.get(settings); this.detectionWindowDelay = DETECTION_WINDOW_DELAY.get(settings); - this.maxSingleEntityDetectors = MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings); - this.maxMultiEntityDetectors = MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(settings); + 
this.maxSingleEntityDetectors = AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings); + this.maxMultiEntityDetectors = AD_MAX_HC_ANOMALY_DETECTORS.get(settings); this.maxAnomalyFeatures = MAX_ANOMALY_FEATURES.get(settings); // TODO: will add more cluster setting consumer later // TODO: inject ClusterSettings only if clusterService is only used to get ClusterSettings @@ -46,10 +46,8 @@ public AbstractAnomalyDetectorAction(Settings settings, ClusterService clusterSe clusterService.getClusterSettings().addSettingsUpdateConsumer(DETECTION_WINDOW_DELAY, it -> detectionWindowDelay = it); clusterService .getClusterSettings() - .addSettingsUpdateConsumer(MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, it -> maxSingleEntityDetectors = it); - clusterService - .getClusterSettings() - .addSettingsUpdateConsumer(MAX_MULTI_ENTITY_ANOMALY_DETECTORS, it -> maxMultiEntityDetectors = it); + .addSettingsUpdateConsumer(AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, it -> maxSingleEntityDetectors = it); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_MAX_HC_ANOMALY_DETECTORS, it -> maxMultiEntityDetectors = it); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_ANOMALY_FEATURES, it -> maxAnomalyFeatures = it); } } diff --git a/src/main/java/org/opensearch/ad/settings/AnomalyDetectorSettings.java b/src/main/java/org/opensearch/ad/settings/AnomalyDetectorSettings.java index 4b98f0b6e..b5f10b383 100644 --- a/src/main/java/org/opensearch/ad/settings/AnomalyDetectorSettings.java +++ b/src/main/java/org/opensearch/ad/settings/AnomalyDetectorSettings.java @@ -11,8 +11,6 @@ package org.opensearch.ad.settings; -import java.time.Duration; - import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import org.opensearch.timeseries.settings.TimeSeriesSettings; @@ -25,7 +23,7 @@ public final class AnomalyDetectorSettings { private AnomalyDetectorSettings() {} public static final int MAX_DETECTOR_UPPER_LIMIT = 10000; - public static final Setting 
MAX_SINGLE_ENTITY_ANOMALY_DETECTORS = Setting + public static final Setting AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS = Setting .intSetting( "plugins.anomaly_detection.max_anomaly_detectors", LegacyOpenDistroAnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, @@ -35,7 +33,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting MAX_MULTI_ENTITY_ANOMALY_DETECTORS = Setting + public static final Setting AD_MAX_HC_ANOMALY_DETECTORS = Setting .intSetting( "plugins.anomaly_detection.max_multi_entity_anomaly_detectors", LegacyOpenDistroAnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS, @@ -114,7 +112,11 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - // Use TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE + /** + * @deprecated This setting is deprecated because we need to manage fault tolerance for + * multiple analysis such as AD and forecasting. + * Use TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE instead. + */ @Deprecated public static final Setting AD_MAX_RETRY_FOR_UNRESPONSIVE_NODE = Setting .intSetting( @@ -125,6 +127,12 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); + /** + * @deprecated This setting is deprecated because we need to manage fault tolerance for + * multiple analysis such as AD and forecasting. + * Use TimeSeriesSettings.COOLDOWN_MINUTES instead. + */ + @Deprecated public static final Setting AD_COOLDOWN_MINUTES = Setting .positiveTimeSetting( "plugins.anomaly_detection.cooldown_minutes", @@ -133,7 +141,11 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - // Use TimeSeriesSettings.BACKOFF_MINUTES + /** + * @deprecated This setting is deprecated because we need to manage fault tolerance for + * multiple analysis such as AD and forecasting. + * Use TimeSeriesSettings.BACKOFF_MINUTES instead. 
+ */ @Deprecated public static final Setting AD_BACKOFF_MINUTES = Setting .positiveTimeSetting( @@ -160,7 +172,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting MAX_RETRY_FOR_END_RUN_EXCEPTION = Setting + public static final Setting AD_MAX_RETRY_FOR_END_RUN_EXCEPTION = Setting .intSetting( "plugins.anomaly_detection.max_retry_for_end_run_exception", LegacyOpenDistroAnomalyDetectorSettings.MAX_RETRY_FOR_END_RUN_EXCEPTION, @@ -169,10 +181,10 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting FILTER_BY_BACKEND_ROLES = Setting + public static final Setting AD_FILTER_BY_BACKEND_ROLES = Setting .boolSetting( "plugins.anomaly_detection.filter_by_backend_roles", - LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, Setting.Property.NodeScope, Setting.Property.Dynamic ); @@ -181,14 +193,12 @@ private AnomalyDetectorSettings() {} public static final String ANOMALY_DETECTION_STATE_INDEX_MAPPING_FILE = "mappings/anomaly-detection-state.json"; public static final String CHECKPOINT_INDEX_MAPPING_FILE = "mappings/anomaly-checkpoint.json"; - public static final Duration HOURLY_MAINTENANCE = Duration.ofHours(1); - // saving checkpoint every 12 hours. // To support 1 million entities in 36 data nodes, each node has roughly 28K models. // In each hour, we roughly need to save 2400 models. Since each model saving can - // take about 1 seconds (default value of AnomalyDetectorSettings.EXPECTED_CHECKPOINT_MAINTAIN_TIME_IN_SECS) + // take about 1 seconds (default value of AD_EXPECTED_CHECKPOINT_MAINTAIN_TIME_IN_MILLISECS) // we can use up to 2400 seconds to finish saving checkpoints. 
- public static final Setting CHECKPOINT_SAVING_FREQ = Setting + public static final Setting AD_CHECKPOINT_SAVING_FREQ = Setting .positiveTimeSetting( "plugins.anomaly_detection.checkpoint_saving_freq", TimeValue.timeValueHours(12), @@ -196,7 +206,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting CHECKPOINT_TTL = Setting + public static final Setting AD_CHECKPOINT_TTL = Setting .positiveTimeSetting( "plugins.anomaly_detection.checkpoint_ttl", TimeValue.timeValueDays(7), @@ -207,52 +217,16 @@ private AnomalyDetectorSettings() {} // ====================================== // ML parameters // ====================================== - // RCF - public static final int NUM_SAMPLES_PER_TREE = 256; - - public static final int NUM_TREES = 30; - - public static final int TRAINING_SAMPLE_INTERVAL = 64; - - public static final double TIME_DECAY = 0.0001; - - // If we have 32 + shingleSize (hopefully recent) values, RCF can get up and running. It will be noisy — - // there is a reason that default size is 256 (+ shingle size), but it may be more useful for people to - /// start seeing some results. - public static final int NUM_MIN_SAMPLES = 32; - - // The threshold for splitting RCF models in single-stream detectors. - // The smallest machine in the Amazon managed service has 1GB heap. - // With the setting, the desired model size there is of 2 MB. - // By default, we can have at most 5 features. Since the default shingle size - // is 8, we have at most 40 dimensions in RCF. In our current RCF setting, - // 30 trees, and bounding box cache ratio 0, 40 dimensions use 449KB. - // Users can increase the number of features to 10 and shingle size to 60, - // 30 trees, bounding box cache ratio 0, 600 dimensions use 1.8 MB. - // Since these sizes are smaller than the threshold 2 MB, we won't split models - // even in the smallest machine. 
- public static final double DESIRED_MODEL_SIZE_PERCENTAGE = 0.002; - - public static final Setting MODEL_MAX_SIZE_PERCENTAGE = Setting + public static final Setting AD_MODEL_MAX_SIZE_PERCENTAGE = Setting .doubleSetting( "plugins.anomaly_detection.model_max_size_percent", LegacyOpenDistroAnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, 0, - 0.7, + 0.9, Setting.Property.NodeScope, Setting.Property.Dynamic ); - // for a batch operation, we want all of the bounding box in-place for speed - public static final double BATCH_BOUNDING_BOX_CACHE_RATIO = 1; - - // Thresholding - public static final double THRESHOLD_MIN_PVALUE = 0.995; - - public static final double THRESHOLD_MAX_RANK_ERROR = 0.0001; - - public static final double THRESHOLD_MAX_SCORE = 8; - public static final int THRESHOLD_NUM_LOGNORMAL_QUANTILES = 400; public static final int THRESHOLD_DOWNSAMPLES = 5_000; @@ -273,9 +247,6 @@ private AnomalyDetectorSettings() {} // shingling public static final double MAX_SHINGLE_PROPORTION_MISSING = 0.25; - // AD JOB - public static final long DEFAULT_AD_JOB_LOC_DURATION_SECONDS = 60; - // Thread pool public static final int AD_THEAD_POOL_QUEUE_SIZE = 1000; @@ -297,7 +268,7 @@ private AnomalyDetectorSettings() {} * Other detectors cannot use space reserved by a detector's dedicated cache. * DEDICATED_CACHE_SIZE is a setting to make dedicated cache's size flexible. * When that setting is changed, if the size decreases, we will release memory - * if required (e.g., when a user also decreased AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, + * if required (e.g., when a user also decreased AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE, * the max memory percentage that AD can use); * if the size increases, we may reject the setting change if we cannot fulfill * that request (e.g., when it will uses more memory than allowed for AD). @@ -309,25 +280,13 @@ private AnomalyDetectorSettings() {} * where 3.2 GB is from 10% memory limit of AD plugin. 
* That's why I am using 60_000 as the max limit. */ - public static final Setting DEDICATED_CACHE_SIZE = Setting + public static final Setting AD_DEDICATED_CACHE_SIZE = Setting .intSetting("plugins.anomaly_detection.dedicated_cache_size", 10, 0, 60_000, Setting.Property.NodeScope, Setting.Property.Dynamic); // We only keep priority (4 bytes float) in inactive cache. 1 million priorities // take up 4 MB. public static final int MAX_INACTIVE_ENTITIES = 1_000_000; - // Increase the value will adding pressure to indexing anomaly results and our feature query - // OpenSearch-only setting as previous the legacy default is too low (1000) - public static final Setting MAX_ENTITIES_PER_QUERY = Setting - .intSetting( - "plugins.anomaly_detection.max_entities_per_query", - 1_000_000, - 0, - 2_000_000, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - // save partial zero-anomaly grade results after indexing pressure reaching the limit // Opendistro version has similar setting. I lowered the value to make room // for INDEX_PRESSURE_HARD_LIMIT. I don't find a floatSetting that has both default @@ -365,12 +324,6 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - // max entity value's length - public static int MAX_ENTITY_LENGTH = 256; - - // number of bulk checkpoints per second - public static double CHECKPOINT_BULK_PER_SECOND = 0.02; - // ====================================== // Historical analysis // ====================================== @@ -386,6 +339,8 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); + // Use TimeSeriesSettings.MAX_CACHED_DELETED_TASKS for both AD and forecasting + @Deprecated // Maximum number of deleted tasks can keep in cache. 
public static final Setting MAX_CACHED_DELETED_TASKS = Setting .intSetting( @@ -459,7 +414,7 @@ private AnomalyDetectorSettings() {} // ====================================== // the percentage of heap usage allowed for queues holding small requests // set it to 0 to disable the queue - public static final Setting COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.cold_entity_queue_max_heap_percent", 0.001f, @@ -468,7 +423,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.checkpoint_read_queue_max_heap_percent", 0.001f, @@ -477,7 +432,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.entity_cold_start_queue_max_heap_percent", 0.001f, @@ -488,7 +443,7 @@ private AnomalyDetectorSettings() {} // the percentage of heap usage allowed for queues holding large requests // set it to 0 to disable the queue - public static final Setting CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.checkpoint_write_queue_max_heap_percent", 0.01f, @@ -497,7 +452,7 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.result_write_queue_max_heap_percent", 0.01f, @@ -506,7 +461,7 @@ private 
AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Setting CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT = Setting + public static final Setting AD_CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT = Setting .floatSetting( "plugins.anomaly_detection.checkpoint_maintain_queue_max_heap_percent", 0.001f, @@ -518,7 +473,7 @@ private AnomalyDetectorSettings() {} // expected execution time per cold entity request. This setting controls // the speed of cold entity requests execution. The larger, the faster, and // the more performance impact to customers' workload. - public static final Setting EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS = Setting + public static final Setting AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS = Setting .intSetting( "plugins.anomaly_detection.expected_cold_entity_execution_time_in_millisecs", 3000, @@ -541,73 +496,10 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - /** - * EntityRequest has entityName (# category fields * 256, the recommended limit - * of a keyword field length), model Id (roughly 256 bytes), and QueuedRequest - * fields including detector Id(roughly 128 bytes), expirationEpochMs (long, - * 8 bytes), and priority (12 bytes). - * Plus Java object size (12 bytes), we have roughly 928 bytes per request - * assuming we have 2 categorical fields (plan to support 2 categorical fields now). - * We don't want the total size exceeds 0.1% of the heap. - * We can have at most 0.1% heap / 928 = heap / 928,000. - * For t3.small, 0.1% heap is of 1MB. The queue's size is up to - * 10^ 6 / 928 = 1078 - */ - public static int ENTITY_REQUEST_SIZE_IN_BYTES = 928; - - /** - * EntityFeatureRequest consists of EntityRequest (928 bytes, read comments - * of ENTITY_COLD_START_QUEUE_SIZE_CONSTANT), pointer to current feature - * (8 bytes), and dataStartTimeMillis (8 bytes). We have roughly - * 928 + 16 = 944 bytes per request. - * - * We don't want the total size exceeds 0.1% of the heap. 
- * We should have at most 0.1% heap / 944 = heap / 944,000 - * For t3.small, 0.1% heap is of 1MB. The queue's size is up to - * 10^ 6 / 944 = 1059 - */ - public static int ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES = 944; - - /** - * ResultWriteRequest consists of index request (roughly 1KB), and QueuedRequest - * fields (148 bytes, read comments of ENTITY_REQUEST_SIZE_CONSTANT). - * Plus Java object size (12 bytes), we have roughly 1160 bytes per request - * - * We don't want the total size exceeds 1% of the heap. - * We should have at most 1% heap / 1148 = heap / 116,000 - * For t3.small, 1% heap is of 10MB. The queue's size is up to - * 10^ 7 / 1160 = 8621 - */ - public static int RESULT_WRITE_QUEUE_SIZE_IN_BYTES = 1160; - - /** - * CheckpointWriteRequest consists of IndexRequest (200 KB), and QueuedRequest - * fields (148 bytes, read comments of ENTITY_REQUEST_SIZE_CONSTANT). - * The total is roughly 200 KB per request. - * - * We don't want the total size exceeds 1% of the heap. - * We should have at most 1% heap / 200KB = heap / 20,000,000 - * For t3.small, 1% heap is of 10MB. The queue's size is up to - * 10^ 7 / 2.0 * 10^5 = 50 - */ - public static int CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES = 200_000; - - /** - * CheckpointMaintainRequest has model Id (roughly 256 bytes), and QueuedRequest - * fields including detector Id(roughly 128 bytes), expirationEpochMs (long, - * 8 bytes), and priority (12 bytes). - * Plus Java object size (12 bytes), we have roughly 416 bytes per request. - * We don't want the total size exceeds 0.1% of the heap. - * We can have at most 0.1% heap / 416 = heap / 416,000. - * For t3.small, 0.1% heap is of 1MB. 
The queue's size is up to - * 10^ 6 / 416 = 2403 - */ - public static int CHECKPOINT_MAINTAIN_REQUEST_SIZE_IN_BYTES = 416; - /** * Max concurrent entity cold starts per node */ - public static final Setting ENTITY_COLD_START_QUEUE_CONCURRENCY = Setting + public static final Setting AD_ENTITY_COLD_START_QUEUE_CONCURRENCY = Setting .intSetting( "plugins.anomaly_detection.entity_cold_start_queue_concurrency", 1, @@ -700,48 +592,55 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - public static final Duration QUEUE_MAINTENANCE = Duration.ofMinutes(10); - - public static final float MAX_QUEUED_TASKS_RATIO = 0.5f; - - public static final float MEDIUM_SEGMENT_PRUNE_RATIO = 0.1f; - - public static final float LOW_SEGMENT_PRUNE_RATIO = 0.3f; - - // expensive maintenance (e.g., queue maintenance) with 1/10000 probability - public static final int MAINTENANCE_FREQ_CONSTANT = 10000; + /** + * EntityRequest has entityName (# category fields * 256, the recommended limit + * of a keyword field length), model Id (roughly 256 bytes), and QueuedRequest + * fields including detector Id(roughly 128 bytes), expirationEpochMs (long, + * 8 bytes), and priority (12 bytes). + * Plus Java object size (12 bytes), we have roughly 928 bytes per request + * assuming we have 2 categorical fields (plan to support 2 categorical fields now). + * We don't want the total size exceeds 0.1% of the heap. + * We can have at most 0.1% heap / 928 = heap / 928,000. + * For t3.small, 0.1% heap is of 1MB. The queue's size is up to + * 10^ 6 / 928 = 1078 + */ + // to be replaced by TimeSeriesSettings.FEATURE_REQUEST_SIZE_IN_BYTES + @Deprecated + public static int ENTITY_REQUEST_SIZE_IN_BYTES = 928; - // ====================================== - // Checkpoint setting - // ====================================== - // we won't accept a checkpoint larger than 30MB. Or we risk OOM. - // For reference, in RCF 1.0, the checkpoint of a RCF with 50 trees, 10 dimensions, - // 256 samples is of 3.2MB. 
- // In compact rcf, the same RCF is of 163KB. - // Since we allow at most 5 features, and the default shingle size is 8 and default - // tree number size is 100, we can have at most 25.6 MB in RCF 1.0. - // It is possible that cx increases the max features or shingle size, but we don't want - // to risk OOM for the flexibility. - public static final int MAX_CHECKPOINT_BYTES = 30_000_000; - - // Sets the cap on the number of buffer that can be allocated by the rcf deserialization - // buffer pool. Each buffer is of 512 bytes. Memory occupied by 20 buffers is 10.24 KB. - public static final int MAX_TOTAL_RCF_SERIALIZATION_BUFFERS = 20; - - // the size of the buffer used for rcf deserialization - public static final int SERIALIZATION_BUFFER_BYTES = 512; + /** + * EntityFeatureRequest consists of EntityRequest (928 bytes, read comments + * of ENTITY_COLD_START_QUEUE_SIZE_CONSTANT), pointer to current feature + * (8 bytes), and dataStartTimeMillis (8 bytes). We have roughly + * 928 + 16 = 944 bytes per request. + * + * We don't want the total size exceeds 0.1% of the heap. + * We should have at most 0.1% heap / 944 = heap / 944,000 + * For t3.small, 0.1% heap is of 1MB. The queue's size is up to + * 10^ 6 / 944 = 1059 + */ + // to be replaced by TimeSeriesSettings.FEATURE_REQUEST_SIZE_IN_BYTES + @Deprecated + public static int ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES = 944; // ====================================== // pagination setting // ====================================== // pagination size - public static final Setting PAGE_SIZE = Setting + public static final Setting AD_PAGE_SIZE = Setting .intSetting("plugins.anomaly_detection.page_size", 1_000, 0, 10_000, Setting.Property.NodeScope, Setting.Property.Dynamic); - // within an interval, how many percents are used to process requests. - // 1.0 means we use all of the detection interval to process requests. - // to ensure we don't block next interval, it is better to set it less than 1.0. 
- public static final float INTERVAL_RATIO_FOR_REQUESTS = 0.9f; + // Increase the value will adding pressure to indexing anomaly results and our feature query + // OpenSearch-only setting as previous the legacy default is too low (1000) + public static final Setting AD_MAX_ENTITIES_PER_QUERY = Setting + .intSetting( + "plugins.anomaly_detection.max_entities_per_query", + 1_000_000, + 0, + 2_000_000, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); // ====================================== // preview setting @@ -792,7 +691,7 @@ private AnomalyDetectorSettings() {} // ====================================== // the max number of models to return per node. // the setting is used to limit resource usage due to showing models - public static final Setting MAX_MODEL_SIZE_PER_NODE = Setting + public static final Setting AD_MAX_MODEL_SIZE_PER_NODE = Setting .intSetting( "plugins.anomaly_detection.max_model_size_per_node", 100, @@ -802,20 +701,6 @@ private AnomalyDetectorSettings() {} Setting.Property.Dynamic ); - // profile API needs to report total entities. We can use cardinality aggregation for a single-category field. - // But we cannot do that for multi-category fields as it requires scripting to generate run time fields, - // which is expensive. We work around the problem by using a composite query to find the first 10_000 buckets. - // Generally, traversing all buckets/combinations can't be done without visiting all matches, which is costly - // for data with many entities. Given that it is often enough to have a lower bound of the number of entities, - // such as "there are at least 10000 entities", the default is set to 10,000. That is, requests will count the - // total entities up to 10,000. 
- public static final int MAX_TOTAL_ENTITIES_TO_TRACK = 10_000; - - // ====================================== - // Cold start setting - // ====================================== - public static int MAX_COLD_START_ROUNDS = 2; - // ====================================== // Validate Detector API setting // ====================================== diff --git a/src/main/java/org/opensearch/ad/settings/LegacyOpenDistroAnomalyDetectorSettings.java b/src/main/java/org/opensearch/ad/settings/LegacyOpenDistroAnomalyDetectorSettings.java index d8ca4b777..f552e1f5d 100644 --- a/src/main/java/org/opensearch/ad/settings/LegacyOpenDistroAnomalyDetectorSettings.java +++ b/src/main/java/org/opensearch/ad/settings/LegacyOpenDistroAnomalyDetectorSettings.java @@ -170,7 +170,7 @@ private LegacyOpenDistroAnomalyDetectorSettings() {} Setting.Property.Deprecated ); - public static final Setting FILTER_BY_BACKEND_ROLES = Setting + public static final Setting AD_FILTER_BY_BACKEND_ROLES = Setting .boolSetting( "opendistro.anomaly_detection.filter_by_backend_roles", false, diff --git a/src/main/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplier.java b/src/main/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplier.java index 3f5421032..2cdee5fb8 100644 --- a/src/main/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplier.java +++ b/src/main/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplier.java @@ -14,7 +14,7 @@ import static org.opensearch.ad.ml.ModelState.LAST_CHECKPOINT_TIME_KEY; import static org.opensearch.ad.ml.ModelState.LAST_USED_TIME_KEY; import static org.opensearch.ad.ml.ModelState.MODEL_TYPE_KEY; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE; import java.util.ArrayList; import java.util.Arrays; @@ -68,8 +68,8 @@ public class ModelsOnNodeSupplier implements Supplier>> public ModelsOnNodeSupplier(ModelManager 
modelManager, CacheProvider cache, Settings settings, ClusterService clusterService) { this.modelManager = modelManager; this.cache = cache; - this.numModelsToReturn = MAX_MODEL_SIZE_PER_NODE.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_MODEL_SIZE_PER_NODE, it -> this.numModelsToReturn = it); + this.numModelsToReturn = AD_MAX_MODEL_SIZE_PER_NODE.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_MAX_MODEL_SIZE_PER_NODE, it -> this.numModelsToReturn = it); } @Override diff --git a/src/main/java/org/opensearch/ad/task/ADBatchTaskCache.java b/src/main/java/org/opensearch/ad/task/ADBatchTaskCache.java index 2071ad97f..05897fe64 100644 --- a/src/main/java/org/opensearch/ad/task/ADBatchTaskCache.java +++ b/src/main/java/org/opensearch/ad/task/ADBatchTaskCache.java @@ -11,10 +11,10 @@ package org.opensearch.ad.task; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_MIN_SAMPLES; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_TREES; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.TIME_DECAY; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_MIN_SAMPLES; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_SAMPLES_PER_TREE; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_TREES; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.TIME_DECAY; import java.util.ArrayDeque; import java.util.Deque; @@ -26,8 +26,8 @@ import org.opensearch.ad.model.ADTask; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import com.amazon.randomcutforest.config.Precision; import com.amazon.randomcutforest.config.TransformMethod; @@ 
-78,9 +78,9 @@ protected ADBatchTaskCache(ADTask adTask) { .parallelExecutionEnabled(false) .compact(true) .precision(Precision.FLOAT_32) - .boundingBoxCacheFraction(AnomalyDetectorSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) .shingleSize(shingleSize) - .anomalyRate(1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE) + .anomalyRate(1 - TimeSeriesSettings.THRESHOLD_MIN_PVALUE) .transformMethod(TransformMethod.NORMALIZE) .alertOnce(true) .autoAdjust(true) diff --git a/src/main/java/org/opensearch/ad/task/ADBatchTaskRunner.java b/src/main/java/org/opensearch/ad/task/ADBatchTaskRunner.java index 391d87e26..e4d28957a 100644 --- a/src/main/java/org/opensearch/ad/task/ADBatchTaskRunner.java +++ b/src/main/java/org/opensearch/ad/task/ADBatchTaskRunner.java @@ -11,7 +11,6 @@ package org.opensearch.ad.task; -import static org.opensearch.ad.breaker.MemoryCircuitBreaker.DEFAULT_JVM_HEAP_USAGE_THRESHOLD; import static org.opensearch.ad.constant.ADCommonMessages.NO_ELIGIBLE_NODE_TO_RUN_DETECTOR; import static org.opensearch.ad.model.ADTask.CURRENT_PIECE_FIELD; import static org.opensearch.ad.model.ADTask.EXECUTION_END_TIME_FIELD; @@ -25,9 +24,10 @@ import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_RUNNING_ENTITIES_PER_DETECTOR_FOR_HISTORICAL_ANALYSIS; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_TOP_ENTITIES_FOR_HISTORICAL_ANALYSIS; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_TOP_ENTITIES_LIMIT_FOR_HISTORICAL_ANALYSIS; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_MIN_SAMPLES; import static org.opensearch.ad.stats.InternalStatNames.JVM_HEAP_USAGE; import static org.opensearch.timeseries.TimeSeriesAnalyticsPlugin.AD_BATCH_TASK_THREAD_POOL_NAME; +import static org.opensearch.timeseries.breaker.MemoryCircuitBreaker.DEFAULT_JVM_HEAP_USAGE_THRESHOLD; +import static 
org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_MIN_SAMPLES; import static org.opensearch.timeseries.stats.StatNames.AD_EXECUTING_BATCH_TASK_COUNT; import static org.opensearch.timeseries.util.ParseUtils.isNullOrEmpty; @@ -49,7 +49,6 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.ThreadedActionListener; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.PriorityTracker; import org.opensearch.ad.cluster.HashRing; import org.opensearch.ad.constant.ADCommonMessages; @@ -92,6 +91,7 @@ import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.common.exception.ResourceNotFoundException; @@ -129,7 +129,7 @@ public class ADBatchTaskRunner { private final ADStats adStats; private final ClusterService clusterService; private final FeatureManager featureManager; - private final ADCircuitBreakerService adCircuitBreakerService; + private final CircuitBreakerService adCircuitBreakerService; private final ADTaskManager adTaskManager; private final AnomalyResultBulkIndexHandler anomalyResultBulkIndexHandler; private final ADIndexManagement anomalyDetectionIndices; @@ -155,7 +155,7 @@ public ADBatchTaskRunner( ClusterService clusterService, Client client, SecurityClientUtil clientUtil, - ADCircuitBreakerService adCircuitBreakerService, + CircuitBreakerService adCircuitBreakerService, FeatureManager featureManager, ADTaskManager adTaskManager, ADIndexManagement anomalyDetectionIndices, diff --git a/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java 
b/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java index 132f41869..014a9f798 100644 --- a/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java +++ b/src/main/java/org/opensearch/ad/task/ADTaskCacheManager.java @@ -11,13 +11,10 @@ package org.opensearch.ad.task; -import static org.opensearch.ad.MemoryTracker.Origin.HISTORICAL_SINGLE_ENTITY_DETECTOR; import static org.opensearch.ad.constant.ADCommonMessages.DETECTOR_IS_RUNNING; import static org.opensearch.ad.constant.ADCommonMessages.EXCEED_HISTORICAL_ANALYSIS_LIMIT; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_CACHED_DELETED_TASKS; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_TREES; -import static org.opensearch.timeseries.util.ParseUtils.isNullOrEmpty; +import static org.opensearch.timeseries.MemoryTracker.Origin.HISTORICAL_SINGLE_ENTITY_DETECTOR; import java.time.Instant; import java.util.ArrayList; @@ -27,37 +24,33 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Queue; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Semaphore; import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.model.ADTask; import org.opensearch.ad.model.ADTaskType; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; -import org.opensearch.core.action.ActionListener; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.common.exception.DuplicateTaskException; import org.opensearch.timeseries.common.exception.LimitExceededException; import 
org.opensearch.timeseries.model.Entity; -import org.opensearch.timeseries.model.TaskState; -import org.opensearch.transport.TransportService; +import org.opensearch.timeseries.settings.TimeSeriesSettings; +import org.opensearch.timeseries.task.TaskCacheManager; +import org.opensearch.timeseries.util.ParseUtils; import com.amazon.randomcutforest.RandomCutForest; import com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; import com.google.common.collect.ImmutableList; -public class ADTaskCacheManager { +public class ADTaskCacheManager extends TaskCacheManager { private final Logger logger = LogManager.getLogger(ADTaskCacheManager.class); private volatile Integer maxAdBatchTaskPerNode; - private volatile Integer maxCachedDeletedTask; private final MemoryTracker memoryTracker; private final int numberSize = 8; public static final int TASK_RETRY_LIMIT = 3; @@ -89,19 +82,6 @@ public class ADTaskCacheManager { *

Key: detector id

*/ private Map detectorTaskSlotLimit; - /** - * This field is to cache all realtime tasks on coordinating node. - *

Node: coordinating node

- *

Key is detector id

- */ - private Map realtimeTaskCaches; - /** - * This field is to cache all deleted detector level tasks on coordinating node. - * Will try to clean up child task and AD result later. - *

Node: coordinating node

- * Check {@link ADTaskManager#cleanChildTasksAndADResultsOfDeletedTask()} - */ - private Queue deletedDetectorTasks; // =================================================================== // Fields below are caches on worker node @@ -126,17 +106,6 @@ public class ADTaskCacheManager { */ private Map> hcBatchTaskRunState; - // =================================================================== - // Fields below are caches on any data node serves delete detector - // request. Check ADTaskManager#deleteADResultOfDetector - // =================================================================== - /** - * This field is to cache deleted detector IDs. Hourly cron will poll this queue - * and clean AD results. Check {@link ADTaskManager#cleanADResultOfDeletedDetector()} - *

Node: any data node servers delete detector request

- */ - private Queue deletedDetectors; - /** * Constructor to create AD task cache manager. * @@ -145,17 +114,14 @@ public class ADTaskCacheManager { * @param memoryTracker AD memory tracker */ public ADTaskCacheManager(Settings settings, ClusterService clusterService, MemoryTracker memoryTracker) { + super(settings, clusterService); this.maxAdBatchTaskPerNode = MAX_BATCH_TASK_PER_NODE.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_BATCH_TASK_PER_NODE, it -> maxAdBatchTaskPerNode = it); - this.maxCachedDeletedTask = MAX_CACHED_DELETED_TASKS.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CACHED_DELETED_TASKS, it -> maxCachedDeletedTask = it); this.batchTaskCaches = new ConcurrentHashMap<>(); this.memoryTracker = memoryTracker; this.detectorTasks = new ConcurrentHashMap<>(); this.hcBatchTaskCaches = new ConcurrentHashMap<>(); - this.realtimeTaskCaches = new ConcurrentHashMap<>(); - this.deletedDetectorTasks = new ConcurrentLinkedQueue<>(); - this.deletedDetectors = new ConcurrentLinkedQueue<>(); + this.detectorTaskSlotLimit = new ConcurrentHashMap<>(); this.hcBatchTaskRunState = new ConcurrentHashMap<>(); this.cleanExpiredHCBatchTaskRunStatesSemaphore = new Semaphore(1); @@ -354,8 +320,8 @@ private long calculateADTaskCacheSize(ADTask adTask) { return memoryTracker .estimateTRCFModelSize( dimension, - NUM_TREES, - AnomalyDetectorSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, detector.getShingleSize().intValue(), false ) + shingleMemorySize(detector.getShingleSize(), detector.getEnabledFeatureIds().size()); @@ -373,8 +339,7 @@ public long getModelSize(String taskId) { RandomCutForest rcfForest = tRCF.getForest(); int dimensions = rcfForest.getDimensions(); int numberOfTrees = rcfForest.getNumberOfTrees(); - return memoryTracker - .estimateTRCFModelSize(dimensions, numberOfTrees, 
AnomalyDetectorSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, 1, false); + return memoryTracker.estimateTRCFModelSize(dimensions, numberOfTrees, TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO, 1, false); } /** @@ -483,7 +448,7 @@ public ADTaskCancellationState cancelByDetectorId(String detectorId, String dete taskStateCache.setCancelReason(reason); taskStateCache.setCancelledBy(userName); - if (isNullOrEmpty(taskCaches)) { + if (ParseUtils.isNullOrEmpty(taskCaches)) { return ADTaskCancellationState.NOT_FOUND; } @@ -1012,174 +977,6 @@ public void clearPendingEntities(String detectorId) { } } - /** - * Check if realtime task field value change needed or not by comparing with cache. - * 1. If new field value is null, will consider changed needed to this field. - * 2. will consider the real time task change needed if - * 1) init progress is larger or the old init progress is null, or - * 2) if the state is different, and it is not changing from running to init. - * for other fields, as long as field values changed, will consider the realtime - * task change needed. We did this so that the init progress or state won't go backwards. - * 3. If realtime task cache not found, will consider the realtime task change needed. - * - * @param detectorId detector id - * @param newState new task state - * @param newInitProgress new init progress - * @param newError new error - * @return true if realtime task change needed. 
- */ - public boolean isRealtimeTaskChangeNeeded(String detectorId, String newState, Float newInitProgress, String newError) { - if (realtimeTaskCaches.containsKey(detectorId)) { - ADRealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(detectorId); - boolean stateChangeNeeded = false; - String oldState = realtimeTaskCache.getState(); - if (newState != null - && !newState.equals(oldState) - && !(TaskState.INIT.name().equals(newState) && TaskState.RUNNING.name().equals(oldState))) { - stateChangeNeeded = true; - } - boolean initProgressChangeNeeded = false; - Float existingProgress = realtimeTaskCache.getInitProgress(); - if (newInitProgress != null - && !newInitProgress.equals(existingProgress) - && (existingProgress == null || newInitProgress > existingProgress)) { - initProgressChangeNeeded = true; - } - boolean errorChanged = false; - if (newError != null && !newError.equals(realtimeTaskCache.getError())) { - errorChanged = true; - } - if (stateChangeNeeded || initProgressChangeNeeded || errorChanged) { - return true; - } - return false; - } else { - return true; - } - } - - /** - * Update realtime task cache with new field values. If realtime task cache exist, update it - * directly if task is not done; if task is done, remove the detector's realtime task cache. - * - * If realtime task cache doesn't exist, will do nothing. Next realtime job run will re-init - * realtime task cache when it finds task cache not inited yet. 
- * Check {@link ADTaskManager#initRealtimeTaskCacheAndCleanupStaleCache(String, AnomalyDetector, TransportService, ActionListener)}, - * {@link ADTaskManager#updateLatestRealtimeTaskOnCoordinatingNode(String, String, Long, Long, String, ActionListener)} - * - * @param detectorId detector id - * @param newState new task state - * @param newInitProgress new init progress - * @param newError new error - */ - public void updateRealtimeTaskCache(String detectorId, String newState, Float newInitProgress, String newError) { - ADRealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(detectorId); - if (realtimeTaskCache != null) { - if (newState != null) { - realtimeTaskCache.setState(newState); - } - if (newInitProgress != null) { - realtimeTaskCache.setInitProgress(newInitProgress); - } - if (newError != null) { - realtimeTaskCache.setError(newError); - } - if (newState != null && !TaskState.NOT_ENDED_STATES.contains(newState)) { - // If task is done, will remove its realtime task cache. - logger.info("Realtime task done with state {}, remove RT task cache for detector ", newState, detectorId); - removeRealtimeTaskCache(detectorId); - } - } else { - logger.debug("Realtime task cache is not inited yet for detector {}", detectorId); - } - } - - public void initRealtimeTaskCache(String detectorId, long detectorIntervalInMillis) { - realtimeTaskCaches.put(detectorId, new ADRealtimeTaskCache(null, null, null, detectorIntervalInMillis)); - logger.debug("Realtime task cache inited"); - } - - public void refreshRealtimeJobRunTime(String detectorId) { - ADRealtimeTaskCache taskCache = realtimeTaskCaches.get(detectorId); - if (taskCache != null) { - taskCache.setLastJobRunTime(Instant.now().toEpochMilli()); - } - } - - /** - * Get detector IDs from realtime task cache. 
- * @return array of detector id - */ - public String[] getDetectorIdsInRealtimeTaskCache() { - return realtimeTaskCaches.keySet().toArray(new String[0]); - } - - /** - * Remove detector's realtime task from cache. - * @param detectorId detector id - */ - public void removeRealtimeTaskCache(String detectorId) { - if (realtimeTaskCaches.containsKey(detectorId)) { - logger.info("Delete realtime cache for detector {}", detectorId); - realtimeTaskCaches.remove(detectorId); - } - } - - public ADRealtimeTaskCache getRealtimeTaskCache(String detectorId) { - return realtimeTaskCaches.get(detectorId); - } - - /** - * Clear realtime task cache. - */ - public void clearRealtimeTaskCache() { - realtimeTaskCaches.clear(); - } - - /** - * Add deleted task's id to deleted detector tasks queue. - * @param taskId task id - */ - public void addDeletedDetectorTask(String taskId) { - if (deletedDetectorTasks.size() < maxCachedDeletedTask) { - deletedDetectorTasks.add(taskId); - } - } - - /** - * Check if deleted task queue has items. - * @return true if has deleted detector task in cache - */ - public boolean hasDeletedDetectorTask() { - return !deletedDetectorTasks.isEmpty(); - } - - /** - * Poll one deleted detector task. - * @return task id - */ - public String pollDeletedDetectorTask() { - return this.deletedDetectorTasks.poll(); - } - - /** - * Add deleted detector's id to deleted detector queue. - * @param detectorId detector id - */ - public void addDeletedDetector(String detectorId) { - if (deletedDetectors.size() < maxCachedDeletedTask) { - deletedDetectors.add(detectorId); - } - } - - /** - * Poll one deleted detector. 
- * @return detector id - */ - public String pollDeletedDetector() { - return this.deletedDetectors.poll(); - } - public String getDetectorTaskId(String detectorId) { return detectorTasks.get(detectorId); } @@ -1317,7 +1114,7 @@ public void cleanExpiredHCBatchTaskRunStates() { for (Map.Entry> detectorRunStates : hcBatchTaskRunState.entrySet()) { List taskIdOfExpiredStates = new ArrayList<>(); String detectorId = detectorRunStates.getKey(); - boolean noRunningTask = isNullOrEmpty(getTasksOfDetector(detectorId)); + boolean noRunningTask = ParseUtils.isNullOrEmpty(getTasksOfDetector(detectorId)); Map taskRunStates = detectorRunStates.getValue(); if (taskRunStates == null) { // If detector's task run state is null, add detector id to detectorIdOfEmptyStates and remove it from @@ -1362,32 +1159,4 @@ public void cleanExpiredHCBatchTaskRunStates() { } } - /** - * We query result index to check if there are any result generated for detector to tell whether it passed initialization of not. - * To avoid repeated query when there is no data, record whether we have done that or not. - * @param id detector id - */ - public void markResultIndexQueried(String id) { - ADRealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(id); - // we initialize a real time cache at the beginning of AnomalyResultTransportAction if it - // cannot be found. If the cache is empty, we will return early and wait it for it to be - // initialized. - if (realtimeTaskCache != null) { - realtimeTaskCache.setQueriedResultIndex(true); - } - } - - /** - * We query result index to check if there are any result generated for detector to tell whether it passed initialization of not. - * - * @param id detector id - * @return whether we have queried result index or not. 
- */ - public boolean hasQueriedResultIndex(String id) { - ADRealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(id); - if (realtimeTaskCache != null) { - return realtimeTaskCache.hasQueriedResultIndex(); - } - return false; - } } diff --git a/src/main/java/org/opensearch/ad/task/ADTaskManager.java b/src/main/java/org/opensearch/ad/task/ADTaskManager.java index 8b67e746a..d6a2c7242 100644 --- a/src/main/java/org/opensearch/ad/task/ADTaskManager.java +++ b/src/main/java/org/opensearch/ad/task/ADTaskManager.java @@ -43,7 +43,6 @@ import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_OLD_AD_TASK_DOCS; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_OLD_AD_TASK_DOCS_PER_DETECTOR; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_RUNNING_ENTITIES_PER_DETECTOR_FOR_HISTORICAL_ANALYSIS; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_MIN_SAMPLES; import static org.opensearch.ad.stats.InternalStatNames.AD_DETECTOR_ASSIGNED_BATCH_TASK_SLOT_COUNT; import static org.opensearch.ad.stats.InternalStatNames.AD_USED_BATCH_TASK_SLOT_COUNT; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; @@ -53,6 +52,7 @@ import static org.opensearch.timeseries.constant.CommonName.TASK_ID_FIELD; import static org.opensearch.timeseries.model.TaskState.NOT_ENDED_STATES; import static org.opensearch.timeseries.model.TaskType.taskTypeToString; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_MIN_SAMPLES; import static org.opensearch.timeseries.util.ExceptionUtil.getErrorMessage; import static org.opensearch.timeseries.util.ExceptionUtil.getShardsFailure; import static org.opensearch.timeseries.util.ParseUtils.isNullOrEmpty; @@ -166,6 +166,7 @@ import org.opensearch.timeseries.model.Entity; import org.opensearch.timeseries.model.Job; import org.opensearch.timeseries.model.TaskState; +import org.opensearch.timeseries.task.RealtimeTaskCache; import 
org.opensearch.timeseries.util.DiscoveryNodeFilterer; import org.opensearch.timeseries.util.RestHandlerUtils; import org.opensearch.transport.TransportRequestOptions; @@ -1686,7 +1687,7 @@ protected void deleteTaskDocs( if (!bulkItemResponse.isFailed()) { logger.debug("Add detector task into cache. Task id: {}", bulkItemResponse.getId()); // add deleted task in cache and delete its child tasks and AD results - adTaskCacheManager.addDeletedDetectorTask(bulkItemResponse.getId()); + adTaskCacheManager.addDeletedTask(bulkItemResponse.getId()); } } } @@ -1716,11 +1717,11 @@ protected void deleteTaskDocs( * Poll deleted detector task from cache and delete its child tasks and AD results. */ public void cleanChildTasksAndADResultsOfDeletedTask() { - if (!adTaskCacheManager.hasDeletedDetectorTask()) { + if (!adTaskCacheManager.hasDeletedTask()) { return; } threadPool.schedule(() -> { - String taskId = adTaskCacheManager.pollDeletedDetectorTask(); + String taskId = adTaskCacheManager.pollDeletedTask(); if (taskId == null) { return; } @@ -1932,7 +1933,7 @@ private void deleteADResultOfDetector(String detectorId) { ActionListener .wrap(response -> { logger.debug("Successfully deleted AD results of detector " + detectorId); }, exception -> { logger.error("Failed to delete AD results of detector " + detectorId, exception); - adTaskCacheManager.addDeletedDetector(detectorId); + adTaskCacheManager.addDeletedConfig(detectorId); }) ); } @@ -1941,7 +1942,7 @@ private void deleteADResultOfDetector(String detectorId) { * Clean AD results of deleted detector. 
*/ public void cleanADResultOfDeletedDetector() { - String detectorId = adTaskCacheManager.pollDeletedDetector(); + String detectorId = adTaskCacheManager.pollDeletedConfig(); if (detectorId != null) { deleteADResultOfDetector(detectorId); } @@ -2805,7 +2806,7 @@ public synchronized void removeStaleRunningEntity( } public boolean skipUpdateHCRealtimeTask(String detectorId, String error) { - ADRealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); + RealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); return realtimeTaskCache != null && realtimeTaskCache.getInitProgress() != null && realtimeTaskCache.getInitProgress().floatValue() == 1.0 @@ -2813,7 +2814,7 @@ public boolean skipUpdateHCRealtimeTask(String detectorId, String error) { } public boolean isHCRealtimeTaskStartInitializing(String detectorId) { - ADRealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); + RealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); return realtimeTaskCache != null && realtimeTaskCache.getInitProgress() != null && realtimeTaskCache.getInitProgress().floatValue() > 0; @@ -3077,7 +3078,7 @@ public void maintainRunningRealtimeTasks() { } for (int i = 0; i < detectorIds.length; i++) { String detectorId = detectorIds[i]; - ADRealtimeTaskCache taskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); + RealtimeTaskCache taskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); if (taskCache != null && taskCache.expired()) { adTaskCacheManager.removeRealtimeTaskCache(detectorId); } diff --git a/src/main/java/org/opensearch/ad/transport/AnomalyDetectorJobTransportAction.java b/src/main/java/org/opensearch/ad/transport/AnomalyDetectorJobTransportAction.java index 5f4a7acff..5a81c43ae 100644 --- a/src/main/java/org/opensearch/ad/transport/AnomalyDetectorJobTransportAction.java +++ 
b/src/main/java/org/opensearch/ad/transport/AnomalyDetectorJobTransportAction.java @@ -13,8 +13,8 @@ import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_START_DETECTOR; import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_STOP_DETECTOR; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_REQUEST_TIMEOUT; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; import static org.opensearch.timeseries.util.ParseUtils.getUserContext; import static org.opensearch.timeseries.util.ParseUtils.resolveUserAndExecute; import static org.opensearch.timeseries.util.RestHandlerUtils.wrapRestActionListener; @@ -75,8 +75,8 @@ public AnomalyDetectorJobTransportAction( this.anomalyDetectionIndices = anomalyDetectionIndices; this.xContentRegistry = xContentRegistry; this.adTaskManager = adTaskManager; - filterByEnabled = FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); + filterByEnabled = AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); this.recorder = recorder; } diff --git a/src/main/java/org/opensearch/ad/transport/AnomalyResultTransportAction.java b/src/main/java/org/opensearch/ad/transport/AnomalyResultTransportAction.java index 404640e01..084db7f42 100644 --- a/src/main/java/org/opensearch/ad/transport/AnomalyResultTransportAction.java +++ b/src/main/java/org/opensearch/ad/transport/AnomalyResultTransportAction.java @@ -11,8 +11,8 @@ package org.opensearch.ad.transport; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.PAGE_SIZE; +import static 
org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_PAGE_SIZE; import static org.opensearch.timeseries.constant.CommonMessages.INVALID_SEARCH_QUERY_MSG; import java.net.ConnectException; @@ -42,7 +42,6 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.cluster.HashRing; import org.opensearch.ad.constant.ADCommonMessages; import org.opensearch.ad.constant.ADCommonName; @@ -79,6 +78,7 @@ import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.ClientException; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.InternalFailure; @@ -92,6 +92,7 @@ import org.opensearch.timeseries.model.Entity; import org.opensearch.timeseries.model.FeatureData; import org.opensearch.timeseries.model.IntervalTimeConfiguration; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.stats.StatNames; import org.opensearch.timeseries.util.ExceptionUtil; import org.opensearch.timeseries.util.ParseUtils; @@ -124,7 +125,7 @@ public class AnomalyResultTransportAction extends HandledTransportAction(); this.xContentRegistry = xContentRegistry; - this.intervalRatioForRequest = AnomalyDetectorSettings.INTERVAL_RATIO_FOR_REQUESTS; + this.intervalRatioForRequest = TimeSeriesSettings.INTERVAL_RATIO_FOR_REQUESTS; - this.maxEntitiesPerInterval = MAX_ENTITIES_PER_QUERY.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_ENTITIES_PER_QUERY, it -> 
maxEntitiesPerInterval = it); + this.maxEntitiesPerInterval = AD_MAX_ENTITIES_PER_QUERY.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_MAX_ENTITIES_PER_QUERY, it -> maxEntitiesPerInterval = it); - this.pageSize = PAGE_SIZE.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(PAGE_SIZE, it -> pageSize = it); + this.pageSize = AD_PAGE_SIZE.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_PAGE_SIZE, it -> pageSize = it); this.adTaskManager = adTaskManager; } diff --git a/src/main/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTransportAction.java b/src/main/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTransportAction.java index 6ec539790..9b8caba19 100644 --- a/src/main/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTransportAction.java +++ b/src/main/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTransportAction.java @@ -13,7 +13,7 @@ import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_DELETE_DETECTOR; import static org.opensearch.ad.model.ADTaskType.HISTORICAL_DETECTOR_TASK_TYPES; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.timeseries.util.ParseUtils.getUserContext; import static org.opensearch.timeseries.util.ParseUtils.resolveUserAndExecute; @@ -80,8 +80,8 @@ public DeleteAnomalyDetectorTransportAction( this.clusterService = clusterService; this.xContentRegistry = xContentRegistry; this.adTaskManager = adTaskManager; - filterByEnabled = AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); + filterByEnabled = 
AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); } @Override diff --git a/src/main/java/org/opensearch/ad/transport/DeleteAnomalyResultsTransportAction.java b/src/main/java/org/opensearch/ad/transport/DeleteAnomalyResultsTransportAction.java index 25e947117..e2db9ed4a 100644 --- a/src/main/java/org/opensearch/ad/transport/DeleteAnomalyResultsTransportAction.java +++ b/src/main/java/org/opensearch/ad/transport/DeleteAnomalyResultsTransportAction.java @@ -12,7 +12,7 @@ package org.opensearch.ad.transport; import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_DELETE_AD_RESULT; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.timeseries.util.ParseUtils.addUserBackendRolesFilter; import static org.opensearch.timeseries.util.ParseUtils.getUserContext; import static org.opensearch.timeseries.util.RestHandlerUtils.wrapRestActionListener; @@ -50,8 +50,8 @@ public DeleteAnomalyResultsTransportAction( ) { super(DeleteAnomalyResultsAction.NAME, transportService, actionFilters, DeleteByQueryRequest::new); this.client = client; - filterEnabled = FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterEnabled = it); + filterEnabled = AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterEnabled = it); } @Override diff --git a/src/main/java/org/opensearch/ad/transport/EntityResultTransportAction.java b/src/main/java/org/opensearch/ad/transport/EntityResultTransportAction.java index 1fc954a52..d17ce7137 100644 --- a/src/main/java/org/opensearch/ad/transport/EntityResultTransportAction.java +++ 
b/src/main/java/org/opensearch/ad/transport/EntityResultTransportAction.java @@ -26,7 +26,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.constant.ADCommonName; import org.opensearch.ad.indices.ADIndex; @@ -52,6 +51,7 @@ import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.constant.CommonMessages; @@ -85,7 +85,7 @@ public class EntityResultTransportAction extends HandledTransportAction filterByEnabled = it); + filterByEnabled = AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); this.transportService = transportService; this.adTaskManager = adTaskManager; } @@ -169,7 +170,7 @@ protected void getExecute(GetAnomalyDetectorRequest request, ActionListener filterByEnabled = it); + filterByEnabled = AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); this.settings = settings; } diff --git a/src/main/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportAction.java b/src/main/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportAction.java index 8ab887fdc..5f6c6c9d3 100644 --- a/src/main/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportAction.java +++ 
b/src/main/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportAction.java @@ -12,7 +12,7 @@ package org.opensearch.ad.transport; import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_PREVIEW_DETECTOR; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_ANOMALY_FEATURES; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_CONCURRENT_PREVIEW; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; @@ -34,7 +34,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.ad.AnomalyDetectorRunner; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.constant.ADCommonMessages; import org.opensearch.ad.model.AnomalyDetector; import org.opensearch.ad.model.AnomalyResult; @@ -51,6 +50,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.tasks.Task; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.ClientException; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.common.exception.TimeSeriesException; @@ -68,7 +68,7 @@ public class PreviewAnomalyDetectorTransportAction extends private final NamedXContentRegistry xContentRegistry; private volatile Integer maxAnomalyFeatures; private volatile Boolean filterByEnabled; - private final ADCircuitBreakerService adCircuitBreakerService; + private final CircuitBreakerService adCircuitBreakerService; private Semaphore lock; @Inject @@ -80,7 +80,7 @@ public PreviewAnomalyDetectorTransportAction( Client client, AnomalyDetectorRunner anomalyDetectorRunner, 
NamedXContentRegistry xContentRegistry, - ADCircuitBreakerService adCircuitBreakerService + CircuitBreakerService adCircuitBreakerService ) { super(PreviewAnomalyDetectorAction.NAME, transportService, actionFilters, PreviewAnomalyDetectorRequest::new); this.clusterService = clusterService; @@ -89,8 +89,8 @@ public PreviewAnomalyDetectorTransportAction( this.xContentRegistry = xContentRegistry; maxAnomalyFeatures = MAX_ANOMALY_FEATURES.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_ANOMALY_FEATURES, it -> maxAnomalyFeatures = it); - filterByEnabled = AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); + filterByEnabled = AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); this.adCircuitBreakerService = adCircuitBreakerService; this.lock = new Semaphore(MAX_CONCURRENT_PREVIEW.get(settings), true); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CONCURRENT_PREVIEW, it -> { lock = new Semaphore(it); }); diff --git a/src/main/java/org/opensearch/ad/transport/ProfileTransportAction.java b/src/main/java/org/opensearch/ad/transport/ProfileTransportAction.java index e05251f2f..af1bbed50 100644 --- a/src/main/java/org/opensearch/ad/transport/ProfileTransportAction.java +++ b/src/main/java/org/opensearch/ad/transport/ProfileTransportAction.java @@ -11,7 +11,7 @@ package org.opensearch.ad.transport; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE; import java.io.IOException; import java.util.List; @@ -83,8 +83,8 @@ public ProfileTransportAction( this.modelManager = modelManager; this.featureManager = featureManager; 
this.cacheProvider = cacheProvider; - this.numModelsToReturn = MAX_MODEL_SIZE_PER_NODE.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_MODEL_SIZE_PER_NODE, it -> this.numModelsToReturn = it); + this.numModelsToReturn = AD_MAX_MODEL_SIZE_PER_NODE.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_MAX_MODEL_SIZE_PER_NODE, it -> this.numModelsToReturn = it); } @Override diff --git a/src/main/java/org/opensearch/ad/transport/RCFResultTransportAction.java b/src/main/java/org/opensearch/ad/transport/RCFResultTransportAction.java index 0b3e6e921..d7df181bb 100644 --- a/src/main/java/org/opensearch/ad/transport/RCFResultTransportAction.java +++ b/src/main/java/org/opensearch/ad/transport/RCFResultTransportAction.java @@ -20,7 +20,6 @@ import org.opensearch.Version; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.cluster.HashRing; import org.opensearch.ad.ml.ModelManager; import org.opensearch.ad.stats.ADStats; @@ -28,6 +27,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.action.ActionListener; import org.opensearch.tasks.Task; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.constant.CommonMessages; import org.opensearch.timeseries.stats.StatNames; @@ -37,7 +37,7 @@ public class RCFResultTransportAction extends HandledTransportAction filterByEnabled = it); + this.filterByEnabled = AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); this.searchFeatureDao = searchFeatureDao; this.clock = Clock.systemUTC(); this.settings = settings; diff --git 
a/src/main/java/org/opensearch/ad/transport/handler/ADSearchHandler.java b/src/main/java/org/opensearch/ad/transport/handler/ADSearchHandler.java index eab12d195..8e23243d8 100644 --- a/src/main/java/org/opensearch/ad/transport/handler/ADSearchHandler.java +++ b/src/main/java/org/opensearch/ad/transport/handler/ADSearchHandler.java @@ -12,7 +12,7 @@ package org.opensearch.ad.transport.handler; import static org.opensearch.ad.constant.ADCommonMessages.FAIL_TO_SEARCH; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.timeseries.util.ParseUtils.addUserBackendRolesFilter; import static org.opensearch.timeseries.util.ParseUtils.getUserContext; import static org.opensearch.timeseries.util.ParseUtils.isAdmin; @@ -40,8 +40,8 @@ public class ADSearchHandler { public ADSearchHandler(Settings settings, ClusterService clusterService, Client client) { this.client = client; - filterEnabled = AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterEnabled = it); + filterEnabled = AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterEnabled = it); } /** diff --git a/src/main/java/org/opensearch/ad/MemoryTracker.java b/src/main/java/org/opensearch/timeseries/MemoryTracker.java similarity index 89% rename from src/main/java/org/opensearch/ad/MemoryTracker.java rename to src/main/java/org/opensearch/timeseries/MemoryTracker.java index 1e40ef47a..a474ae21e 100644 --- a/src/main/java/org/opensearch/ad/MemoryTracker.java +++ b/src/main/java/org/opensearch/timeseries/MemoryTracker.java @@ -9,9 +9,9 @@ * GitHub history for details. 
*/ -package org.opensearch.ad; +package org.opensearch.timeseries; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE; import java.util.EnumMap; import java.util.Locale; @@ -19,55 +19,48 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.monitor.jvm.JvmService; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import com.amazon.randomcutforest.RandomCutForest; import com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; -/** - * Class to track AD memory usage. - * - */ public class MemoryTracker { private static final Logger LOG = LogManager.getLogger(MemoryTracker.class); public enum Origin { - SINGLE_ENTITY_DETECTOR, - HC_DETECTOR, + REAL_TIME_DETECTOR, HISTORICAL_SINGLE_ENTITY_DETECTOR, + REAL_TIME_FORECASTER } // memory tracker for total consumption of bytes - private long totalMemoryBytes; - private final Map totalMemoryBytesByOrigin; + protected long totalMemoryBytes; + protected final Map totalMemoryBytesByOrigin; // reserved for models. Cannot be deleted at will. 
- private long reservedMemoryBytes; - private final Map reservedMemoryBytesByOrigin; - private long heapSize; - private long heapLimitBytes; - private long desiredModelSize; + protected long reservedMemoryBytes; + protected final Map reservedMemoryBytesByOrigin; + protected long heapSize; + protected long heapLimitBytes; // we observe threshold model uses a fixed size array and the size is the same - private int thresholdModelBytes; - private ADCircuitBreakerService adCircuitBreakerService; + protected int thresholdModelBytes; + protected CircuitBreakerService timeSeriesCircuitBreakerService; /** * Constructor * * @param jvmService Service providing jvm info * @param modelMaxSizePercentage Percentage of heap for the max size of a model - * @param modelDesiredSizePercentage percentage of heap for the desired size of a model * @param clusterService Cluster service object - * @param adCircuitBreakerService Memory circuit breaker + * @param timeSeriesCircuitBreakerService Memory circuit breaker */ public MemoryTracker( JvmService jvmService, double modelMaxSizePercentage, - double modelDesiredSizePercentage, ClusterService clusterService, - ADCircuitBreakerService adCircuitBreakerService + CircuitBreakerService timeSeriesCircuitBreakerService ) { this.totalMemoryBytes = 0; this.totalMemoryBytesByOrigin = new EnumMap(Origin.class); @@ -75,40 +68,14 @@ public MemoryTracker( this.reservedMemoryBytesByOrigin = new EnumMap(Origin.class); this.heapSize = jvmService.info().getMem().getHeapMax().getBytes(); this.heapLimitBytes = (long) (heapSize * modelMaxSizePercentage); - this.desiredModelSize = (long) (heapSize * modelDesiredSizePercentage); if (clusterService != null) { clusterService .getClusterSettings() - .addSettingsUpdateConsumer(MODEL_MAX_SIZE_PERCENTAGE, it -> this.heapLimitBytes = (long) (heapSize * it)); + .addSettingsUpdateConsumer(AD_MODEL_MAX_SIZE_PERCENTAGE, it -> this.heapLimitBytes = (long) (heapSize * it)); } this.thresholdModelBytes = 180_000; - 
this.adCircuitBreakerService = adCircuitBreakerService; - } - - /** - * This function derives from the old code: https://tinyurl.com/2eaabja6 - * - * @param detectorId Detector Id - * @param trcf Thresholded random cut forest model - * @return true if there is enough memory; otherwise throw LimitExceededException. - */ - public synchronized boolean isHostingAllowed(String detectorId, ThresholdedRandomCutForest trcf) { - long requiredBytes = estimateTRCFModelSize(trcf); - if (canAllocateReserved(requiredBytes)) { - return true; - } else { - throw new LimitExceededException( - detectorId, - String - .format( - Locale.ROOT, - "Exceeded memory limit. New size is %d bytes and max limit is %d bytes", - reservedMemoryBytes + requiredBytes, - heapLimitBytes - ) - ); - } + this.timeSeriesCircuitBreakerService = timeSeriesCircuitBreakerService; } /** @@ -117,7 +84,7 @@ public synchronized boolean isHostingAllowed(String detectorId, ThresholdedRando * true when circuit breaker is closed and there is enough reserved memory. */ public synchronized boolean canAllocateReserved(long requiredBytes) { - return (false == adCircuitBreakerService.isOpen() && reservedMemoryBytes + requiredBytes <= heapLimitBytes); + return (false == timeSeriesCircuitBreakerService.isOpen() && reservedMemoryBytes + requiredBytes <= heapLimitBytes); } /** @@ -126,7 +93,7 @@ public synchronized boolean canAllocateReserved(long requiredBytes) { * true when circuit breaker is closed and there is enough overall memory. 
*/ public synchronized boolean canAllocate(long bytes) { - return false == adCircuitBreakerService.isOpen() && totalMemoryBytes + bytes <= heapLimitBytes; + return false == timeSeriesCircuitBreakerService.isOpen() && totalMemoryBytes + bytes <= heapLimitBytes; } public synchronized void consumeMemory(long memoryToConsume, boolean reserved, Origin origin) { @@ -159,23 +126,6 @@ private void adjustOriginMemoryRelease(long memoryToConsume, Origin origin, Map< } } - /** - * Gets the estimated size of an entity's model. - * - * @param trcf ThresholdedRandomCutForest object - * @return estimated model size in bytes - */ - public long estimateTRCFModelSize(ThresholdedRandomCutForest trcf) { - RandomCutForest forest = trcf.getForest(); - return estimateTRCFModelSize( - forest.getDimensions(), - forest.getNumberOfTrees(), - forest.getBoundingBoxCacheFraction(), - forest.getShingleSize(), - forest.isInternalShinglingEnabled() - ); - } - /** * Gets the estimated size of an entity's model. * @@ -306,14 +256,6 @@ public long getHeapLimit() { return heapLimitBytes; } - /** - * - * @return Desired model partition size in bytes - */ - public long getDesiredModelSize() { - return desiredModelSize; - } - public long getTotalMemoryBytes() { return totalMemoryBytes; } @@ -360,4 +302,46 @@ public synchronized boolean syncMemoryState(Origin origin, long totalBytes, long public int getThresholdModelBytes() { return thresholdModelBytes; } + + /** + * This function derives from the old code: https://tinyurl.com/2eaabja6 + * + * @param configId Config Id + * @param trcf Thresholded random cut forest model + * @return true if there is enough memory; otherwise throw LimitExceededException. 
+ */ + public synchronized boolean isHostingAllowed(String configId, ThresholdedRandomCutForest trcf) { + long requiredBytes = estimateTRCFModelSize(trcf); + if (canAllocateReserved(requiredBytes)) { + return true; + } else { + throw new LimitExceededException( + configId, + String + .format( + Locale.ROOT, + "Exceeded memory limit. New size is %d bytes and max limit is %d bytes", + reservedMemoryBytes + requiredBytes, + heapLimitBytes + ) + ); + } + } + + /** + * Gets the estimated size of an entity's model. + * + * @param trcf ThresholdedRandomCutForest object + * @return estimated model size in bytes + */ + public long estimateTRCFModelSize(ThresholdedRandomCutForest trcf) { + RandomCutForest forest = trcf.getForest(); + return estimateTRCFModelSize( + forest.getDimensions(), + forest.getNumberOfTrees(), + forest.getBoundingBoxCacheFraction(), + forest.getShingleSize(), + forest.isInternalShinglingEnabled() + ); + } } diff --git a/src/main/java/org/opensearch/timeseries/TimeSeriesAnalyticsPlugin.java b/src/main/java/org/opensearch/timeseries/TimeSeriesAnalyticsPlugin.java index 171faf727..7dadac650 100644 --- a/src/main/java/org/opensearch/timeseries/TimeSeriesAnalyticsPlugin.java +++ b/src/main/java/org/opensearch/timeseries/TimeSeriesAnalyticsPlugin.java @@ -36,8 +36,6 @@ import org.opensearch.ad.AnomalyDetectorJobRunner; import org.opensearch.ad.AnomalyDetectorRunner; import org.opensearch.ad.ExecuteADResultResponseRecorder; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.caching.PriorityCache; @@ -190,6 +188,7 @@ import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.constant.CommonName; import 
org.opensearch.timeseries.dataprocessor.Imputer; import org.opensearch.timeseries.dataprocessor.LinearUniformImputer; @@ -383,7 +382,7 @@ public Collection createComponents( securityClientUtil, settings, clusterService, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE + TimeSeriesSettings.NUM_SAMPLES_PER_TREE ); JvmService jvmService = new JvmService(environment.settings()); @@ -393,17 +392,11 @@ public Collection createComponents( mapper.setPartialTreeStateEnabled(true); V1JsonToV3StateConverter converter = new V1JsonToV3StateConverter(); - double modelMaxSizePercent = AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(settings); + double modelMaxSizePercent = AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.get(settings); - ADCircuitBreakerService adCircuitBreakerService = new ADCircuitBreakerService(jvmService).init(); + CircuitBreakerService adCircuitBreakerService = new CircuitBreakerService(jvmService).init(); - MemoryTracker memoryTracker = new MemoryTracker( - jvmService, - modelMaxSizePercent, - AnomalyDetectorSettings.DESIRED_MODEL_SIZE_PERCENTAGE, - clusterService, - adCircuitBreakerService - ); + MemoryTracker memoryTracker = new MemoryTracker(jvmService, modelMaxSizePercent, clusterService, adCircuitBreakerService); FeatureManager featureManager = new FeatureManager( searchFeatureDao, @@ -417,7 +410,7 @@ public Collection createComponents( AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE, AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE, AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, AD_THREAD_POOL_NAME ); @@ -430,7 +423,7 @@ public GenericObjectPool run() { return new GenericObjectPool<>(new BasePooledObjectFactory() { @Override public LinkedBuffer create() throws Exception { - return LinkedBuffer.allocate(AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES); + return LinkedBuffer.allocate(TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES); } @Override 
@@ -440,11 +433,11 @@ public PooledObject wrap(LinkedBuffer obj) { }); } }); - serializeRCFBufferPool.setMaxTotal(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); - serializeRCFBufferPool.setMaxIdle(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); + serializeRCFBufferPool.setMaxTotal(TimeSeriesSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); + serializeRCFBufferPool.setMaxIdle(TimeSeriesSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); serializeRCFBufferPool.setMinIdle(0); serializeRCFBufferPool.setBlockWhenExhausted(false); - serializeRCFBufferPool.setTimeBetweenEvictionRuns(AnomalyDetectorSettings.HOURLY_MAINTENANCE); + serializeRCFBufferPool.setTimeBetweenEvictionRuns(TimeSeriesSettings.HOURLY_MAINTENANCE); CheckpointDao checkpoint = new CheckpointDao( client, @@ -461,10 +454,10 @@ public PooledObject wrap(LinkedBuffer obj) { ), HybridThresholdingModel.class, anomalyDetectionIndices, - AnomalyDetectorSettings.MAX_CHECKPOINT_BYTES, + TimeSeriesSettings.MAX_CHECKPOINT_BYTES, serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, - 1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, + 1 - TimeSeriesSettings.THRESHOLD_MIN_PVALUE ); Random random = new Random(42); @@ -475,7 +468,7 @@ public PooledObject wrap(LinkedBuffer obj) { cacheProvider, checkpoint, ADCommonName.CHECKPOINT_INDEX_NAME, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, getClock(), clusterService, settings @@ -483,62 +476,62 @@ public PooledObject wrap(LinkedBuffer obj) { CheckpointWriteWorker checkpointWriteQueue = new CheckpointWriteWorker( heapSizeBytes, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, + AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, random, 
adCircuitBreakerService, threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, checkpoint, ADCommonName.CHECKPOINT_INDEX_NAME, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, stateManager, - AnomalyDetectorSettings.HOURLY_MAINTENANCE + TimeSeriesSettings.HOURLY_MAINTENANCE ); CheckpointMaintainWorker checkpointMaintainQueue = new CheckpointMaintainWorker( heapSizeBytes, - AnomalyDetectorSettings.CHECKPOINT_MAINTAIN_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.CHECKPOINT_MAINTAIN_REQUEST_SIZE_IN_BYTES, + AnomalyDetectorSettings.AD_CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, checkpointWriteQueue, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, stateManager, adapter ); EntityCache cache = new PriorityCache( checkpoint, - AnomalyDetectorSettings.DEDICATED_CACHE_SIZE.get(settings), - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_DEDICATED_CACHE_SIZE.get(settings), + 
AnomalyDetectorSettings.AD_CHECKPOINT_TTL, AnomalyDetectorSettings.MAX_INACTIVE_ENTITIES, memoryTracker, - AnomalyDetectorSettings.NUM_TREES, + TimeSeriesSettings.NUM_TREES, getClock(), clusterService, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, checkpointWriteQueue, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, checkpointMaintainQueue, settings, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ ); cacheProvider.set(cache); @@ -547,39 +540,39 @@ public PooledObject wrap(LinkedBuffer obj) { getClock(), threadPool, stateManager, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.TIME_DECAY, - AnomalyDetectorSettings.NUM_MIN_SAMPLES, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.TIME_DECAY, + TimeSeriesSettings.NUM_MIN_SAMPLES, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, imputer, searchFeatureDao, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, - AnomalyDetectorSettings.MAX_COLD_START_ROUNDS + TimeSeriesSettings.MAX_COLD_START_ROUNDS ); EntityColdStartWorker coldstartQueue = new EntityColdStartWorker( heapSizeBytes, AnomalyDetectorSettings.ENTITY_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - 
AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, entityColdStarter, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, stateManager, cacheProvider ); @@ -587,14 +580,14 @@ public PooledObject wrap(LinkedBuffer obj) { ModelManager modelManager = new ModelManager( checkpoint, getClock(), - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.TIME_DECAY, - AnomalyDetectorSettings.NUM_MIN_SAMPLES, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.TIME_DECAY, + TimeSeriesSettings.NUM_MIN_SAMPLES, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, AnomalyDetectorSettings.MIN_PREVIEW_SIZE, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + TimeSeriesSettings.HOURLY_MAINTENANCE, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, featureManager, memoryTracker, @@ -614,23 +607,23 @@ public PooledObject wrap(LinkedBuffer obj) { ResultWriteWorker resultWriteQueue = new ResultWriteWorker( heapSizeBytes, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_SIZE_IN_BYTES, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.RESULT_WRITE_QUEUE_SIZE_IN_BYTES, + AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - 
AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, multiEntityResultHandler, xContentRegistry, stateManager, - AnomalyDetectorSettings.HOURLY_MAINTENANCE + TimeSeriesSettings.HOURLY_MAINTENANCE ); Map> stats = ImmutableMap @@ -679,18 +672,18 @@ public PooledObject wrap(LinkedBuffer obj) { CheckpointReadWorker checkpointReadQueue = new CheckpointReadWorker( heapSizeBytes, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, @@ -698,7 +691,7 @@ public PooledObject wrap(LinkedBuffer obj) { stateManager, anomalyDetectionIndices, cacheProvider, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, adStats ); @@ -706,19 +699,19 @@ public PooledObject wrap(LinkedBuffer obj) { ColdEntityWorker coldEntityQueue = new ColdEntityWorker( heapSizeBytes, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, clusterService, random, adCircuitBreakerService, threadPool, settings, - 
AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, getClock(), - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, checkpointReadQueue, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, stateManager ); @@ -788,7 +781,7 @@ public PooledObject wrap(LinkedBuffer obj) { client, stateManager, adTaskCacheManager, - AnomalyDetectorSettings.NUM_MIN_SAMPLES + TimeSeriesSettings.NUM_MIN_SAMPLES ); // return objects used by Guice to inject dependencies for e.g., @@ -815,7 +808,7 @@ public PooledObject wrap(LinkedBuffer obj) { getClock(), clientUtil, nodeFilter, - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, settings ), nodeFilter, @@ -880,7 +873,7 @@ public List> getSettings() { // ====================================== // HCAD cache LegacyOpenDistroAnomalyDetectorSettings.MAX_CACHE_MISS_HANDLING_PER_SECOND, - AnomalyDetectorSettings.DEDICATED_CACHE_SIZE, + AnomalyDetectorSettings.AD_DEDICATED_CACHE_SIZE, // Detector config LegacyOpenDistroAnomalyDetectorSettings.DETECTION_INTERVAL, LegacyOpenDistroAnomalyDetectorSettings.DETECTION_WINDOW_DELAY, @@ -914,15 +907,15 @@ public List> getSettings() { LegacyOpenDistroAnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS, LegacyOpenDistroAnomalyDetectorSettings.INDEX_PRESSURE_SOFT_LIMIT, LegacyOpenDistroAnomalyDetectorSettings.MAX_PRIMARY_SHARDS, - AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, - AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, - AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS, + AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE, + AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, + 
AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS, AnomalyDetectorSettings.AD_INDEX_PRESSURE_SOFT_LIMIT, AnomalyDetectorSettings.AD_INDEX_PRESSURE_HARD_LIMIT, AnomalyDetectorSettings.AD_MAX_PRIMARY_SHARDS, // Security - LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, - AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, + AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, // Historical LegacyOpenDistroAnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, LegacyOpenDistroAnomalyDetectorSettings.BATCH_TASK_PIECE_INTERVAL_SECONDS, @@ -938,32 +931,32 @@ public List> getSettings() { // rate limiting AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_CONCURRENCY, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_CONCURRENCY, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_BATCH_SIZE, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_BATCH_SIZE, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, + 
AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, AnomalyDetectorSettings.AD_EXPECTED_CHECKPOINT_MAINTAIN_TIME_IN_MILLISECS, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, // query limit LegacyOpenDistroAnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, LegacyOpenDistroAnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, - AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, + AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, AnomalyDetectorSettings.MAX_CONCURRENT_PREVIEW, - AnomalyDetectorSettings.PAGE_SIZE, + AnomalyDetectorSettings.AD_PAGE_SIZE, // clean resource AnomalyDetectorSettings.DELETE_AD_RESULT_WHEN_DELETE_DETECTOR, // stats/profile API - AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE, + AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE, // ====================================== // Forecast settings // ====================================== diff --git a/src/main/java/org/opensearch/ad/breaker/BreakerName.java b/src/main/java/org/opensearch/timeseries/breaker/BreakerName.java similarity index 92% rename from src/main/java/org/opensearch/ad/breaker/BreakerName.java rename to src/main/java/org/opensearch/timeseries/breaker/BreakerName.java index a6405cf1f..5c744355b 100644 --- a/src/main/java/org/opensearch/ad/breaker/BreakerName.java +++ b/src/main/java/org/opensearch/timeseries/breaker/BreakerName.java @@ -9,7 +9,7 @@ * GitHub history for details. 
*/ -package org.opensearch.ad.breaker; +package org.opensearch.timeseries.breaker; public enum BreakerName { diff --git a/src/main/java/org/opensearch/ad/breaker/CircuitBreaker.java b/src/main/java/org/opensearch/timeseries/breaker/CircuitBreaker.java similarity index 91% rename from src/main/java/org/opensearch/ad/breaker/CircuitBreaker.java rename to src/main/java/org/opensearch/timeseries/breaker/CircuitBreaker.java index 2825d2f98..5258ac64e 100644 --- a/src/main/java/org/opensearch/ad/breaker/CircuitBreaker.java +++ b/src/main/java/org/opensearch/timeseries/breaker/CircuitBreaker.java @@ -9,7 +9,7 @@ * GitHub history for details. */ -package org.opensearch.ad.breaker; +package org.opensearch.timeseries.breaker; /** * An interface for circuit breaker. diff --git a/src/main/java/org/opensearch/ad/breaker/ADCircuitBreakerService.java b/src/main/java/org/opensearch/timeseries/breaker/CircuitBreakerService.java similarity index 84% rename from src/main/java/org/opensearch/ad/breaker/ADCircuitBreakerService.java rename to src/main/java/org/opensearch/timeseries/breaker/CircuitBreakerService.java index 9c9ab5b34..efa48ec7f 100644 --- a/src/main/java/org/opensearch/ad/breaker/ADCircuitBreakerService.java +++ b/src/main/java/org/opensearch/timeseries/breaker/CircuitBreakerService.java @@ -9,7 +9,7 @@ * GitHub history for details. */ -package org.opensearch.ad.breaker; +package org.opensearch.timeseries.breaker; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -20,23 +20,23 @@ import org.opensearch.monitor.jvm.JvmService; /** - * Class {@code ADCircuitBreakerService} provide storing, retrieving circuit breakers functions. + * Class {@code CircuitBreakerService} provide storing, retrieving circuit breakers functions. * * This service registers internal system breakers and provide API for users to register their own breakers. 
*/ -public class ADCircuitBreakerService { +public class CircuitBreakerService { private final ConcurrentMap breakers = new ConcurrentHashMap<>(); private final JvmService jvmService; - private static final Logger logger = LogManager.getLogger(ADCircuitBreakerService.class); + private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); /** * Constructor. * * @param jvmService jvm info */ - public ADCircuitBreakerService(JvmService jvmService) { + public CircuitBreakerService(JvmService jvmService) { this.jvmService = jvmService; } @@ -67,7 +67,7 @@ public CircuitBreaker getBreaker(String name) { * * @return ADCircuitBreakerService */ - public ADCircuitBreakerService init() { + public CircuitBreakerService init() { // Register memory circuit breaker registerBreaker(BreakerName.MEM.getName(), new MemoryCircuitBreaker(this.jvmService)); logger.info("Registered memory breaker."); diff --git a/src/main/java/org/opensearch/ad/breaker/MemoryCircuitBreaker.java b/src/main/java/org/opensearch/timeseries/breaker/MemoryCircuitBreaker.java similarity index 95% rename from src/main/java/org/opensearch/ad/breaker/MemoryCircuitBreaker.java rename to src/main/java/org/opensearch/timeseries/breaker/MemoryCircuitBreaker.java index c4628c639..cf4b47d71 100644 --- a/src/main/java/org/opensearch/ad/breaker/MemoryCircuitBreaker.java +++ b/src/main/java/org/opensearch/timeseries/breaker/MemoryCircuitBreaker.java @@ -9,7 +9,7 @@ * GitHub history for details. 
*/ -package org.opensearch.ad.breaker; +package org.opensearch.timeseries.breaker; import org.opensearch.monitor.jvm.JvmService; diff --git a/src/main/java/org/opensearch/ad/breaker/ThresholdCircuitBreaker.java b/src/main/java/org/opensearch/timeseries/breaker/ThresholdCircuitBreaker.java similarity index 94% rename from src/main/java/org/opensearch/ad/breaker/ThresholdCircuitBreaker.java rename to src/main/java/org/opensearch/timeseries/breaker/ThresholdCircuitBreaker.java index 30959b0c4..5d69ce1f9 100644 --- a/src/main/java/org/opensearch/ad/breaker/ThresholdCircuitBreaker.java +++ b/src/main/java/org/opensearch/timeseries/breaker/ThresholdCircuitBreaker.java @@ -9,7 +9,7 @@ * GitHub history for details. */ -package org.opensearch.ad.breaker; +package org.opensearch.timeseries.breaker; /** * An abstract class for all breakers with threshold. diff --git a/src/main/java/org/opensearch/timeseries/feature/SearchFeatureDao.java b/src/main/java/org/opensearch/timeseries/feature/SearchFeatureDao.java index f69013670..2b4e3493d 100644 --- a/src/main/java/org/opensearch/timeseries/feature/SearchFeatureDao.java +++ b/src/main/java/org/opensearch/timeseries/feature/SearchFeatureDao.java @@ -12,8 +12,8 @@ package org.opensearch.timeseries.feature; import static org.apache.commons.math3.linear.MatrixUtils.createRealMatrix; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_PAGE_SIZE; import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.PAGE_SIZE; import static org.opensearch.ad.settings.AnomalyDetectorSettings.PREVIEW_TIMEOUT_IN_MILLIS; import static org.opensearch.timeseries.util.ParseUtils.batchFeatureQuery; @@ -120,7 +120,7 @@ public SearchFeatureDao( if (clusterService != null) { clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_ENTITIES_FOR_PREVIEW, it -> this.maxEntitiesForPreview = it); - 
clusterService.getClusterSettings().addSettingsUpdateConsumer(PAGE_SIZE, it -> this.pageSize = it); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_PAGE_SIZE, it -> this.pageSize = it); } this.minimumDocCountForPreview = minimumDocCount; this.previewTimeoutInMilliseconds = previewTimeoutInMilliseconds; @@ -158,7 +158,7 @@ public SearchFeatureDao( minimumDocCount, Clock.systemUTC(), MAX_ENTITIES_FOR_PREVIEW.get(settings), - PAGE_SIZE.get(settings), + AD_PAGE_SIZE.get(settings), PREVIEW_TIMEOUT_IN_MILLIS ); } diff --git a/src/main/java/org/opensearch/timeseries/model/Job.java b/src/main/java/org/opensearch/timeseries/model/Job.java index 17371b362..958152e2c 100644 --- a/src/main/java/org/opensearch/timeseries/model/Job.java +++ b/src/main/java/org/opensearch/timeseries/model/Job.java @@ -11,8 +11,8 @@ package org.opensearch.timeseries.model; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.DEFAULT_AD_JOB_LOC_DURATION_SECONDS; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.DEFAULT_JOB_LOC_DURATION_SECONDS; import java.io.IOException; import java.time.Instant; @@ -175,7 +175,7 @@ public static Job parse(XContentParser parser) throws IOException { Instant enabledTime = null; Instant disabledTime = null; Instant lastUpdateTime = null; - Long lockDurationSeconds = DEFAULT_AD_JOB_LOC_DURATION_SECONDS; + Long lockDurationSeconds = DEFAULT_JOB_LOC_DURATION_SECONDS; User user = null; String resultIndex = null; diff --git a/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java b/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java index 8ce4cbf9b..56bbe187a 100644 --- a/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java +++ b/src/main/java/org/opensearch/timeseries/settings/TimeSeriesSettings.java @@ -197,4 +197,16 @@ public class TimeSeriesSettings { // JOB // 
====================================== public static final long DEFAULT_JOB_LOC_DURATION_SECONDS = 60; + + // ====================================== + // stats/profile API setting + // ====================================== + // profile API needs to report total entities. We can use cardinality aggregation for a single-category field. + // But we cannot do that for multi-category fields as it requires scripting to generate run time fields, + // which is expensive. We work around the problem by using a composite query to find the first 10_000 buckets. + // Generally, traversing all buckets/combinations can't be done without visiting all matches, which is costly + // for data with many entities. Given that it is often enough to have a lower bound of the number of entities, + // such as "there are at least 10000 entities", the default is set to 10,000. That is, requests will count the + // total entities up to 10,000. + public static final int MAX_TOTAL_ENTITIES_TO_TRACK = 10_000; } diff --git a/src/main/java/org/opensearch/ad/task/ADRealtimeTaskCache.java b/src/main/java/org/opensearch/timeseries/task/RealtimeTaskCache.java similarity index 92% rename from src/main/java/org/opensearch/ad/task/ADRealtimeTaskCache.java rename to src/main/java/org/opensearch/timeseries/task/RealtimeTaskCache.java index bf8cbb860..fd970907d 100644 --- a/src/main/java/org/opensearch/ad/task/ADRealtimeTaskCache.java +++ b/src/main/java/org/opensearch/timeseries/task/RealtimeTaskCache.java @@ -9,7 +9,7 @@ * GitHub history for details. */ -package org.opensearch.ad.task; +package org.opensearch.timeseries.task; import java.time.Instant; @@ -21,7 +21,7 @@ * 4. last job run time * 5. detector interval */ -public class ADRealtimeTaskCache { +public class RealtimeTaskCache { // task state private String state; @@ -42,7 +42,7 @@ public class ADRealtimeTaskCache { // To avoid repeated query when there is no data, record whether we have done that or not. 
private boolean queriedResultIndex; - public ADRealtimeTaskCache(String state, Float initProgress, String error, long detectorIntervalInMillis) { + public RealtimeTaskCache(String state, Float initProgress, String error, long detectorIntervalInMillis) { this.state = state; this.initProgress = initProgress; this.error = error; diff --git a/src/main/java/org/opensearch/timeseries/task/TaskCacheManager.java b/src/main/java/org/opensearch/timeseries/task/TaskCacheManager.java new file mode 100644 index 000000000..6d40ae42b --- /dev/null +++ b/src/main/java/org/opensearch/timeseries/task/TaskCacheManager.java @@ -0,0 +1,251 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.timeseries.task; + +import static org.opensearch.timeseries.settings.TimeSeriesSettings.MAX_CACHED_DELETED_TASKS; + +import java.time.Instant; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.timeseries.model.TaskState; + +public class TaskCacheManager { + private final Logger logger = LogManager.getLogger(TaskCacheManager.class); + /** + * This field is to cache all realtime tasks on coordinating node. + *

Node: coordinating node

+ *

Key is config id

+ */ + private Map realtimeTaskCaches; + + /** + * This field is to cache all deleted config level tasks on coordinating node. + * Will try to clean up child task and result later. + *

Node: coordinating node

+ * Check {@link ForecastTaskManager#cleanChildTasksAndResultsOfDeletedTask()} + */ + private Queue deletedTasks; + + protected volatile Integer maxCachedDeletedTask; + /** + * This field is to cache deleted detector IDs. Hourly cron will poll this queue + * and clean AD results. Check ADTaskManager#cleanResultOfDeletedConfig() + *

Node: any data node that serves a delete detector request

+ */ + protected Queue deletedConfigs; + + public TaskCacheManager(Settings settings, ClusterService clusterService) { + this.realtimeTaskCaches = new ConcurrentHashMap<>(); + this.deletedTasks = new ConcurrentLinkedQueue<>(); + this.maxCachedDeletedTask = MAX_CACHED_DELETED_TASKS.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CACHED_DELETED_TASKS, it -> maxCachedDeletedTask = it); + this.deletedConfigs = new ConcurrentLinkedQueue<>(); + } + + public RealtimeTaskCache getRealtimeTaskCache(String configId) { + return realtimeTaskCaches.get(configId); + } + + public void initRealtimeTaskCache(String configId, long configIntervalInMillis) { + realtimeTaskCaches.put(configId, new RealtimeTaskCache(null, null, null, configIntervalInMillis)); + logger.debug("Realtime task cache inited"); + } + + /** + * Add deleted task's id to deleted detector tasks queue. + * @param taskId task id + */ + public void addDeletedTask(String taskId) { + if (deletedTasks.size() < maxCachedDeletedTask) { + deletedTasks.add(taskId); + } + } + + /** + * Check if deleted task queue has items. + * @return true if has deleted detector task in cache + */ + public boolean hasDeletedTask() { + return !deletedTasks.isEmpty(); + } + + /** + * Poll one deleted forecaster task. + * @return task id + */ + public String pollDeletedTask() { + return this.deletedTasks.poll(); + } + + /** + * Clear realtime task cache. + */ + public void clearRealtimeTaskCache() { + realtimeTaskCaches.clear(); + } + + /** + * Check if realtime task field value change needed or not by comparing with cache. + * 1. If new field value is null, will consider changed needed to this field. + * 2. will consider the real time task change needed if + * 1) init progress is larger or the old init progress is null, or + * 2) if the state is different, and it is not changing from running to init. + * for other fields, as long as field values changed, will consider the realtime + * task change needed. 
We did this so that the init progress or state won't go backwards. + * 3. If realtime task cache not found, will consider the realtime task change needed. + * + * @param detectorId detector id + * @param newState new task state + * @param newInitProgress new init progress + * @param newError new error + * @return true if realtime task change needed. + */ + public boolean isRealtimeTaskChangeNeeded(String detectorId, String newState, Float newInitProgress, String newError) { + if (realtimeTaskCaches.containsKey(detectorId)) { + RealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(detectorId); + boolean stateChangeNeeded = false; + String oldState = realtimeTaskCache.getState(); + if (newState != null + && !newState.equals(oldState) + && !(TaskState.INIT.name().equals(newState) && TaskState.RUNNING.name().equals(oldState))) { + stateChangeNeeded = true; + } + boolean initProgressChangeNeeded = false; + Float existingProgress = realtimeTaskCache.getInitProgress(); + if (newInitProgress != null + && !newInitProgress.equals(existingProgress) + && (existingProgress == null || newInitProgress > existingProgress)) { + initProgressChangeNeeded = true; + } + boolean errorChanged = false; + if (newError != null && !newError.equals(realtimeTaskCache.getError())) { + errorChanged = true; + } + if (stateChangeNeeded || initProgressChangeNeeded || errorChanged) { + return true; + } + return false; + } else { + return true; + } + } + + /** + * Update realtime task cache with new field values. If realtime task cache exist, update it + * directly if task is not done; if task is done, remove the detector's realtime task cache. + * + * If realtime task cache doesn't exist, will do nothing. Next realtime job run will re-init + * realtime task cache when it finds task cache not inited yet. 
+ * Check ADTaskManager#initCacheWithCleanupIfRequired(String, AnomalyDetector, TransportService, ActionListener), + * ADTaskManager#updateLatestRealtimeTaskOnCoordinatingNode(String, String, Long, Long, String, ActionListener) + * + * @param detectorId detector id + * @param newState new task state + * @param newInitProgress new init progress + * @param newError new error + */ + public void updateRealtimeTaskCache(String detectorId, String newState, Float newInitProgress, String newError) { + RealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(detectorId); + if (realtimeTaskCache != null) { + if (newState != null) { + realtimeTaskCache.setState(newState); + } + if (newInitProgress != null) { + realtimeTaskCache.setInitProgress(newInitProgress); + } + if (newError != null) { + realtimeTaskCache.setError(newError); + } + if (newState != null && !TaskState.NOT_ENDED_STATES.contains(newState)) { + // If task is done, will remove its realtime task cache. + logger.info("Realtime task done with state {}, remove RT task cache for detector ", newState, detectorId); + removeRealtimeTaskCache(detectorId); + } + } else { + logger.debug("Realtime task cache is not inited yet for detector {}", detectorId); + } + } + + public void refreshRealtimeJobRunTime(String detectorId) { + RealtimeTaskCache taskCache = realtimeTaskCaches.get(detectorId); + if (taskCache != null) { + taskCache.setLastJobRunTime(Instant.now().toEpochMilli()); + } + } + + /** + * Get detector IDs from realtime task cache. + * @return array of detector id + */ + public String[] getDetectorIdsInRealtimeTaskCache() { + return realtimeTaskCaches.keySet().toArray(new String[0]); + } + + /** + * Remove detector's realtime task from cache. 
+ * @param detectorId detector id + */ + public void removeRealtimeTaskCache(String detectorId) { + if (realtimeTaskCaches.containsKey(detectorId)) { + logger.info("Delete realtime cache for detector {}", detectorId); + realtimeTaskCaches.remove(detectorId); + } + } + + /** + * We query result index to check if there are any result generated for detector to tell whether it passed initialization or not. + * To avoid repeated query when there is no data, record whether we have done that or not. + * @param id detector id + */ + public void markResultIndexQueried(String id) { + RealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(id); + // we initialize a real time cache at the beginning of AnomalyResultTransportAction if it + // cannot be found. If the cache is empty, we will return early and wait for it to be + // initialized. + if (realtimeTaskCache != null) { + realtimeTaskCache.setQueriedResultIndex(true); + } + } + + /** + * We query result index to check if there are any result generated for detector to tell whether it passed initialization or not. + * + * @param id detector id + * @return whether we have queried result index or not. + */ + public boolean hasQueriedResultIndex(String id) { + RealtimeTaskCache realtimeTaskCache = realtimeTaskCaches.get(id); + if (realtimeTaskCache != null) { + return realtimeTaskCache.hasQueriedResultIndex(); + } + return false; + } + + /** + * Add deleted config's id to deleted config queue. + * @param configId config id + */ + public void addDeletedConfig(String configId) { + if (deletedConfigs.size() < maxCachedDeletedTask) { + deletedConfigs.add(configId); + } + } + + /** + * Poll one deleted config. 
+ * @return config id + */ + public String pollDeletedConfig() { + return this.deletedConfigs.poll(); + } +} diff --git a/src/test/java/org/opensearch/ad/AnomalyDetectorJobRunnerTests.java b/src/test/java/org/opensearch/ad/AnomalyDetectorJobRunnerTests.java index 9de62f771..ed5be8fb0 100644 --- a/src/test/java/org/opensearch/ad/AnomalyDetectorJobRunnerTests.java +++ b/src/test/java/org/opensearch/ad/AnomalyDetectorJobRunnerTests.java @@ -22,8 +22,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.NUM_MIN_SAMPLES; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.opensearch.timeseries.settings.TimeSeriesSettings.NUM_MIN_SAMPLES; import java.io.IOException; import java.time.Instant; @@ -84,6 +84,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AbstractTimeSeriesTest; import org.opensearch.timeseries.AnalysisType; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.common.exception.EndRunException; @@ -92,6 +93,7 @@ import org.opensearch.timeseries.model.FeatureData; import org.opensearch.timeseries.model.IntervalTimeConfiguration; import org.opensearch.timeseries.model.Job; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.ClientUtil; import org.opensearch.timeseries.util.DiscoveryNodeFilterer; @@ -553,7 +555,7 @@ public Instant confirmInitializedSetup() { Collections.singletonList(new FeatureData("123", "abc", 0d)), randomAlphaOfLength(4), // not fully initialized - Long.valueOf(AnomalyDetectorSettings.NUM_MIN_SAMPLES - 1), + Long.valueOf(TimeSeriesSettings.NUM_MIN_SAMPLES - 1), randomLong(), // not an HC detector false, @@ -716,7 +718,7 @@ public void 
testMarkResultIndexQueried() throws IOException { Settings settings = Settings .builder() .put(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE.getKey(), 2) - .put(AnomalyDetectorSettings.MAX_CACHED_DELETED_TASKS.getKey(), 100) + .put(TimeSeriesSettings.MAX_CACHED_DELETED_TASKS.getKey(), 100) .build(); clusterService = mock(ClusterService.class); @@ -725,7 +727,7 @@ public void testMarkResultIndexQueried() throws IOException { Collections .unmodifiableSet( new HashSet<>( - Arrays.asList(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, AnomalyDetectorSettings.MAX_CACHED_DELETED_TASKS) + Arrays.asList(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, TimeSeriesSettings.MAX_CACHED_DELETED_TASKS) ) ) ); diff --git a/src/test/java/org/opensearch/ad/MemoryTrackerTests.java b/src/test/java/org/opensearch/ad/MemoryTrackerTests.java index 00060a060..631240072 100644 --- a/src/test/java/org/opensearch/ad/MemoryTrackerTests.java +++ b/src/test/java/org/opensearch/ad/MemoryTrackerTests.java @@ -18,7 +18,6 @@ import java.util.Collections; import java.util.HashSet; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.model.AnomalyDetector; import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.cluster.service.ClusterService; @@ -29,6 +28,8 @@ import org.opensearch.monitor.jvm.JvmInfo.Mem; import org.opensearch.monitor.jvm.JvmService; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.timeseries.MemoryTracker; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.settings.TimeSeriesSettings; @@ -58,7 +59,7 @@ public class MemoryTrackerTests extends OpenSearchTestCase { double modelDesiredSizePercentage; JvmService jvmService; AnomalyDetector detector; - ADCircuitBreakerService circuitBreaker; + CircuitBreakerService circuitBreaker; @Override public void setUp() throws Exception { @@ -86,10 
+87,10 @@ public void setUp() throws Exception { clusterService = mock(ClusterService.class); modelMaxPercen = 0.1f; - Settings settings = Settings.builder().put(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.getKey(), modelMaxPercen).build(); + Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.getKey(), modelMaxPercen).build(); ClusterSettings clusterSettings = new ClusterSettings( settings, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); @@ -119,20 +120,20 @@ public void setUp() throws Exception { when(detector.getEnabledFeatureIds()).thenReturn(Collections.singletonList("a")); when(detector.getShingleSize()).thenReturn(1); - circuitBreaker = mock(ADCircuitBreakerService.class); + circuitBreaker = mock(CircuitBreakerService.class); when(circuitBreaker.isOpen()).thenReturn(false); } private void setUpBigHeap() { ByteSizeValue value = new ByteSizeValue(largeHeapSize); when(mem.getHeapMax()).thenReturn(value); - tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, modelDesiredSizePercentage, clusterService, circuitBreaker); + tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, clusterService, circuitBreaker); } private void setUpSmallHeap() { ByteSizeValue value = new ByteSizeValue(smallHeapSize); when(mem.getHeapMax()).thenReturn(value); - tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, modelDesiredSizePercentage, clusterService, circuitBreaker); + tracker = new MemoryTracker(jvmService, modelMaxSizePercentage, clusterService, circuitBreaker); } public void testEstimateModelSize() { @@ -174,7 +175,7 @@ public void testEstimateModelSize() { .parallelExecutionEnabled(false) .compact(true) .precision(Precision.FLOAT_32) - 
.boundingBoxCacheFraction(AnomalyDetectorSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) + .boundingBoxCacheFraction(TimeSeriesSettings.BATCH_BOUNDING_BOX_CACHE_RATIO) .internalShinglingEnabled(false) // same with dimension for opportunistic memory saving .shingleSize(1) @@ -332,10 +333,10 @@ public void testCanAllocate() { assertTrue(!tracker.canAllocate((long) (largeHeapSize * modelMaxPercen + 10))); long bytesToUse = 100_000; - tracker.consumeMemory(bytesToUse, false, MemoryTracker.Origin.HC_DETECTOR); + tracker.consumeMemory(bytesToUse, false, MemoryTracker.Origin.REAL_TIME_DETECTOR); assertTrue(!tracker.canAllocate((long) (largeHeapSize * modelMaxPercen))); - tracker.releaseMemory(bytesToUse, false, MemoryTracker.Origin.HC_DETECTOR); + tracker.releaseMemory(bytesToUse, false, MemoryTracker.Origin.REAL_TIME_DETECTOR); assertTrue(tracker.canAllocate((long) (largeHeapSize * modelMaxPercen))); } @@ -348,12 +349,11 @@ public void testMemoryToShed() { setUpSmallHeap(); long bytesToUse = 100_000; assertEquals(bytesToUse, tracker.getHeapLimit()); - assertEquals((long) (smallHeapSize * modelDesiredSizePercentage), tracker.getDesiredModelSize()); - tracker.consumeMemory(bytesToUse, false, MemoryTracker.Origin.HC_DETECTOR); - tracker.consumeMemory(bytesToUse, true, MemoryTracker.Origin.HC_DETECTOR); + tracker.consumeMemory(bytesToUse, false, MemoryTracker.Origin.REAL_TIME_DETECTOR); + tracker.consumeMemory(bytesToUse, true, MemoryTracker.Origin.REAL_TIME_DETECTOR); assertEquals(2 * bytesToUse, tracker.getTotalMemoryBytes()); assertEquals(bytesToUse, tracker.memoryToShed()); - assertTrue(!tracker.syncMemoryState(MemoryTracker.Origin.HC_DETECTOR, 2 * bytesToUse, bytesToUse)); + assertTrue(!tracker.syncMemoryState(MemoryTracker.Origin.REAL_TIME_DETECTOR, 2 * bytesToUse, bytesToUse)); } } diff --git a/src/test/java/org/opensearch/ad/breaker/ADCircuitBreakerServiceTests.java b/src/test/java/org/opensearch/ad/breaker/ADCircuitBreakerServiceTests.java index 7a5be47b6..df2fa513d 100644 
--- a/src/test/java/org/opensearch/ad/breaker/ADCircuitBreakerServiceTests.java +++ b/src/test/java/org/opensearch/ad/breaker/ADCircuitBreakerServiceTests.java @@ -25,11 +25,15 @@ import org.mockito.MockitoAnnotations; import org.opensearch.monitor.jvm.JvmService; import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.timeseries.breaker.BreakerName; +import org.opensearch.timeseries.breaker.CircuitBreaker; +import org.opensearch.timeseries.breaker.CircuitBreakerService; +import org.opensearch.timeseries.breaker.MemoryCircuitBreaker; public class ADCircuitBreakerServiceTests { @InjectMocks - private ADCircuitBreakerService adCircuitBreakerService; + private CircuitBreakerService adCircuitBreakerService; @Mock JvmService jvmService; diff --git a/src/test/java/org/opensearch/ad/breaker/MemoryCircuitBreakerTests.java b/src/test/java/org/opensearch/ad/breaker/MemoryCircuitBreakerTests.java index e9249df82..6264808cc 100644 --- a/src/test/java/org/opensearch/ad/breaker/MemoryCircuitBreakerTests.java +++ b/src/test/java/org/opensearch/ad/breaker/MemoryCircuitBreakerTests.java @@ -21,6 +21,8 @@ import org.mockito.MockitoAnnotations; import org.opensearch.monitor.jvm.JvmService; import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.timeseries.breaker.CircuitBreaker; +import org.opensearch.timeseries.breaker.MemoryCircuitBreaker; public class MemoryCircuitBreakerTests { diff --git a/src/test/java/org/opensearch/ad/caching/AbstractCacheTest.java b/src/test/java/org/opensearch/ad/caching/AbstractCacheTest.java index 5045b45bb..2c990682a 100644 --- a/src/test/java/org/opensearch/ad/caching/AbstractCacheTest.java +++ b/src/test/java/org/opensearch/ad/caching/AbstractCacheTest.java @@ -22,16 +22,16 @@ import java.util.Random; import org.junit.Before; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelManager.ModelType; import org.opensearch.ad.ml.ModelState; import 
org.opensearch.ad.model.AnomalyDetector; import org.opensearch.ad.ratelimit.CheckpointMaintainWorker; import org.opensearch.ad.ratelimit.CheckpointWriteWorker; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.timeseries.AbstractTimeSeriesTest; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; public class AbstractCacheTest extends AbstractTimeSeriesTest { protected String modelId1, modelId2, modelId3, modelId4; @@ -94,7 +94,7 @@ public void setUp() throws Exception { memoryPerEntity, memoryTracker, clock, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, detectorId, checkpointWriteQueue, checkpointMaintainQueue, diff --git a/src/test/java/org/opensearch/ad/caching/CacheBufferTests.java b/src/test/java/org/opensearch/ad/caching/CacheBufferTests.java index 7332edf4b..265560ab5 100644 --- a/src/test/java/org/opensearch/ad/caching/CacheBufferTests.java +++ b/src/test/java/org/opensearch/ad/caching/CacheBufferTests.java @@ -22,8 +22,8 @@ import java.util.Optional; import org.mockito.ArgumentCaptor; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.ratelimit.CheckpointMaintainRequest; +import org.opensearch.timeseries.MemoryTracker; import test.org.opensearch.ad.util.MLUtil; import test.org.opensearch.ad.util.RandomModelStateConfig; @@ -83,7 +83,7 @@ public void testRemovalCandidate2() throws InterruptedException { assertEquals(3 * memoryPerEntity, capturedMemoryReleased.stream().reduce(0L, (a, b) -> a + b).intValue()); assertTrue(capturedreserved.get(0)); assertTrue(!capturedreserved.get(1)); - assertEquals(MemoryTracker.Origin.HC_DETECTOR, capturedOrigin.get(0)); + assertEquals(MemoryTracker.Origin.REAL_TIME_DETECTOR, capturedOrigin.get(0)); assertTrue(!cacheBuffer.expired(Duration.ofHours(1))); } diff --git a/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java 
b/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java index f43457318..4154687cf 100644 --- a/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java +++ b/src/test/java/org/opensearch/ad/caching/PriorityCacheTests.java @@ -44,8 +44,6 @@ import org.apache.logging.log4j.Logger; import org.junit.Before; import org.mockito.ArgumentCaptor; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.ml.CheckpointDao; import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelManager; @@ -63,9 +61,12 @@ import org.opensearch.monitor.jvm.JvmService; import org.opensearch.threadpool.Scheduler.ScheduledCancellable; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.timeseries.MemoryTracker; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.common.exception.TimeSeriesException; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; public class PriorityCacheTests extends AbstractCacheTest { private static final Logger LOG = LogManager.getLogger(PriorityCacheTests.class); @@ -98,11 +99,11 @@ public void setUp() throws Exception { new HashSet<>( Arrays .asList( - AnomalyDetectorSettings.DEDICATED_CACHE_SIZE, - AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, - AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, - AnomalyDetectorSettings.CHECKPOINT_TTL, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ + AnomalyDetectorSettings.AD_DEDICATED_CACHE_SIZE, + AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE, + AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ ) ) ) @@ -117,19 +118,19 @@ public void setUp() throws Exception { EntityCache cache = new PriorityCache( checkpoint, 
dedicatedCacheSize, - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, AnomalyDetectorSettings.MAX_INACTIVE_ENTITIES, memoryTracker, - AnomalyDetectorSettings.NUM_TREES, + TimeSeriesSettings.NUM_TREES, clock, clusterService, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, checkpointWriteQueue, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, checkpointMaintainQueue, Settings.EMPTY, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ ); CacheProvider cacheProvider = new CacheProvider(); @@ -160,31 +161,32 @@ public void testCacheHit() { // ClusterService clusterService = mock(ClusterService.class); float modelMaxPercen = 0.1f; - // Settings settings = Settings.builder().put(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.getKey(), modelMaxPercen).build(); + // Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.getKey(), + // modelMaxPercen).build(); // ClusterSettings clusterSettings = new ClusterSettings( // settings, - // Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE))) + // Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE))) // ); // when(clusterService.getClusterSettings()).thenReturn(clusterSettings); - memoryTracker = spy(new MemoryTracker(jvmService, modelMaxPercen, 0.002, clusterService, mock(ADCircuitBreakerService.class))); + memoryTracker = spy(new MemoryTracker(jvmService, modelMaxPercen, clusterService, mock(CircuitBreakerService.class))); EntityCache cache = new PriorityCache( checkpoint, dedicatedCacheSize, - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, AnomalyDetectorSettings.MAX_INACTIVE_ENTITIES, memoryTracker, - AnomalyDetectorSettings.NUM_TREES, + 
TimeSeriesSettings.NUM_TREES, clock, clusterService, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, checkpointWriteQueue, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, checkpointMaintainQueue, Settings.EMPTY, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ ); CacheProvider cacheProvider = new CacheProvider(); @@ -215,7 +217,7 @@ public void testCacheHit() { verify(memoryTracker, times(1)).consumeMemory(memoryConsumed.capture(), reserved.capture(), origin.capture()); assertEquals(dedicatedCacheSize * expectedMemoryPerEntity, memoryConsumed.getValue().intValue()); assertEquals(true, reserved.getValue().booleanValue()); - assertEquals(MemoryTracker.Origin.HC_DETECTOR, origin.getValue()); + assertEquals(MemoryTracker.Origin.REAL_TIME_DETECTOR, origin.getValue()); // for (int i = 0; i < 2; i++) { // cacheProvider.get(modelId2, detector); diff --git a/src/test/java/org/opensearch/ad/cluster/ClusterManagerEventListenerTests.java b/src/test/java/org/opensearch/ad/cluster/ClusterManagerEventListenerTests.java index 2ba8f391a..9c2e79236 100644 --- a/src/test/java/org/opensearch/ad/cluster/ClusterManagerEventListenerTests.java +++ b/src/test/java/org/opensearch/ad/cluster/ClusterManagerEventListenerTests.java @@ -60,7 +60,7 @@ public void setUp() throws Exception { clusterService = mock(ClusterService.class); ClusterSettings settings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.CHECKPOINT_TTL))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_CHECKPOINT_TTL))) ); when(clusterService.getClusterSettings()).thenReturn(settings); @@ -85,7 +85,7 @@ public void setUp() throws Exception { clock, clientUtil, nodeFilter, - AnomalyDetectorSettings.CHECKPOINT_TTL, + AnomalyDetectorSettings.AD_CHECKPOINT_TTL, 
Settings.EMPTY ); } diff --git a/src/test/java/org/opensearch/ad/ml/AbstractCosineDataTest.java b/src/test/java/org/opensearch/ad/ml/AbstractCosineDataTest.java index c4b17264a..1a86e45d4 100644 --- a/src/test/java/org/opensearch/ad/ml/AbstractCosineDataTest.java +++ b/src/test/java/org/opensearch/ad/ml/AbstractCosineDataTest.java @@ -15,7 +15,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ; import java.time.Clock; import java.time.Instant; @@ -30,7 +29,6 @@ import org.opensearch.Version; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.model.AnomalyDetector; import org.opensearch.ad.ratelimit.CheckpointWriteWorker; @@ -47,6 +45,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AbstractTimeSeriesTest; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; @@ -96,7 +95,7 @@ public class AbstractCosineDataTest extends AbstractTimeSeriesTest { @Override public void setUp() throws Exception { super.setUp(); - numMinSamples = AnomalyDetectorSettings.NUM_MIN_SAMPLES; + numMinSamples = TimeSeriesSettings.NUM_MIN_SAMPLES; clock = mock(Clock.class); when(clock.instant()).thenReturn(Instant.now()); @@ -126,7 +125,7 @@ public void setUp() throws Exception { nodestateSetting = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); nodestateSetting.add(TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE); nodestateSetting.add(TimeSeriesSettings.BACKOFF_MINUTES); - nodestateSetting.add(CHECKPOINT_SAVING_FREQ); + 
nodestateSetting.add(AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ); clusterSettings = new ClusterSettings(Settings.EMPTY, nodestateSetting); discoveryNode = new DiscoveryNode( @@ -145,7 +144,7 @@ public void setUp() throws Exception { settings, clientUtil, clock, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, clusterService, TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE, TimeSeriesSettings.BACKOFF_MINUTES @@ -168,7 +167,7 @@ public void setUp() throws Exception { AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE, AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE, AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, TimeSeriesAnalyticsPlugin.AD_THREAD_POOL_NAME ); @@ -180,21 +179,21 @@ public void setUp() throws Exception { clock, threadPool, stateManager, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.TIME_DECAY, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.TIME_DECAY, numMinSamples, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, imputer, searchFeatureDao, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, rcfSeed, - AnomalyDetectorSettings.MAX_COLD_START_ROUNDS + TimeSeriesSettings.MAX_COLD_START_ROUNDS ); detectorId = "123"; @@ -215,14 +214,14 @@ public void setUp() throws Exception { modelManager = new ModelManager( mock(CheckpointDao.class), mock(Clock.class), - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.TIME_DECAY, - AnomalyDetectorSettings.NUM_MIN_SAMPLES, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + 
TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.TIME_DECAY, + TimeSeriesSettings.NUM_MIN_SAMPLES, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, AnomalyDetectorSettings.MIN_PREVIEW_SIZE, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + TimeSeriesSettings.HOURLY_MAINTENANCE, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, mock(FeatureManager.class), mock(MemoryTracker.class), diff --git a/src/test/java/org/opensearch/ad/ml/CheckpointDaoTests.java b/src/test/java/org/opensearch/ad/ml/CheckpointDaoTests.java index 9a36be401..489d683b8 100644 --- a/src/test/java/org/opensearch/ad/ml/CheckpointDaoTests.java +++ b/src/test/java/org/opensearch/ad/ml/CheckpointDaoTests.java @@ -95,7 +95,6 @@ import org.opensearch.action.update.UpdateResponse; import org.opensearch.ad.constant.ADCommonName; import org.opensearch.ad.indices.ADIndexManagement; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.client.Client; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; @@ -103,6 +102,7 @@ import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.timeseries.constant.CommonName; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.ClientUtil; import test.org.opensearch.ad.util.JsonDeserializer; @@ -195,7 +195,7 @@ public GenericObjectPool run() { return new GenericObjectPool<>(new BasePooledObjectFactory() { @Override public LinkedBuffer create() throws Exception { - return LinkedBuffer.allocate(AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES); + return LinkedBuffer.allocate(TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES); } @Override @@ -205,11 +205,11 @@ public PooledObject wrap(LinkedBuffer obj) { }); } })); - 
serializeRCFBufferPool.setMaxTotal(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); - serializeRCFBufferPool.setMaxIdle(AnomalyDetectorSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); + serializeRCFBufferPool.setMaxTotal(TimeSeriesSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); + serializeRCFBufferPool.setMaxIdle(TimeSeriesSettings.MAX_TOTAL_RCF_SERIALIZATION_BUFFERS); serializeRCFBufferPool.setMinIdle(0); serializeRCFBufferPool.setBlockWhenExhausted(false); - serializeRCFBufferPool.setTimeBetweenEvictionRuns(AnomalyDetectorSettings.HOURLY_MAINTENANCE); + serializeRCFBufferPool.setTimeBetweenEvictionRuns(TimeSeriesSettings.HOURLY_MAINTENANCE); anomalyRate = 0.005; checkpointDao = new CheckpointDao( @@ -225,7 +225,7 @@ public PooledObject wrap(LinkedBuffer obj) { indexUtil, maxCheckpointBytes, serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); @@ -693,7 +693,7 @@ public void test_too_large_checkpoint() throws IOException { indexUtil, 1, // make the max checkpoint size 1 byte only serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); @@ -730,7 +730,7 @@ public void testBorrowFromPoolFailure() throws Exception { indexUtil, 1, // make the max checkpoint size 1 byte only mockSerializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); @@ -755,7 +755,7 @@ public void testMapperFailure() throws IOException { indexUtil, 1, // make the max checkpoint size 1 byte only serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); @@ -803,7 +803,7 @@ private void setUpMockTrcf() { indexUtil, maxCheckpointBytes, serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + 
TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); } @@ -940,7 +940,7 @@ public void testFromEntityModelCheckpointModelTooLarge() throws FileNotFoundExce indexUtil, 100_000, // checkpoint_2.json is of 224603 bytes. serializeRCFBufferPool, - AnomalyDetectorSettings.SERIALIZATION_BUFFER_BYTES, + TimeSeriesSettings.SERIALIZATION_BUFFER_BYTES, anomalyRate ); Optional> result = checkpointDao.fromEntityModelCheckpoint(modelPair.getLeft(), this.modelId); diff --git a/src/test/java/org/opensearch/ad/ml/EntityColdStarterTests.java b/src/test/java/org/opensearch/ad/ml/EntityColdStarterTests.java index 67ef7f7ab..f838da051 100644 --- a/src/test/java/org/opensearch/ad/ml/EntityColdStarterTests.java +++ b/src/test/java/org/opensearch/ad/ml/EntityColdStarterTests.java @@ -43,7 +43,6 @@ import org.junit.BeforeClass; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.ml.ModelManager.ModelType; import org.opensearch.ad.settings.ADEnabledSetting; @@ -56,6 +55,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.timeseries.AnalysisType; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.common.exception.TimeSeriesException; import org.opensearch.timeseries.constant.CommonName; @@ -213,16 +213,16 @@ private void diffTesting(ModelState modelState, List cold .dimensions(inputDimension * detector.getShingleSize()) .precision(Precision.FLOAT_32) .randomSeed(rcfSeed) - .numberOfTrees(AnomalyDetectorSettings.NUM_TREES) + .numberOfTrees(TimeSeriesSettings.NUM_TREES) .shingleSize(detector.getShingleSize()) .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) - .timeDecay(AnomalyDetectorSettings.TIME_DECAY) + 
.timeDecay(TimeSeriesSettings.TIME_DECAY) .outputAfter(numMinSamples) .initialAcceptFraction(0.125d) .parallelExecutionEnabled(false) - .sampleSize(AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE) + .sampleSize(TimeSeriesSettings.NUM_SAMPLES_PER_TREE) .internalShinglingEnabled(true) - .anomalyRate(1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE) + .anomalyRate(1 - TimeSeriesSettings.THRESHOLD_MIN_PVALUE) .transformMethod(TransformMethod.NORMALIZE) .alertOnce(true) .autoAdjust(true) @@ -511,16 +511,16 @@ public void testTrainModelFromExistingSamplesEnoughSamples() { .dimensions(dimensions) .precision(Precision.FLOAT_32) .randomSeed(rcfSeed) - .numberOfTrees(AnomalyDetectorSettings.NUM_TREES) + .numberOfTrees(TimeSeriesSettings.NUM_TREES) .shingleSize(detector.getShingleSize()) .boundingBoxCacheFraction(TimeSeriesSettings.REAL_TIME_BOUNDING_BOX_CACHE_RATIO) - .timeDecay(AnomalyDetectorSettings.TIME_DECAY) + .timeDecay(TimeSeriesSettings.TIME_DECAY) .outputAfter(numMinSamples) .initialAcceptFraction(0.125d) .parallelExecutionEnabled(false) - .sampleSize(AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE) + .sampleSize(TimeSeriesSettings.NUM_SAMPLES_PER_TREE) .internalShinglingEnabled(true) - .anomalyRate(1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE) + .anomalyRate(1 - TimeSeriesSettings.THRESHOLD_MIN_PVALUE) .transformMethod(TransformMethod.NORMALIZE) .alertOnce(true) .autoAdjust(true); @@ -555,7 +555,7 @@ public void testTrainModelFromExistingSamplesNotEnoughSamples() { @SuppressWarnings("unchecked") private void accuracyTemplate(int detectorIntervalMins, float precisionThreshold, float recallThreshold) throws Exception { int baseDimension = 2; - int dataSize = 20 * AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE; + int dataSize = 20 * TimeSeriesSettings.NUM_SAMPLES_PER_TREE; int trainTestSplit = 300; // detector interval int interval = detectorIntervalMins; @@ -705,34 +705,34 @@ public void testAccuracyOneMinuteIntervalNoInterpolation() throws Exception { clock, 
threadPool, stateManager, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.TIME_DECAY, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.TIME_DECAY, numMinSamples, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, imputer, searchFeatureDao, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, rcfSeed, - AnomalyDetectorSettings.MAX_COLD_START_ROUNDS + TimeSeriesSettings.MAX_COLD_START_ROUNDS ); modelManager = new ModelManager( mock(CheckpointDao.class), mock(Clock.class), - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.TIME_DECAY, - AnomalyDetectorSettings.NUM_MIN_SAMPLES, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.TIME_DECAY, + TimeSeriesSettings.NUM_MIN_SAMPLES, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, AnomalyDetectorSettings.MIN_PREVIEW_SIZE, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + TimeSeriesSettings.HOURLY_MAINTENANCE, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, mock(FeatureManager.class), mock(MemoryTracker.class), diff --git a/src/test/java/org/opensearch/ad/ml/HCADModelPerfTests.java b/src/test/java/org/opensearch/ad/ml/HCADModelPerfTests.java index 46a2a3ec3..f0e87b160 100644 --- a/src/test/java/org/opensearch/ad/ml/HCADModelPerfTests.java +++ b/src/test/java/org/opensearch/ad/ml/HCADModelPerfTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; -import org.opensearch.ad.MemoryTracker; 
import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.ml.ModelManager.ModelType; import org.opensearch.ad.settings.AnomalyDetectorSettings; @@ -43,6 +42,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.timeseries.AnalysisType; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; import org.opensearch.timeseries.constant.CommonName; @@ -78,7 +78,7 @@ private void averageAccuracyTemplate( int baseDimension, boolean anomalyIndependent ) throws Exception { - int dataSize = 20 * AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE; + int dataSize = 20 * TimeSeriesSettings.NUM_SAMPLES_PER_TREE; int trainTestSplit = 300; // detector interval int interval = detectorIntervalMins; @@ -126,7 +126,7 @@ private void averageAccuracyTemplate( AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE, AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE, AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, TimeSeriesAnalyticsPlugin.AD_THREAD_POOL_NAME ); @@ -135,34 +135,34 @@ private void averageAccuracyTemplate( clock, threadPool, stateManager, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.TIME_DECAY, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.TIME_DECAY, numMinSamples, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, imputer, searchFeatureDao, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, seed, - AnomalyDetectorSettings.MAX_COLD_START_ROUNDS + TimeSeriesSettings.MAX_COLD_START_ROUNDS 
); modelManager = new ModelManager( mock(CheckpointDao.class), mock(Clock.class), - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.TIME_DECAY, - AnomalyDetectorSettings.NUM_MIN_SAMPLES, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.TIME_DECAY, + TimeSeriesSettings.NUM_MIN_SAMPLES, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, AnomalyDetectorSettings.MIN_PREVIEW_SIZE, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + TimeSeriesSettings.HOURLY_MAINTENANCE, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, mock(FeatureManager.class), mock(MemoryTracker.class), diff --git a/src/test/java/org/opensearch/ad/ml/ModelManagerTests.java b/src/test/java/org/opensearch/ad/ml/ModelManagerTests.java index 837cb2d7f..eda0cfb46 100644 --- a/src/test/java/org/opensearch/ad/ml/ModelManagerTests.java +++ b/src/test/java/org/opensearch/ad/ml/ModelManagerTests.java @@ -54,8 +54,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import org.opensearch.ad.MemoryTracker; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.ml.ModelManager.ModelType; @@ -69,14 +67,17 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.monitor.jvm.JvmService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.common.exception.ResourceNotFoundException; import 
org.opensearch.timeseries.dataprocessor.LinearUniformImputer; import org.opensearch.timeseries.feature.SearchFeatureDao; import org.opensearch.timeseries.ml.SingleStreamModelIdMapper; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.DiscoveryNodeFilterer; import test.org.opensearch.ad.util.MLUtil; @@ -163,7 +164,7 @@ public class ModelManagerTests { private Instant now; @Mock - private ADCircuitBreakerService adCircuitBreakerService; + private CircuitBreakerService adCircuitBreakerService; private String modelId = "modelId"; @@ -235,7 +236,7 @@ public void setup() { thresholdMinPvalue, minPreviewSize, modelTtl, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, featureManager, memoryTracker, @@ -409,13 +410,7 @@ public void getRcfResult_throwToListener_whenHeapLimitExceed() { when(jvmService.info().getMem().getHeapMax().getBytes()).thenReturn(1_000L); - MemoryTracker memoryTracker = new MemoryTracker( - jvmService, - modelMaxSizePercentage, - modelDesiredSizePercentage, - null, - adCircuitBreakerService - ); + MemoryTracker memoryTracker = new MemoryTracker(jvmService, modelMaxSizePercentage, null, adCircuitBreakerService); ActionListener listener = mock(ActionListener.class); @@ -431,7 +426,7 @@ public void getRcfResult_throwToListener_whenHeapLimitExceed() { thresholdMinPvalue, minPreviewSize, modelTtl, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, featureManager, memoryTracker, @@ -923,7 +918,7 @@ public void getEmptyStateFullSamples() { AnomalyDetectorSettings.MAX_IMPUTATION_NEIGHBOR_DISTANCE, AnomalyDetectorSettings.PREVIEW_SAMPLE_RATE, AnomalyDetectorSettings.MAX_PREVIEW_SAMPLES, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, threadPool, 
TimeSeriesAnalyticsPlugin.AD_THREAD_POOL_NAME ); @@ -934,20 +929,20 @@ public void getEmptyStateFullSamples() { clock, threadPool, stateManager, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, - AnomalyDetectorSettings.NUM_TREES, - AnomalyDetectorSettings.TIME_DECAY, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_TREES, + TimeSeriesSettings.TIME_DECAY, numMinSamples, AnomalyDetectorSettings.MAX_SAMPLE_STRIDE, AnomalyDetectorSettings.MAX_TRAIN_SAMPLE, interpolator, searchFeatureDao, - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE, + TimeSeriesSettings.THRESHOLD_MIN_PVALUE, featureManager, settings, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, - AnomalyDetectorSettings.MAX_COLD_START_ROUNDS + TimeSeriesSettings.MAX_COLD_START_ROUNDS ); modelManager = spy( @@ -961,7 +956,7 @@ public void getEmptyStateFullSamples() { thresholdMinPvalue, minPreviewSize, modelTtl, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, entityColdStarter, featureManager, memoryTracker, diff --git a/src/test/java/org/opensearch/ad/mock/transport/MockAnomalyDetectorJobTransportActionWithUser.java b/src/test/java/org/opensearch/ad/mock/transport/MockAnomalyDetectorJobTransportActionWithUser.java index 6d34fa05d..bf339161b 100644 --- a/src/test/java/org/opensearch/ad/mock/transport/MockAnomalyDetectorJobTransportActionWithUser.java +++ b/src/test/java/org/opensearch/ad/mock/transport/MockAnomalyDetectorJobTransportActionWithUser.java @@ -11,8 +11,8 @@ package org.opensearch.ad.mock.transport; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_REQUEST_TIMEOUT; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; import static org.opensearch.timeseries.util.ParseUtils.resolveUserAndExecute; import 
org.apache.logging.log4j.LogManager; @@ -76,8 +76,8 @@ public MockAnomalyDetectorJobTransportActionWithUser( this.anomalyDetectionIndices = anomalyDetectionIndices; this.xContentRegistry = xContentRegistry; this.adTaskManager = adTaskManager; - filterByEnabled = FILTER_BY_BACKEND_ROLES.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); + filterByEnabled = AD_FILTER_BY_BACKEND_ROLES.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(AD_FILTER_BY_BACKEND_ROLES, it -> filterByEnabled = it); ThreadContext threadContext = new ThreadContext(settings); context = threadContext.stashContext(); diff --git a/src/test/java/org/opensearch/ad/ratelimit/CheckPointMaintainRequestAdapterTests.java b/src/test/java/org/opensearch/ad/ratelimit/CheckPointMaintainRequestAdapterTests.java index d1fe526de..830ac3f65 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/CheckPointMaintainRequestAdapterTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/CheckPointMaintainRequestAdapterTests.java @@ -59,7 +59,7 @@ public void setUp() throws Exception { cache = mock(CacheProvider.class); checkpointDao = mock(CheckpointDao.class); indexName = ADCommonName.CHECKPOINT_INDEX_NAME; - checkpointInterval = AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ; + checkpointInterval = AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ; EntityCache entityCache = mock(EntityCache.class); when(cache.get()).thenReturn(entityCache); state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().fullModel(true).build()); @@ -67,7 +67,7 @@ public void setUp() throws Exception { clusterService = mock(ClusterService.class); ClusterSettings settings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ))) + Collections.unmodifiableSet(new 
HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ))) ); when(clusterService.getClusterSettings()).thenReturn(settings); adapter = new CheckPointMaintainRequestAdapter( diff --git a/src/test/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorkerTests.java index cba7e8a45..0d05259fc 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/CheckpointMaintainWorkerTests.java @@ -32,7 +32,6 @@ import java.util.Optional; import java.util.Random; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.constant.ADCommonName; @@ -45,6 +44,8 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.timeseries.breaker.CircuitBreakerService; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import test.org.opensearch.ad.util.MLUtil; import test.org.opensearch.ad.util.RandomModelStateConfig; @@ -71,9 +72,9 @@ public void setUp() throws Exception { Arrays .asList( AnomalyDetectorSettings.AD_EXPECTED_CHECKPOINT_MAINTAIN_TIME_IN_MILLISECS, - AnomalyDetectorSettings.CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_BATCH_SIZE, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ ) ) ) @@ -85,7 +86,7 @@ public void setUp() throws Exception { CacheProvider cache = mock(CacheProvider.class); checkpointDao = mock(CheckpointDao.class); String indexName = ADCommonName.CHECKPOINT_INDEX_NAME; - Setting checkpointInterval = AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ; + Setting checkpointInterval = 
AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ; EntityCache entityCache = mock(EntityCache.class); when(cache.get()).thenReturn(entityCache); ModelState state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().fullModel(true).build()); @@ -104,19 +105,19 @@ public void setUp() throws Exception { cpMaintainWorker = new CheckpointMaintainWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_MAINTAIN_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, writeWorker, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager, adapter ); diff --git a/src/test/java/org/opensearch/ad/ratelimit/CheckpointReadWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/CheckpointReadWorkerTests.java index 7d2a48e65..ae8ba3a54 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/CheckpointReadWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/CheckpointReadWorkerTests.java @@ -45,7 +45,6 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.action.get.MultiGetItemResponse; import org.opensearch.action.get.MultiGetResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.constant.ADCommonName; @@ -74,8 +73,10 @@ import 
org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.stats.StatNames; import test.org.opensearch.ad.util.MLUtil; @@ -113,7 +114,7 @@ public void setUp() throws Exception { new HashSet<>( Arrays .asList( - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE ) @@ -156,18 +157,18 @@ public void setUp() throws Exception { worker = new CheckpointReadWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, @@ -175,7 +176,7 @@ public void setUp() throws Exception { nodeStateManager, anomalyDetectionIndices, cacheProvider, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + 
TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, adStats ); @@ -535,18 +536,18 @@ public void testRemoveUnusedQueues() { worker = new CheckpointReadWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, @@ -554,7 +555,7 @@ public void testRemoveUnusedQueues() { nodeStateManager, anomalyDetectionIndices, cacheProvider, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, adStats ); @@ -565,7 +566,7 @@ public void testRemoveUnusedQueues() { assertEquals(CheckpointReadWorker.WORKER_NAME, worker.getWorkerName()); // make RequestQueue.expired return true - when(clock.instant()).thenReturn(Instant.now().plusSeconds(AnomalyDetectorSettings.HOURLY_MAINTENANCE.getSeconds() + 1)); + when(clock.instant()).thenReturn(Instant.now().plusSeconds(TimeSeriesSettings.HOURLY_MAINTENANCE.getSeconds() + 1)); // removed the expired queue worker.maintenance(); @@ -587,18 +588,18 @@ public void testSettingUpdatable() { worker = new CheckpointReadWorker( 2000, 1, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, 
clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, @@ -606,7 +607,7 @@ public void testSettingUpdatable() { nodeStateManager, anomalyDetectionIndices, cacheProvider, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, adStats ); @@ -621,7 +622,7 @@ public void testSettingUpdatable() { Settings newSettings = Settings .builder() - .put(AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT.getKey(), "0.0001") + .put(AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT.getKey(), "0.0001") .build(); Settings.Builder target = Settings.builder(); clusterSettings.updateDynamicSettings(newSettings, target, Settings.builder(), "test"); @@ -634,24 +635,24 @@ public void testSettingUpdatable() { public void testOpenCircuitBreaker() { maintenanceSetup(); - ADCircuitBreakerService breaker = mock(ADCircuitBreakerService.class); + CircuitBreakerService breaker = mock(CircuitBreakerService.class); when(breaker.isOpen()).thenReturn(true); worker = new CheckpointReadWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), breaker, threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, 
+ TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, modelManager, checkpoint, coldstartQueue, @@ -659,7 +660,7 @@ public void testOpenCircuitBreaker() { nodeStateManager, anomalyDetectionIndices, cacheProvider, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, checkpointWriteQueue, adStats ); diff --git a/src/test/java/org/opensearch/ad/ratelimit/CheckpointWriteWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/CheckpointWriteWorkerTests.java index 475a4bc3d..be83484ee 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/CheckpointWriteWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/CheckpointWriteWorkerTests.java @@ -45,7 +45,6 @@ import org.opensearch.action.bulk.BulkItemResponse.Failure; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.constant.ADCommonName; import org.opensearch.ad.ml.CheckpointDao; import org.opensearch.ad.ml.EntityModel; @@ -64,7 +63,9 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.TimeSeriesAnalyticsPlugin; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.constant.CommonName; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import test.org.opensearch.ad.util.MLUtil; import test.org.opensearch.ad.util.RandomModelStateConfig; @@ -89,7 +90,7 @@ public void setUp() throws Exception { new HashSet<>( Arrays .asList( - 
AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_BATCH_SIZE ) @@ -107,24 +108,24 @@ public void setUp() throws Exception { // Integer.MAX_VALUE makes a huge heap worker = new CheckpointWriteWorker( Integer.MAX_VALUE, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, + AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, checkpoint, ADCommonName.CHECKPOINT_INDEX_NAME, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager, - AnomalyDetectorSettings.HOURLY_MAINTENANCE + TimeSeriesSettings.HOURLY_MAINTENANCE ); state = MLUtil.randomModelState(new RandomModelStateConfig.Builder().build()); @@ -211,24 +212,24 @@ public void testTriggerAutoFlush() throws InterruptedException { // create a worker to use mockThreadPool worker = new CheckpointWriteWorker( Integer.MAX_VALUE, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.CHECKPOINT_WRITE_QUEUE_SIZE_IN_BYTES, + 
AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), mockThreadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, checkpoint, ADCommonName.CHECKPOINT_INDEX_NAME, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager, - AnomalyDetectorSettings.HOURLY_MAINTENANCE + TimeSeriesSettings.HOURLY_MAINTENANCE ); // our concurrency is 2, so first 2 requests cause two batches. And the diff --git a/src/test/java/org/opensearch/ad/ratelimit/ColdEntityWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/ColdEntityWorkerTests.java index f4af298c8..d093f20ae 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/ColdEntityWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/ColdEntityWorkerTests.java @@ -27,12 +27,13 @@ import java.util.List; import java.util.Random; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.timeseries.breaker.CircuitBreakerService; +import org.opensearch.timeseries.settings.TimeSeriesSettings; public class ColdEntityWorkerTests extends AbstractRateLimitingTest { ClusterService clusterService; @@ -53,8 +54,8 @@ public void 
setUp() throws Exception { new HashSet<>( Arrays .asList( - AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE ) ) @@ -68,19 +69,19 @@ public void setUp() throws Exception { coldWorker = new ColdEntityWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, settings, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, readWorker, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager ); @@ -99,7 +100,7 @@ public void setUp() throws Exception { TimeValue value = invocation.getArgument(1); // since we have only 1 request each time - long expectedExecutionPerRequestMilli = AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS + long expectedExecutionPerRequestMilli = AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS .getDefault(Settings.EMPTY); long delay = value.getMillis(); assertTrue(delay == expectedExecutionPerRequestMilli); @@ -143,8 +144,8 @@ public void testDelay() { new HashSet<>( Arrays .asList( - 
AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE ) ) @@ -156,19 +157,19 @@ public void testDelay() { coldWorker = new ColdEntityWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_FEATURE_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, readWorker, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager ); diff --git a/src/test/java/org/opensearch/ad/ratelimit/EntityColdStartWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/EntityColdStartWorkerTests.java index 4b123b2e3..9fdf5a396 100644 --- a/src/test/java/org/opensearch/ad/ratelimit/EntityColdStartWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/EntityColdStartWorkerTests.java @@ -28,7 +28,6 @@ import java.util.Random; import org.opensearch.OpenSearchStatusException; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.ml.EntityColdStarter; import org.opensearch.ad.ml.EntityModel; @@ -40,6 +39,8 @@ import 
org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.rest.RestStatus; +import org.opensearch.timeseries.breaker.CircuitBreakerService; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import test.org.opensearch.ad.util.MLUtil; @@ -60,8 +61,8 @@ public void setUp() throws Exception { new HashSet<>( Arrays .asList( - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_CONCURRENCY + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_CONCURRENCY ) ) ) @@ -76,20 +77,20 @@ public void setUp() throws Exception { worker = new EntityColdStartWorker( Integer.MAX_VALUE, AnomalyDetectorSettings.ENTITY_REQUEST_SIZE_IN_BYTES, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, entityColdStarter, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, nodeStateManager, cacheProvider ); diff --git a/src/test/java/org/opensearch/ad/ratelimit/ResultWriteWorkerTests.java b/src/test/java/org/opensearch/ad/ratelimit/ResultWriteWorkerTests.java index 10d39fc99..304a942c7 100644 --- 
a/src/test/java/org/opensearch/ad/ratelimit/ResultWriteWorkerTests.java +++ b/src/test/java/org/opensearch/ad/ratelimit/ResultWriteWorkerTests.java @@ -33,7 +33,6 @@ import org.opensearch.OpenSearchStatusException; import org.opensearch.action.index.IndexRequest; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.constant.ADCommonName; import org.opensearch.ad.model.AnomalyResult; import org.opensearch.ad.settings.AnomalyDetectorSettings; @@ -49,6 +48,8 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.TestHelpers; +import org.opensearch.timeseries.breaker.CircuitBreakerService; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.RestHandlerUtils; public class ResultWriteWorkerTests extends AbstractRateLimitingTest { @@ -69,7 +70,7 @@ public void setUp() throws Exception { new HashSet<>( Arrays .asList( - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_BATCH_SIZE ) @@ -85,23 +86,23 @@ public void setUp() throws Exception { resultWriteQueue = new ResultWriteWorker( Integer.MAX_VALUE, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_SIZE_IN_BYTES, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, + TimeSeriesSettings.RESULT_WRITE_QUEUE_SIZE_IN_BYTES, + AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, clusterService, new Random(42), - mock(ADCircuitBreakerService.class), + mock(CircuitBreakerService.class), threadPool, Settings.EMPTY, - AnomalyDetectorSettings.MAX_QUEUED_TASKS_RATIO, + TimeSeriesSettings.MAX_QUEUED_TASKS_RATIO, clock, - AnomalyDetectorSettings.MEDIUM_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.LOW_SEGMENT_PRUNE_RATIO, - AnomalyDetectorSettings.MAINTENANCE_FREQ_CONSTANT, - 
AnomalyDetectorSettings.QUEUE_MAINTENANCE, + TimeSeriesSettings.MEDIUM_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.LOW_SEGMENT_PRUNE_RATIO, + TimeSeriesSettings.MAINTENANCE_FREQ_CONSTANT, + TimeSeriesSettings.QUEUE_MAINTENANCE, resultHandler, xContentRegistry(), nodeStateManager, - AnomalyDetectorSettings.HOURLY_MAINTENANCE + TimeSeriesSettings.HOURLY_MAINTENANCE ); detectResult = TestHelpers.randomHCADAnomalyDetectResult(0.8, Double.NaN, null); diff --git a/src/test/java/org/opensearch/ad/settings/AnomalyDetectorSettingsTests.java b/src/test/java/org/opensearch/ad/settings/AnomalyDetectorSettingsTests.java index 18dba3df4..085ea5959 100644 --- a/src/test/java/org/opensearch/ad/settings/AnomalyDetectorSettingsTests.java +++ b/src/test/java/org/opensearch/ad/settings/AnomalyDetectorSettingsTests.java @@ -58,7 +58,7 @@ public void testAllLegacyOpenDistroSettingsReturned() { LegacyOpenDistroAnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, LegacyOpenDistroAnomalyDetectorSettings.INDEX_PRESSURE_SOFT_LIMIT, LegacyOpenDistroAnomalyDetectorSettings.MAX_PRIMARY_SHARDS, - LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, LegacyOpenDistroAnomalyDetectorSettings.MAX_CACHE_MISS_HANDLING_PER_SECOND, LegacyOpenDistroAnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, LegacyOpenDistroAnomalyDetectorSettings.BATCH_TASK_PIECE_INTERVAL_SECONDS, @@ -77,8 +77,8 @@ public void testAllOpenSearchSettingsReturned() { .containsAll( Arrays .asList( - AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, - AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS, + AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS, + AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS, AnomalyDetectorSettings.MAX_ANOMALY_FEATURES, AnomalyDetectorSettings.AD_REQUEST_TIMEOUT, AnomalyDetectorSettings.DETECTION_INTERVAL, @@ -91,33 +91,33 @@ public void testAllOpenSearchSettingsReturned() { 
AnomalyDetectorSettings.AD_BACKOFF_INITIAL_DELAY, AnomalyDetectorSettings.AD_MAX_RETRY_FOR_BACKOFF, AnomalyDetectorSettings.AD_RESULT_HISTORY_RETENTION_PERIOD, - AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, - AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, + AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE, + AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, AnomalyDetectorSettings.AD_INDEX_PRESSURE_SOFT_LIMIT, AnomalyDetectorSettings.AD_INDEX_PRESSURE_HARD_LIMIT, AnomalyDetectorSettings.AD_MAX_PRIMARY_SHARDS, - AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, + AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, AnomalyDetectorSettings.BATCH_TASK_PIECE_INTERVAL_SECONDS, AnomalyDetectorSettings.MAX_OLD_AD_TASK_DOCS_PER_DETECTOR, AnomalyDetectorSettings.BATCH_TASK_PIECE_SIZE, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_CONCURRENCY, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_CONCURRENCY, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_CONCURRENCY, AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_BATCH_SIZE, AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_BATCH_SIZE, AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_BATCH_SIZE, - AnomalyDetectorSettings.DEDICATED_CACHE_SIZE, - AnomalyDetectorSettings.COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, - AnomalyDetectorSettings.EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, - AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, - AnomalyDetectorSettings.PAGE_SIZE, + AnomalyDetectorSettings.AD_DEDICATED_CACHE_SIZE, + 
AnomalyDetectorSettings.AD_COLD_ENTITY_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_READ_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_CHECKPOINT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_RESULT_WRITE_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_ENTITY_COLD_START_QUEUE_MAX_HEAP_PERCENT, + AnomalyDetectorSettings.AD_EXPECTED_COLD_ENTITY_EXECUTION_TIME_IN_MILLISECS, + AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, + AnomalyDetectorSettings.AD_PAGE_SIZE, TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE, TimeSeriesSettings.BACKOFF_MINUTES ) @@ -127,11 +127,11 @@ public void testAllOpenSearchSettingsReturned() { public void testAllLegacyOpenDistroSettingsFallback() { assertEquals( - AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(Settings.EMPTY), + AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(Settings.EMPTY), LegacyOpenDistroAnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(Settings.EMPTY) ); assertEquals( - AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(Settings.EMPTY), + AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS.get(Settings.EMPTY), LegacyOpenDistroAnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(Settings.EMPTY) ); assertEquals( @@ -179,7 +179,7 @@ public void testAllLegacyOpenDistroSettingsFallback() { LegacyOpenDistroAnomalyDetectorSettings.AD_RESULT_HISTORY_RETENTION_PERIOD.get(Settings.EMPTY) ); assertEquals( - AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(Settings.EMPTY), + AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.get(Settings.EMPTY), LegacyOpenDistroAnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(Settings.EMPTY) ); // MAX_ENTITIES_FOR_PREVIEW does not use legacy setting @@ -191,8 +191,8 @@ public void testAllLegacyOpenDistroSettingsFallback() { LegacyOpenDistroAnomalyDetectorSettings.MAX_PRIMARY_SHARDS.get(Settings.EMPTY) ); assertEquals( - 
AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(Settings.EMPTY), - LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(Settings.EMPTY) + AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(Settings.EMPTY), + LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(Settings.EMPTY) ); assertEquals( AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE.get(Settings.EMPTY), @@ -218,11 +218,11 @@ public void testSettingsGetValue() { assertEquals(LegacyOpenDistroAnomalyDetectorSettings.REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(10)); settings = Settings.builder().put("plugins.anomaly_detection.max_anomaly_detectors", 99).build(); - assertEquals(AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(99)); + assertEquals(AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(99)); assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(1000)); settings = Settings.builder().put("plugins.anomaly_detection.max_multi_entity_anomaly_detectors", 98).build(); - assertEquals(AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(98)); + assertEquals(AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS.get(settings), Integer.valueOf(98)); assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(10)); settings = Settings.builder().put("plugins.anomaly_detection.max_anomaly_features", 7).build(); @@ -282,19 +282,19 @@ public void testSettingsGetValue() { assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MAX_RETRY_FOR_BACKOFF.get(settings), Integer.valueOf(3)); settings = Settings.builder().put("plugins.anomaly_detection.max_retry_for_end_run_exception", 86).build(); - assertEquals(AnomalyDetectorSettings.MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings), Integer.valueOf(86)); + 
assertEquals(AnomalyDetectorSettings.AD_MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings), Integer.valueOf(86)); assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings), Integer.valueOf(6)); settings = Settings.builder().put("plugins.anomaly_detection.filter_by_backend_roles", true).build(); - assertEquals(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(true)); - assertEquals(LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(false)); + assertEquals(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(true)); + assertEquals(LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(false)); settings = Settings.builder().put("plugins.anomaly_detection.model_max_size_percent", 0.3).build(); - assertEquals(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(settings), Double.valueOf(0.3)); + assertEquals(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.get(settings), Double.valueOf(0.3)); assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(settings), Double.valueOf(0.1)); settings = Settings.builder().put("plugins.anomaly_detection.max_entities_per_query", 83).build(); - assertEquals(AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY.get(settings), Integer.valueOf(83)); + assertEquals(AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY.get(settings), Integer.valueOf(83)); assertEquals(LegacyOpenDistroAnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY.get(settings), Integer.valueOf(1000)); settings = Settings.builder().put("plugins.anomaly_detection.max_entities_for_preview", 22).build(); @@ -361,8 +361,8 @@ public void testSettingsGetValueWithLegacyFallback() { .put("opendistro.anomaly_detection.batch_task_piece_interval_seconds", 26) .build(); - assertEquals(AnomalyDetectorSettings.MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(1)); - 
assertEquals(AnomalyDetectorSettings.MAX_MULTI_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(2)); + assertEquals(AnomalyDetectorSettings.AD_MAX_SINGLE_ENTITY_ANOMALY_DETECTORS.get(settings), Integer.valueOf(1)); + assertEquals(AnomalyDetectorSettings.AD_MAX_HC_ANOMALY_DETECTORS.get(settings), Integer.valueOf(2)); assertEquals(AnomalyDetectorSettings.MAX_ANOMALY_FEATURES.get(settings), Integer.valueOf(3)); assertEquals(AnomalyDetectorSettings.AD_REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(4)); assertEquals(AnomalyDetectorSettings.DETECTION_INTERVAL.get(settings), TimeValue.timeValueMinutes(5)); @@ -378,9 +378,9 @@ public void testSettingsGetValueWithLegacyFallback() { assertEquals(TimeSeriesSettings.BACKOFF_MINUTES.get(settings), TimeValue.timeValueMinutes(12)); assertEquals(AnomalyDetectorSettings.AD_BACKOFF_INITIAL_DELAY.get(settings), TimeValue.timeValueMillis(13)); assertEquals(AnomalyDetectorSettings.AD_MAX_RETRY_FOR_BACKOFF.get(settings), Integer.valueOf(14)); - assertEquals(AnomalyDetectorSettings.MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings), Integer.valueOf(15)); - assertEquals(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(true)); - assertEquals(AnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE.get(settings), Double.valueOf(0.6D)); + assertEquals(AnomalyDetectorSettings.AD_MAX_RETRY_FOR_END_RUN_EXCEPTION.get(settings), Integer.valueOf(15)); + assertEquals(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.get(settings), Boolean.valueOf(true)); + assertEquals(AnomalyDetectorSettings.AD_MODEL_MAX_SIZE_PERCENTAGE.get(settings), Double.valueOf(0.6D)); // MAX_ENTITIES_FOR_PREVIEW uses default instead of legacy fallback assertEquals(AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW.get(settings), Integer.valueOf(5)); // INDEX_PRESSURE_SOFT_LIMIT uses default instead of legacy fallback @@ -412,7 +412,7 @@ public void testSettingsGetValueWithLegacyFallback() { 
LegacyOpenDistroAnomalyDetectorSettings.AD_RESULT_HISTORY_RETENTION_PERIOD, LegacyOpenDistroAnomalyDetectorSettings.MODEL_MAX_SIZE_PERCENTAGE, LegacyOpenDistroAnomalyDetectorSettings.MAX_PRIMARY_SHARDS, - LegacyOpenDistroAnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroAnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, LegacyOpenDistroAnomalyDetectorSettings.MAX_CACHE_MISS_HANDLING_PER_SECOND, LegacyOpenDistroAnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, LegacyOpenDistroAnomalyDetectorSettings.BATCH_TASK_PIECE_INTERVAL_SECONDS, diff --git a/src/test/java/org/opensearch/ad/stats/ADStatsTests.java b/src/test/java/org/opensearch/ad/stats/ADStatsTests.java index fb5bf220e..00f9836c7 100644 --- a/src/test/java/org/opensearch/ad/stats/ADStatsTests.java +++ b/src/test/java/org/opensearch/ad/stats/ADStatsTests.java @@ -14,7 +14,7 @@ import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE; import java.time.Clock; import java.util.ArrayList; @@ -107,11 +107,11 @@ public void setup() { nodeStatName1 = "nodeStat1"; nodeStatName2 = "nodeStat2"; - Settings settings = Settings.builder().put(MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); + Settings settings = Settings.builder().put(AD_MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(MAX_MODEL_SIZE_PER_NODE))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AD_MAX_MODEL_SIZE_PER_NODE))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/src/test/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplierTests.java 
b/src/test/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplierTests.java index c0173593c..a95a61b81 100644 --- a/src/test/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplierTests.java +++ b/src/test/java/org/opensearch/ad/stats/suppliers/ModelsOnNodeSupplierTests.java @@ -13,7 +13,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE; import static org.opensearch.ad.stats.suppliers.ModelsOnNodeSupplier.MODEL_STATE_STAT_KEYS; import java.time.Clock; @@ -90,11 +90,11 @@ public void setup() { @Test public void testGet() { - Settings settings = Settings.builder().put(MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); + Settings settings = Settings.builder().put(AD_MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(MAX_MODEL_SIZE_PER_NODE))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AD_MAX_MODEL_SIZE_PER_NODE))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/src/test/java/org/opensearch/ad/task/ADTaskCacheManagerTests.java b/src/test/java/org/opensearch/ad/task/ADTaskCacheManagerTests.java index baff7155f..ad14b49c4 100644 --- a/src/test/java/org/opensearch/ad/task/ADTaskCacheManagerTests.java +++ b/src/test/java/org/opensearch/ad/task/ADTaskCacheManagerTests.java @@ -19,9 +19,9 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.ad.MemoryTracker.Origin.HISTORICAL_SINGLE_ENTITY_DETECTOR; import static org.opensearch.ad.constant.ADCommonMessages.DETECTOR_IS_RUNNING; import static 
org.opensearch.ad.task.ADTaskCacheManager.TASK_RETRY_LIMIT; +import static org.opensearch.timeseries.MemoryTracker.Origin.HISTORICAL_SINGLE_ENTITY_DETECTOR; import java.io.IOException; import java.time.Instant; @@ -33,7 +33,6 @@ import org.junit.After; import org.junit.Before; -import org.opensearch.ad.MemoryTracker; import org.opensearch.ad.model.ADTask; import org.opensearch.ad.model.ADTaskType; import org.opensearch.ad.model.AnomalyDetector; @@ -42,10 +41,13 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.timeseries.MemoryTracker; import org.opensearch.timeseries.TestHelpers; import org.opensearch.timeseries.common.exception.DuplicateTaskException; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.model.TaskState; +import org.opensearch.timeseries.settings.TimeSeriesSettings; +import org.opensearch.timeseries.task.RealtimeTaskCache; import com.google.common.collect.ImmutableList; @@ -63,7 +65,7 @@ public void setUp() throws Exception { settings = Settings .builder() .put(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE.getKey(), 2) - .put(AnomalyDetectorSettings.MAX_CACHED_DELETED_TASKS.getKey(), 100) + .put(TimeSeriesSettings.MAX_CACHED_DELETED_TASKS.getKey(), 100) .build(); clusterService = mock(ClusterService.class); @@ -72,7 +74,7 @@ public void setUp() throws Exception { Collections .unmodifiableSet( new HashSet<>( - Arrays.asList(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, AnomalyDetectorSettings.MAX_CACHED_DELETED_TASKS) + Arrays.asList(AnomalyDetectorSettings.MAX_BATCH_TASK_PER_NODE, TimeSeriesSettings.MAX_CACHED_DELETED_TASKS) ) ) ); @@ -349,7 +351,7 @@ public void testUpdateRealtimeTaskCache() { String detectorId = randomAlphaOfLength(5); adTaskCacheManager.initRealtimeTaskCache(detectorId, 60_000); adTaskCacheManager.updateRealtimeTaskCache(detectorId, null, null, 
null); - ADRealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); + RealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); assertNull(realtimeTaskCache.getState()); assertNull(realtimeTaskCache.getError()); assertNull(realtimeTaskCache.getInitProgress()); @@ -379,10 +381,10 @@ public void testGetAndDecreaseEntityTaskLanes() throws IOException { public void testDeletedTask() { String taskId = randomAlphaOfLength(10); - adTaskCacheManager.addDeletedDetectorTask(taskId); - assertTrue(adTaskCacheManager.hasDeletedDetectorTask()); - assertEquals(taskId, adTaskCacheManager.pollDeletedDetectorTask()); - assertFalse(adTaskCacheManager.hasDeletedDetectorTask()); + adTaskCacheManager.addDeletedTask(taskId); + assertTrue(adTaskCacheManager.hasDeletedTask()); + assertEquals(taskId, adTaskCacheManager.pollDeletedTask()); + assertFalse(adTaskCacheManager.hasDeletedTask()); } public void testAcquireTaskUpdatingSemaphore() throws IOException, InterruptedException { @@ -527,7 +529,7 @@ public void testTaskLanes() throws IOException { public void testRefreshRealtimeJobRunTime() throws InterruptedException { String detectorId = randomAlphaOfLength(5); adTaskCacheManager.initRealtimeTaskCache(detectorId, 1_000); - ADRealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); + RealtimeTaskCache realtimeTaskCache = adTaskCacheManager.getRealtimeTaskCache(detectorId); assertFalse(realtimeTaskCache.expired()); Thread.sleep(3_000); assertTrue(realtimeTaskCache.expired()); @@ -537,10 +539,10 @@ public void testRefreshRealtimeJobRunTime() throws InterruptedException { public void testAddDeletedDetector() { String detectorId = randomAlphaOfLength(5); - adTaskCacheManager.addDeletedDetector(detectorId); - String polledDetectorId = adTaskCacheManager.pollDeletedDetector(); + adTaskCacheManager.addDeletedConfig(detectorId); + String polledDetectorId = adTaskCacheManager.pollDeletedConfig(); 
assertEquals(detectorId, polledDetectorId); - assertNull(adTaskCacheManager.pollDeletedDetector()); + assertNull(adTaskCacheManager.pollDeletedConfig()); } public void testAddPendingEntitiesWithEmptyList() throws IOException { diff --git a/src/test/java/org/opensearch/ad/task/ADTaskManagerTests.java b/src/test/java/org/opensearch/ad/task/ADTaskManagerTests.java index cedaf527b..9d09f7fee 100644 --- a/src/test/java/org/opensearch/ad/task/ADTaskManagerTests.java +++ b/src/test/java/org/opensearch/ad/task/ADTaskManagerTests.java @@ -129,6 +129,7 @@ import org.opensearch.timeseries.model.Entity; import org.opensearch.timeseries.model.Job; import org.opensearch.timeseries.model.TaskState; +import org.opensearch.timeseries.task.RealtimeTaskCache; import org.opensearch.timeseries.util.DiscoveryNodeFilterer; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -823,14 +824,14 @@ public void testResetLatestFlagAsFalse() throws IOException { } public void testCleanADResultOfDeletedDetectorWithNoDeletedDetector() { - when(adTaskCacheManager.pollDeletedDetector()).thenReturn(null); + when(adTaskCacheManager.pollDeletedConfig()).thenReturn(null); adTaskManager.cleanADResultOfDeletedDetector(); verify(client, never()).execute(eq(DeleteByQueryAction.INSTANCE), any(), any()); } public void testCleanADResultOfDeletedDetectorWithException() { String detectorId = randomAlphaOfLength(5); - when(adTaskCacheManager.pollDeletedDetector()).thenReturn(detectorId); + when(adTaskCacheManager.pollDeletedConfig()).thenReturn(detectorId); doAnswer(invocation -> { ActionListener listener = invocation.getArgument(2); @@ -878,11 +879,11 @@ public void testCleanADResultOfDeletedDetectorWithException() { ); adTaskManager.cleanADResultOfDeletedDetector(); verify(client, times(1)).execute(eq(DeleteByQueryAction.INSTANCE), any(), any()); - verify(adTaskCacheManager, times(1)).addDeletedDetector(eq(detectorId)); + verify(adTaskCacheManager, 
times(1)).addDeletedConfig(eq(detectorId)); adTaskManager.cleanADResultOfDeletedDetector(); verify(client, times(2)).execute(eq(DeleteByQueryAction.INSTANCE), any(), any()); - verify(adTaskCacheManager, times(1)).addDeletedDetector(eq(detectorId)); + verify(adTaskCacheManager, times(1)).addDeletedConfig(eq(detectorId)); } public void testMaintainRunningHistoricalTasksWithOwningNodeIsNotLocalNode() { @@ -987,11 +988,11 @@ public void testMaintainRunningRealtimeTasks() { when(adTaskCacheManager.getDetectorIdsInRealtimeTaskCache()).thenReturn(new String[] { detectorId1, detectorId2, detectorId3 }); when(adTaskCacheManager.getRealtimeTaskCache(detectorId1)).thenReturn(null); - ADRealtimeTaskCache cacheOfDetector2 = mock(ADRealtimeTaskCache.class); + RealtimeTaskCache cacheOfDetector2 = mock(RealtimeTaskCache.class); when(cacheOfDetector2.expired()).thenReturn(false); when(adTaskCacheManager.getRealtimeTaskCache(detectorId2)).thenReturn(cacheOfDetector2); - ADRealtimeTaskCache cacheOfDetector3 = mock(ADRealtimeTaskCache.class); + RealtimeTaskCache cacheOfDetector3 = mock(RealtimeTaskCache.class); when(cacheOfDetector3.expired()).thenReturn(true); when(adTaskCacheManager.getRealtimeTaskCache(detectorId3)).thenReturn(cacheOfDetector3); @@ -1279,14 +1280,14 @@ public void testCreateADTaskDirectlyWithException() throws IOException { } public void testCleanChildTasksAndADResultsOfDeletedTaskWithNoDeletedDetectorTask() { - when(adTaskCacheManager.hasDeletedDetectorTask()).thenReturn(false); + when(adTaskCacheManager.hasDeletedTask()).thenReturn(false); adTaskManager.cleanChildTasksAndADResultsOfDeletedTask(); verify(client, never()).execute(any(), any(), any()); } public void testCleanChildTasksAndADResultsOfDeletedTaskWithNullTask() { - when(adTaskCacheManager.hasDeletedDetectorTask()).thenReturn(true); - when(adTaskCacheManager.pollDeletedDetectorTask()).thenReturn(null); + when(adTaskCacheManager.hasDeletedTask()).thenReturn(true); + 
when(adTaskCacheManager.pollDeletedTask()).thenReturn(null); doAnswer(invocation -> { ActionListener actionListener = invocation.getArgument(2); actionListener.onFailure(new RuntimeException("test")); @@ -1304,8 +1305,8 @@ public void testCleanChildTasksAndADResultsOfDeletedTaskWithNullTask() { } public void testCleanChildTasksAndADResultsOfDeletedTaskWithFailToDeleteADResult() { - when(adTaskCacheManager.hasDeletedDetectorTask()).thenReturn(true); - when(adTaskCacheManager.pollDeletedDetectorTask()).thenReturn(randomAlphaOfLength(5)); + when(adTaskCacheManager.hasDeletedTask()).thenReturn(true); + when(adTaskCacheManager.pollDeletedTask()).thenReturn(randomAlphaOfLength(5)); doAnswer(invocation -> { ActionListener actionListener = invocation.getArgument(2); actionListener.onFailure(new RuntimeException("test")); @@ -1323,8 +1324,8 @@ public void testCleanChildTasksAndADResultsOfDeletedTaskWithFailToDeleteADResult } public void testCleanChildTasksAndADResultsOfDeletedTask() { - when(adTaskCacheManager.hasDeletedDetectorTask()).thenReturn(true); - when(adTaskCacheManager.pollDeletedDetectorTask()).thenReturn(randomAlphaOfLength(5)).thenReturn(null); + when(adTaskCacheManager.hasDeletedTask()).thenReturn(true); + when(adTaskCacheManager.pollDeletedTask()).thenReturn(randomAlphaOfLength(5)).thenReturn(null); doAnswer(invocation -> { ActionListener actionListener = invocation.getArgument(2); BulkByScrollResponse response = mock(BulkByScrollResponse.class); @@ -1621,7 +1622,7 @@ public void testDeleteTaskDocs() { ExecutorFunction function = mock(ExecutorFunction.class); ActionListener listener = mock(ActionListener.class); adTaskManager.deleteTaskDocs(detectorId, searchRequest, function, listener); - verify(adTaskCacheManager, times(1)).addDeletedDetectorTask(anyString()); + verify(adTaskCacheManager, times(1)).addDeletedTask(anyString()); verify(function, times(1)).execute(); } } diff --git 
a/src/test/java/org/opensearch/ad/transport/ADStatsNodesTransportActionTests.java b/src/test/java/org/opensearch/ad/transport/ADStatsNodesTransportActionTests.java index 79ca32d77..2284c311e 100644 --- a/src/test/java/org/opensearch/ad/transport/ADStatsNodesTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/ADStatsNodesTransportActionTests.java @@ -13,7 +13,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE; import java.time.Clock; import java.util.Arrays; @@ -79,11 +79,11 @@ public void setUp() throws Exception { nodeStatName1 = "nodeStat1"; nodeStatName2 = "nodeStat2"; - Settings settings = Settings.builder().put(MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); + Settings settings = Settings.builder().put(AD_MAX_MODEL_SIZE_PER_NODE.getKey(), 10).build(); ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(MAX_MODEL_SIZE_PER_NODE))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AD_MAX_MODEL_SIZE_PER_NODE))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/src/test/java/org/opensearch/ad/transport/AnomalyDetectorJobActionTests.java b/src/test/java/org/opensearch/ad/transport/AnomalyDetectorJobActionTests.java index 6bce7e347..42c09d44f 100644 --- a/src/test/java/org/opensearch/ad/transport/AnomalyDetectorJobActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/AnomalyDetectorJobActionTests.java @@ -57,7 +57,7 @@ public void setUp() throws Exception { ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new 
HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); Settings build = Settings.builder().build(); diff --git a/src/test/java/org/opensearch/ad/transport/AnomalyResultTests.java b/src/test/java/org/opensearch/ad/transport/AnomalyResultTests.java index 76b98abc8..1de86c710 100644 --- a/src/test/java/org/opensearch/ad/transport/AnomalyResultTests.java +++ b/src/test/java/org/opensearch/ad/transport/AnomalyResultTests.java @@ -65,7 +65,6 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.cluster.HashRing; import org.opensearch.ad.common.exception.JsonPathNotFoundException; import org.opensearch.ad.constant.ADCommonMessages; @@ -108,6 +107,7 @@ import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TestHelpers; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.InternalFailure; import org.opensearch.timeseries.common.exception.LimitExceededException; @@ -149,7 +149,7 @@ public class AnomalyResultTests extends AbstractTimeSeriesTest { private String adID; private String featureId; private String featureName; - private ADCircuitBreakerService adCircuitBreakerService; + private CircuitBreakerService adCircuitBreakerService; private ADStats adStats; private double confidence; private double anomalyGrade; @@ -172,7 +172,7 @@ public void setUp() throws Exception { super.setUp(); super.setUpLog4jForJUnit(AnomalyResultTransportAction.class); - setupTestNodes(AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, AnomalyDetectorSettings.PAGE_SIZE); + 
setupTestNodes(AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, AnomalyDetectorSettings.AD_PAGE_SIZE); transportService = testNodes[0].transportService; clusterService = testNodes[0].clusterService; @@ -253,7 +253,7 @@ public void setUp() throws Exception { thresholdModelID = SingleStreamModelIdMapper.getThresholdModelId(adID); // "123-threshold"; // when(normalModelPartitioner.getThresholdModelId(any(String.class))).thenReturn(thresholdModelID); - adCircuitBreakerService = mock(ADCircuitBreakerService.class); + adCircuitBreakerService = mock(CircuitBreakerService.class); when(adCircuitBreakerService.isOpen()).thenReturn(false); ThreadPool threadPool = mock(ThreadPool.class); @@ -460,8 +460,8 @@ public void sendRequest( setupTestNodes( failureTransportInterceptor, Settings.EMPTY, - AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, - AnomalyDetectorSettings.PAGE_SIZE + AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, + AnomalyDetectorSettings.AD_PAGE_SIZE ); // mock hashing ring response. This has to happen after setting up test nodes with the failure interceptor @@ -683,8 +683,8 @@ public void sendRequest( setupTestNodes( failureTransportInterceptor, Settings.EMPTY, - AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY, - AnomalyDetectorSettings.PAGE_SIZE + AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY, + AnomalyDetectorSettings.AD_PAGE_SIZE ); // mock hashing ring response. 
This has to happen after setting up test nodes with the failure interceptor @@ -735,7 +735,7 @@ public void sendRequest( public void testCircuitBreaker() { - ADCircuitBreakerService breakerService = mock(ADCircuitBreakerService.class); + CircuitBreakerService breakerService = mock(CircuitBreakerService.class); when(breakerService.isOpen()).thenReturn(true); // These constructors register handler in transport service diff --git a/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorActionTests.java b/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorActionTests.java index c26da5a21..93e291325 100644 --- a/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorActionTests.java @@ -49,7 +49,7 @@ public void setUp() throws Exception { ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); adTaskManager = mock(ADTaskManager.class); diff --git a/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTests.java b/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTests.java index dbf9bc813..9d369b121 100644 --- a/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTests.java +++ b/src/test/java/org/opensearch/ad/transport/DeleteAnomalyDetectorTests.java @@ -89,7 +89,7 @@ public void setUp() throws Exception { clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new 
HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); transportService = new TransportService( diff --git a/src/test/java/org/opensearch/ad/transport/EntityResultTransportActionTests.java b/src/test/java/org/opensearch/ad/transport/EntityResultTransportActionTests.java index c48e35659..d3cc0ab4b 100644 --- a/src/test/java/org/opensearch/ad/transport/EntityResultTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/EntityResultTransportActionTests.java @@ -50,7 +50,6 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.ad.AnomalyDetectorJobRunnerTests; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.common.exception.JsonPathNotFoundException; @@ -83,6 +82,7 @@ import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TestHelpers; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.constant.CommonMessages; @@ -103,7 +103,7 @@ public class EntityResultTransportActionTests extends AbstractTimeSeriesTest { ActionFilters actionFilters; TransportService transportService; ModelManager manager; - ADCircuitBreakerService adCircuitBreakerService; + CircuitBreakerService adCircuitBreakerService; CheckpointDao checkpointDao; CacheProvider provider; EntityCache entityCache; @@ -154,7 +154,7 @@ public void setUp() throws Exception { actionFilters = mock(ActionFilters.class); transportService = mock(TransportService.class); - adCircuitBreakerService = mock(ADCircuitBreakerService.class); + 
adCircuitBreakerService = mock(CircuitBreakerService.class); when(adCircuitBreakerService.isOpen()).thenReturn(false); checkpointDao = mock(CheckpointDao.class); @@ -173,13 +173,13 @@ public void setUp() throws Exception { settings = Settings .builder() .put(AnomalyDetectorSettings.AD_COOLDOWN_MINUTES.getKey(), TimeValue.timeValueMinutes(5)) - .put(AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ.getKey(), TimeValue.timeValueHours(12)) + .put(AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ.getKey(), TimeValue.timeValueHours(12)) .build(); clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); manager = new ModelManager( @@ -192,7 +192,7 @@ public void setUp() throws Exception { 0, 0, null, - AnomalyDetectorSettings.CHECKPOINT_SAVING_FREQ, + AnomalyDetectorSettings.AD_CHECKPOINT_SAVING_FREQ, mock(EntityColdStarter.class), null, null, @@ -221,7 +221,7 @@ public void setUp() throws Exception { entities.put(cacheMissEntityObj, cacheMissData); cacheHitEntityObj = Entity.createSingleAttributeEntity(detector.getCategoryFields().get(0), cacheHitEntity); entities.put(cacheHitEntityObj, cacheHitData); - tooLongEntity = randomAlphaOfLength(AnomalyDetectorSettings.MAX_ENTITY_LENGTH + 1); + tooLongEntity = randomAlphaOfLength(257); tooLongData = new double[] { 0.3 }; entities.put(Entity.createSingleAttributeEntity(detector.getCategoryFields().get(0), tooLongEntity), tooLongData); diff --git a/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTests.java b/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTests.java index dfb92a7b8..1181b685f 100644 --- 
a/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTests.java +++ b/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTests.java @@ -93,7 +93,7 @@ public void setUp() throws Exception { ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTransportActionTests.java b/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTransportActionTests.java index 671a95111..35f6ba36f 100644 --- a/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/GetAnomalyDetectorTransportActionTests.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { ClusterService clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); adTaskManager = mock(ADTaskManager.class); diff --git a/src/test/java/org/opensearch/ad/transport/IndexAnomalyDetectorTransportActionTests.java b/src/test/java/org/opensearch/ad/transport/IndexAnomalyDetectorTransportActionTests.java index 0b95a4940..d370fa703 100644 --- a/src/test/java/org/opensearch/ad/transport/IndexAnomalyDetectorTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/IndexAnomalyDetectorTransportActionTests.java @@ -87,7 +87,7 @@ 
public void setUp() throws Exception { clusterService = mock(ClusterService.class); clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); @@ -199,7 +199,7 @@ public void testIndexTransportAction() { @Test public void testIndexTransportActionWithUserAndFilterOn() { - Settings settings = Settings.builder().put(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.getKey(), true).build(); + Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.getKey(), true).build(); ThreadContext threadContext = new ThreadContext(settings); threadContext.putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, "alice|odfe,aes|engineering,operations"); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); diff --git a/src/test/java/org/opensearch/ad/transport/MultiEntityResultTests.java b/src/test/java/org/opensearch/ad/transport/MultiEntityResultTests.java index 828e97f81..bc5691748 100644 --- a/src/test/java/org/opensearch/ad/transport/MultiEntityResultTests.java +++ b/src/test/java/org/opensearch/ad/transport/MultiEntityResultTests.java @@ -24,8 +24,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.MAX_ENTITIES_PER_QUERY; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.PAGE_SIZE; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_MAX_ENTITIES_PER_QUERY; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_PAGE_SIZE; import java.io.IOException; import java.time.Clock; @@ -66,7 +66,6 @@ import 
org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.caching.CacheProvider; import org.opensearch.ad.caching.EntityCache; import org.opensearch.ad.cluster.HashRing; @@ -113,6 +112,7 @@ import org.opensearch.timeseries.AnalysisType; import org.opensearch.timeseries.NodeStateManager; import org.opensearch.timeseries.TestHelpers; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.EndRunException; import org.opensearch.timeseries.common.exception.InternalFailure; import org.opensearch.timeseries.common.exception.LimitExceededException; @@ -153,7 +153,7 @@ public class MultiEntityResultTests extends AbstractTimeSeriesTest { private HashRing hashRing; private ClusterService clusterService; private IndexNameExpressionResolver indexNameResolver; - private ADCircuitBreakerService adCircuitBreakerService; + private CircuitBreakerService adCircuitBreakerService; private ADStats adStats; private ThreadPool mockThreadPool; private String detectorId; @@ -227,8 +227,8 @@ public void setUp() throws Exception { hashRing = mock(HashRing.class); Set> anomalyResultSetting = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - anomalyResultSetting.add(MAX_ENTITIES_PER_QUERY); - anomalyResultSetting.add(PAGE_SIZE); + anomalyResultSetting.add(AD_MAX_ENTITIES_PER_QUERY); + anomalyResultSetting.add(AD_PAGE_SIZE); anomalyResultSetting.add(TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE); anomalyResultSetting.add(TimeSeriesSettings.BACKOFF_MINUTES); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, anomalyResultSetting); @@ -245,7 +245,7 @@ public void setUp() throws Exception { indexNameResolver = new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)); - adCircuitBreakerService = 
mock(ADCircuitBreakerService.class); + adCircuitBreakerService = mock(CircuitBreakerService.class); when(adCircuitBreakerService.isOpen()).thenReturn(false); Map> statsMap = new HashMap>() { @@ -441,7 +441,7 @@ public void setUpNormlaStateManager() throws IOException { settings, new ClientUtil(client), clock, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, clusterService, TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE, TimeSeriesSettings.BACKOFF_MINUTES @@ -679,7 +679,7 @@ public void sendRequest( // we start support multi-category fields since 1.1 // Set version to 1.1 will force the outbound/inbound message to use 1.1 version - setupTestNodes(entityResultInterceptor, 5, settings, Version.V_2_0_0, MAX_ENTITIES_PER_QUERY, PAGE_SIZE); + setupTestNodes(entityResultInterceptor, 5, settings, Version.V_2_0_0, AD_MAX_ENTITIES_PER_QUERY, AD_PAGE_SIZE); TransportService realTransportService = testNodes[0].transportService; ClusterService realClusterService = testNodes[0].clusterService; @@ -754,7 +754,7 @@ public void testCircuitBreakerOpen() throws InterruptedException, IOException { settings, clientUtil, clock, - AnomalyDetectorSettings.HOURLY_MAINTENANCE, + TimeSeriesSettings.HOURLY_MAINTENANCE, clusterService, TimeSeriesSettings.MAX_RETRY_FOR_UNRESPONSIVE_NODE, TimeSeriesSettings.BACKOFF_MINUTES @@ -768,7 +768,7 @@ public void testCircuitBreakerOpen() throws InterruptedException, IOException { when(hashRing.getOwningNodeWithSameLocalAdVersionForRealtimeAD(any(String.class))) .thenReturn(Optional.of(testNodes[1].discoveryNode())); - ADCircuitBreakerService openBreaker = mock(ADCircuitBreakerService.class); + CircuitBreakerService openBreaker = mock(CircuitBreakerService.class); when(openBreaker.isOpen()).thenReturn(true); // register entity result action diff --git a/src/test/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportActionTests.java 
b/src/test/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportActionTests.java index 7e1126124..c9553e1fb 100644 --- a/src/test/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/PreviewAnomalyDetectorTransportActionTests.java @@ -46,7 +46,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.WriteRequest; import org.opensearch.ad.AnomalyDetectorRunner; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.feature.FeatureManager; import org.opensearch.ad.feature.Features; import org.opensearch.ad.indices.ADIndexManagement; @@ -73,6 +72,7 @@ import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.timeseries.TestHelpers; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.constant.CommonMessages; import org.opensearch.timeseries.constant.CommonName; import org.opensearch.timeseries.util.RestHandlerUtils; @@ -88,7 +88,7 @@ public class PreviewAnomalyDetectorTransportActionTests extends OpenSearchSingle private FeatureManager featureManager; private ModelManager modelManager; private Task task; - private ADCircuitBreakerService circuitBreaker; + private CircuitBreakerService circuitBreaker; @Override @Before @@ -104,8 +104,8 @@ public void setUp() throws Exception { Arrays .asList( AnomalyDetectorSettings.MAX_ANOMALY_FEATURES, - AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES, - AnomalyDetectorSettings.PAGE_SIZE, + AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES, + AnomalyDetectorSettings.AD_PAGE_SIZE, AnomalyDetectorSettings.MAX_CONCURRENT_PREVIEW ) ) @@ -130,7 +130,7 @@ public void setUp() throws Exception { featureManager = mock(FeatureManager.class); modelManager = mock(ModelManager.class); runner = new AnomalyDetectorRunner(modelManager, featureManager, 
AnomalyDetectorSettings.MAX_PREVIEW_RESULTS); - circuitBreaker = mock(ADCircuitBreakerService.class); + circuitBreaker = mock(CircuitBreakerService.class); when(circuitBreaker.isOpen()).thenReturn(false); action = new PreviewAnomalyDetectorTransportAction( Settings.EMPTY, @@ -278,7 +278,7 @@ public void onFailure(Exception e) { @Test public void testPreviewTransportActionNoContext() throws IOException, InterruptedException { final CountDownLatch inProgressLatch = new CountDownLatch(1); - Settings settings = Settings.builder().put(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES.getKey(), true).build(); + Settings settings = Settings.builder().put(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES.getKey(), true).build(); Client client = mock(Client.class); ThreadContext threadContext = new ThreadContext(settings); threadContext.putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, "alice|odfe,aes|engineering,operations"); diff --git a/src/test/java/org/opensearch/ad/transport/ProfileTransportActionTests.java b/src/test/java/org/opensearch/ad/transport/ProfileTransportActionTests.java index f522d89f1..bccd385bb 100644 --- a/src/test/java/org/opensearch/ad/transport/ProfileTransportActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/ProfileTransportActionTests.java @@ -114,7 +114,7 @@ public void setUp() throws Exception { } private void setUpModelSize(int maxModel) { - Settings nodeSettings = Settings.builder().put(AnomalyDetectorSettings.MAX_MODEL_SIZE_PER_NODE.getKey(), maxModel).build(); + Settings nodeSettings = Settings.builder().put(AnomalyDetectorSettings.AD_MAX_MODEL_SIZE_PER_NODE.getKey(), maxModel).build(); internalCluster().startNode(nodeSettings); } diff --git a/src/test/java/org/opensearch/ad/transport/RCFResultTests.java b/src/test/java/org/opensearch/ad/transport/RCFResultTests.java index afc2d4824..c520b94bf 100644 --- a/src/test/java/org/opensearch/ad/transport/RCFResultTests.java +++ 
b/src/test/java/org/opensearch/ad/transport/RCFResultTests.java @@ -37,7 +37,6 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.ad.breaker.ADCircuitBreakerService; import org.opensearch.ad.cluster.HashRing; import org.opensearch.ad.common.exception.JsonPathNotFoundException; import org.opensearch.ad.constant.ADCommonMessages; @@ -56,6 +55,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.timeseries.breaker.CircuitBreakerService; import org.opensearch.timeseries.common.exception.LimitExceededException; import org.opensearch.timeseries.stats.StatNames; import org.opensearch.transport.Transport; @@ -110,7 +110,7 @@ public void testNormal() { ); ModelManager manager = mock(ModelManager.class); - ADCircuitBreakerService adCircuitBreakerService = mock(ADCircuitBreakerService.class); + CircuitBreakerService adCircuitBreakerService = mock(CircuitBreakerService.class); RCFResultTransportAction action = new RCFResultTransportAction( mock(ActionFilters.class), transportService, @@ -168,7 +168,7 @@ public void testExecutionException() { ); ModelManager manager = mock(ModelManager.class); - ADCircuitBreakerService adCircuitBreakerService = mock(ADCircuitBreakerService.class); + CircuitBreakerService adCircuitBreakerService = mock(CircuitBreakerService.class); RCFResultTransportAction action = new RCFResultTransportAction( mock(ActionFilters.class), transportService, @@ -284,7 +284,7 @@ public void testCircuitBreaker() { ); ModelManager manager = mock(ModelManager.class); - ADCircuitBreakerService breakerService = mock(ADCircuitBreakerService.class); + CircuitBreakerService breakerService = mock(CircuitBreakerService.class); RCFResultTransportAction action = new RCFResultTransportAction( mock(ActionFilters.class), 
transportService, @@ -335,7 +335,7 @@ public void testCorruptModel() { ); ModelManager manager = mock(ModelManager.class); - ADCircuitBreakerService adCircuitBreakerService = mock(ADCircuitBreakerService.class); + CircuitBreakerService adCircuitBreakerService = mock(CircuitBreakerService.class); RCFResultTransportAction action = new RCFResultTransportAction( mock(ActionFilters.class), transportService, diff --git a/src/test/java/org/opensearch/ad/transport/SearchAnomalyDetectorInfoActionTests.java b/src/test/java/org/opensearch/ad/transport/SearchAnomalyDetectorInfoActionTests.java index 1b0538953..f06761bb6 100644 --- a/src/test/java/org/opensearch/ad/transport/SearchAnomalyDetectorInfoActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/SearchAnomalyDetectorInfoActionTests.java @@ -92,7 +92,7 @@ public void onFailure(Exception e) { clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); } diff --git a/src/test/java/org/opensearch/ad/transport/SearchAnomalyResultActionTests.java b/src/test/java/org/opensearch/ad/transport/SearchAnomalyResultActionTests.java index 0f5dbb1d2..877fcd887 100644 --- a/src/test/java/org/opensearch/ad/transport/SearchAnomalyResultActionTests.java +++ b/src/test/java/org/opensearch/ad/transport/SearchAnomalyResultActionTests.java @@ -88,7 +88,7 @@ public void setUp() throws Exception { clusterService = mock(ClusterService.class); ClusterSettings clusterSettings = new ClusterSettings( Settings.EMPTY, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES))) + Collections.unmodifiableSet(new 
HashSet<>(Arrays.asList(AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES))) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); clusterState = createClusterState(); diff --git a/src/test/java/org/opensearch/ad/transport/handler/ADSearchHandlerTests.java b/src/test/java/org/opensearch/ad/transport/handler/ADSearchHandlerTests.java index 0f2da1516..441060c4f 100644 --- a/src/test/java/org/opensearch/ad/transport/handler/ADSearchHandlerTests.java +++ b/src/test/java/org/opensearch/ad/transport/handler/ADSearchHandlerTests.java @@ -17,7 +17,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.ad.settings.AnomalyDetectorSettings.FILTER_BY_BACKEND_ROLES; +import static org.opensearch.ad.settings.AnomalyDetectorSettings.AD_FILTER_BY_BACKEND_ROLES; import static org.opensearch.timeseries.TestHelpers.matchAllRequest; import org.junit.Before; @@ -50,8 +50,8 @@ public class ADSearchHandlerTests extends ADUnitTestCase { @Override public void setUp() throws Exception { super.setUp(); - settings = Settings.builder().put(FILTER_BY_BACKEND_ROLES.getKey(), false).build(); - clusterSettings = clusterSetting(settings, FILTER_BY_BACKEND_ROLES); + settings = Settings.builder().put(AD_FILTER_BY_BACKEND_ROLES.getKey(), false).build(); + clusterSettings = clusterSetting(settings, AD_FILTER_BY_BACKEND_ROLES); clusterService = new ClusterService(settings, clusterSettings, null); client = mock(Client.class); searchHandler = new ADSearchHandler(settings, clusterService, client); @@ -74,7 +74,7 @@ public void testSearchException() { } public void testFilterEnabledWithWrongSearch() { - settings = Settings.builder().put(FILTER_BY_BACKEND_ROLES.getKey(), true).build(); + settings = Settings.builder().put(AD_FILTER_BY_BACKEND_ROLES.getKey(), true).build(); clusterService = new ClusterService(settings, clusterSettings, null); searchHandler = new ADSearchHandler(settings, 
clusterService, client); @@ -83,7 +83,7 @@ public void testFilterEnabledWithWrongSearch() { } public void testFilterEnabled() { - settings = Settings.builder().put(FILTER_BY_BACKEND_ROLES.getKey(), true).build(); + settings = Settings.builder().put(AD_FILTER_BY_BACKEND_ROLES.getKey(), true).build(); clusterService = new ClusterService(settings, clusterSettings, null); searchHandler = new ADSearchHandler(settings, clusterService, client); diff --git a/src/test/java/org/opensearch/timeseries/feature/NoPowermockSearchFeatureDaoTests.java b/src/test/java/org/opensearch/timeseries/feature/NoPowermockSearchFeatureDaoTests.java index 307740361..a35ddc08b 100644 --- a/src/test/java/org/opensearch/timeseries/feature/NoPowermockSearchFeatureDaoTests.java +++ b/src/test/java/org/opensearch/timeseries/feature/NoPowermockSearchFeatureDaoTests.java @@ -103,6 +103,7 @@ import org.opensearch.timeseries.model.Entity; import org.opensearch.timeseries.model.Feature; import org.opensearch.timeseries.model.IntervalTimeConfiguration; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.SecurityClientUtil; import com.google.common.collect.ImmutableList; @@ -164,7 +165,7 @@ public void setUp() throws Exception { Settings.EMPTY, Collections .unmodifiableSet( - new HashSet<>(Arrays.asList(AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, AnomalyDetectorSettings.PAGE_SIZE)) + new HashSet<>(Arrays.asList(AnomalyDetectorSettings.MAX_ENTITIES_FOR_PREVIEW, AnomalyDetectorSettings.AD_PAGE_SIZE)) ) ); clusterService = mock(ClusterService.class); @@ -185,7 +186,7 @@ public void setUp() throws Exception { clientUtil, settings, clusterService, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, clock, 1, 1, @@ -371,7 +372,7 @@ public void testGetHighestCountEntitiesExhaustedPages() throws InterruptedExcept clientUtil, settings, clusterService, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, + 
TimeSeriesSettings.NUM_SAMPLES_PER_TREE, clock, 2, 1, @@ -417,7 +418,7 @@ public void testGetHighestCountEntitiesNotEnoughTime() throws InterruptedExcepti clientUtil, settings, clusterService, - AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE, + TimeSeriesSettings.NUM_SAMPLES_PER_TREE, clock, 2, 1, diff --git a/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoParamTests.java b/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoParamTests.java index 87d00efce..920374b2f 100644 --- a/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoParamTests.java +++ b/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoParamTests.java @@ -51,7 +51,6 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.client.Client; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.action.ActionFuture; @@ -80,6 +79,7 @@ import org.opensearch.timeseries.dataprocessor.Imputer; import org.opensearch.timeseries.dataprocessor.LinearUniformImputer; import org.opensearch.timeseries.model.IntervalTimeConfiguration; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.ParseUtils; import org.opensearch.timeseries.util.SecurityClientUtil; import org.powermock.api.mockito.PowerMockito; @@ -179,7 +179,7 @@ public void setup() throws Exception { }).when(nodeStateManager).getConfig(any(String.class), eq(AnalysisType.AD), any(ActionListener.class)); clientUtil = new SecurityClientUtil(nodeStateManager, settings); searchFeatureDao = spy( - new SearchFeatureDao(client, xContent, imputer, clientUtil, settings, null, AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE) + new SearchFeatureDao(client, xContent, imputer, clientUtil, settings, null, TimeSeriesSettings.NUM_SAMPLES_PER_TREE) ); detectionInterval = new 
IntervalTimeConfiguration(1, ChronoUnit.MINUTES); diff --git a/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoTests.java b/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoTests.java index 8d3701f7b..99b57b506 100644 --- a/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoTests.java +++ b/src/test/java/org/opensearch/timeseries/feature/SearchFeatureDaoTests.java @@ -56,7 +56,6 @@ import org.opensearch.action.search.SearchResponseSections; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.ad.model.AnomalyDetector; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.client.Client; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; @@ -96,6 +95,7 @@ import org.opensearch.timeseries.dataprocessor.LinearUniformImputer; import org.opensearch.timeseries.model.Entity; import org.opensearch.timeseries.model.IntervalTimeConfiguration; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import org.opensearch.timeseries.util.ParseUtils; import org.opensearch.timeseries.util.SecurityClientUtil; import org.powermock.api.mockito.PowerMockito; @@ -182,7 +182,7 @@ public void setup() throws Exception { }).when(nodeStateManager).getConfig(any(String.class), eq(AnalysisType.AD), any(ActionListener.class)); clientUtil = new SecurityClientUtil(nodeStateManager, settings); searchFeatureDao = spy( - new SearchFeatureDao(client, xContent, imputer, clientUtil, settings, null, AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE) + new SearchFeatureDao(client, xContent, imputer, clientUtil, settings, null, TimeSeriesSettings.NUM_SAMPLES_PER_TREE) ); detectionInterval = new IntervalTimeConfiguration(1, ChronoUnit.MINUTES); diff --git a/src/test/java/test/org/opensearch/ad/util/MLUtil.java b/src/test/java/test/org/opensearch/ad/util/MLUtil.java index 53e620d90..6b6bb39af 100644 --- a/src/test/java/test/org/opensearch/ad/util/MLUtil.java +++ 
b/src/test/java/test/org/opensearch/ad/util/MLUtil.java @@ -24,9 +24,9 @@ import org.opensearch.ad.ml.EntityModel; import org.opensearch.ad.ml.ModelManager.ModelType; import org.opensearch.ad.ml.ModelState; -import org.opensearch.ad.settings.AnomalyDetectorSettings; import org.opensearch.common.collect.Tuple; import org.opensearch.timeseries.model.Entity; +import org.opensearch.timeseries.settings.TimeSeriesSettings; import com.amazon.randomcutforest.config.TransformMethod; import com.amazon.randomcutforest.parkservices.ThresholdedRandomCutForest; @@ -39,7 +39,7 @@ */ public class MLUtil { private static Random random = new Random(42); - private static int minSampleSize = AnomalyDetectorSettings.NUM_MIN_SAMPLES; + private static int minSampleSize = TimeSeriesSettings.NUM_MIN_SAMPLES; private static String randomString(int targetStringLength) { int leftLimit = 97; // letter 'a' @@ -96,19 +96,19 @@ public static EntityModel createEmptyModel(Entity entity) { public static EntityModel createNonEmptyModel(String detectorId, int sampleSize, Entity entity) { Queue samples = createQueueSamples(sampleSize); - int numDataPoints = random.nextInt(1000) + AnomalyDetectorSettings.NUM_MIN_SAMPLES; + int numDataPoints = random.nextInt(1000) + TimeSeriesSettings.NUM_MIN_SAMPLES; ThresholdedRandomCutForest trcf = new ThresholdedRandomCutForest( ThresholdedRandomCutForest .builder() .dimensions(1) - .sampleSize(AnomalyDetectorSettings.NUM_SAMPLES_PER_TREE) - .numberOfTrees(AnomalyDetectorSettings.NUM_TREES) - .timeDecay(AnomalyDetectorSettings.TIME_DECAY) - .outputAfter(AnomalyDetectorSettings.NUM_MIN_SAMPLES) + .sampleSize(TimeSeriesSettings.NUM_SAMPLES_PER_TREE) + .numberOfTrees(TimeSeriesSettings.NUM_TREES) + .timeDecay(TimeSeriesSettings.TIME_DECAY) + .outputAfter(TimeSeriesSettings.NUM_MIN_SAMPLES) .initialAcceptFraction(0.125d) .parallelExecutionEnabled(false) .internalShinglingEnabled(true) - .anomalyRate(1 - AnomalyDetectorSettings.THRESHOLD_MIN_PVALUE) + .anomalyRate(1 - 
TimeSeriesSettings.THRESHOLD_MIN_PVALUE) .transformMethod(TransformMethod.NORMALIZE) .alertOnce(true) .autoAdjust(true)