Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Star Tree] [Search] Support for metric aggregations with/without term query #15289

Open
wants to merge 30 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
19e2811
Star Tree Search request/response changes
sandeshkr419 Aug 25, 2024
62f4920
using bst as filter
sandeshkr419 Aug 27, 2024
52d3450
Refactoring & bug fixes
sandeshkr419 Aug 29, 2024
cf2ec5b
change filter logic and aggragtor factory
sandeshkr419 Aug 31, 2024
416570f
Rebasing changes with indexing / file-formats
sandeshkr419 Sep 3, 2024
67b5bb3
enable query caching
sandeshkr419 Sep 3, 2024
0c3a938
minor refactor
sandeshkr419 Sep 3, 2024
e41ea0e
use rewrite instead of createweight in originalorstartreequery
sandeshkr419 Sep 4, 2024
ca6407c
fix request parsing tests
sandeshkr419 Sep 4, 2024
f7caeb1
minor refactoring and test:framework spotless fix
sandeshkr419 Sep 4, 2024
c91ae0a
Refactoring star tree query utils in a utility class
sandeshkr419 Sep 4, 2024
bb3c510
refactoring to utils
sandeshkr419 Sep 4, 2024
24ed013
rebasing with main
sandeshkr419 Sep 5, 2024
193dd89
minor spotless
sandeshkr419 Sep 5, 2024
1900450
fix search service tests
sandeshkr419 Sep 5, 2024
bc80495
fix npe
sandeshkr419 Sep 5, 2024
e2060ae
search service test refactoring
sandeshkr419 Sep 5, 2024
01d1b8f
add changelog
sandeshkr419 Sep 5, 2024
a051bce
temp
sandeshkr419 Sep 19, 2024
c227b74
temp temp
sandeshkr419 Sep 19, 2024
2d10bdd
temp temp temp
sandeshkr419 Sep 25, 2024
6fb4794
fix initial values in aggs
sandeshkr419 Sep 26, 2024
2d19d8a
adding tests
sandeshkr419 Sep 30, 2024
e86ab94
refactoring
sandeshkr419 Oct 1, 2024
e76aa53
max/min agg fix, spotless, test fixes
sandeshkr419 Oct 1, 2024
3887a62
spotless fix, test refactoring
sandeshkr419 Oct 1, 2024
ae4393f
avg aggregator fix, iterators refactoring
sandeshkr419 Oct 1, 2024
326400a
making StarTreeFieldType back to final
sandeshkr419 Oct 2, 2024
89c845d
move value cache to star tree context + other comments
sandeshkr419 Oct 7, 2024
c7b70b0
refactor cache map to cache array
sandeshkr419 Oct 9, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- [S3 Repository] Change default retry mechanism of s3 clients to Standard Mode ([#15978](https://github.com/opensearch-project/OpenSearch/pull/15978))
- Add changes to block calls in cat shards, indices and segments based on dynamic limit settings ([#15986](https://github.com/opensearch-project/OpenSearch/pull/15986))
- New `phone` & `phone-search` analyzer + tokenizer ([#15915](https://github.com/opensearch-project/OpenSearch/pull/15915))
- [Star Tree - Search] Add support for metric aggregations with/without term query ([#15289](https://github.com/opensearch-project/OpenSearch/pull/15289))

### Dependencies
- Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578))
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,242 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.index.compositeindex.datacube.startree.utils;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.opensearch.common.lucene.Lucene;
import org.opensearch.index.codec.composite.CompositeIndexFieldInfo;
import org.opensearch.index.codec.composite.CompositeIndexReader;
import org.opensearch.index.compositeindex.datacube.Dimension;
import org.opensearch.index.compositeindex.datacube.Metric;
import org.opensearch.index.compositeindex.datacube.MetricStat;
import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues;
import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator;
import org.opensearch.index.mapper.CompositeDataCubeFieldType;
import org.opensearch.index.query.MatchAllQueryBuilder;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.TermQueryBuilder;
import org.opensearch.search.aggregations.AggregatorFactory;
import org.opensearch.search.aggregations.LeafBucketCollector;
import org.opensearch.search.aggregations.LeafBucketCollectorBase;
import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory;
import org.opensearch.search.aggregations.support.ValuesSource;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.internal.SearchContext;
import org.opensearch.search.startree.StarTreeFilter;
import org.opensearch.search.startree.StarTreeQueryContext;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.stream.Collectors;

/**
* Helper class for building star-tree query
*
* @opensearch.internal
* @opensearch.experimental
*/
public class StarTreeQueryHelper {

/**
* Checks if the search context can be supported by star-tree
*/
public static boolean isStarTreeSupported(SearchContext context) {
return context.aggregations() != null && context.mapperService().isCompositeIndexPresent() && context.parsedPostFilter() == null;
}

/**
* Gets StarTreeQueryContext from the search context and source builder.
* Returns null if the query and aggregation cannot be supported.
*/
public static StarTreeQueryContext getStarTreeQueryContext(SearchContext context, SearchSourceBuilder source) throws IOException {
sandeshkr419 marked this conversation as resolved.
Show resolved Hide resolved
// Current implementation assumes only single star-tree is supported
CompositeDataCubeFieldType compositeMappedFieldType = (CompositeDataCubeFieldType) context.mapperService()
.getCompositeFieldTypes()
.iterator()
.next();
CompositeIndexFieldInfo starTree = new CompositeIndexFieldInfo(
compositeMappedFieldType.name(),
compositeMappedFieldType.getCompositeIndexType()
);

for (AggregatorFactory aggregatorFactory : context.aggregations().factories().getFactories()) {
MetricStat metricStat = validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory);
if (metricStat == null) {
return null;
}
}

// need to cache star tree values only for multiple aggregations
boolean cacheStarTreeValues = context.aggregations().factories().getFactories().length > 1;
int cacheSize = cacheStarTreeValues ? context.indexShard().segments(false).size() : -1;

return StarTreeQueryHelper.tryCreateStarTreeQueryContext(starTree, compositeMappedFieldType, source.query(), cacheSize);
}

/**
* Uses query builder and composite index info to form star-tree query context
*/
private static StarTreeQueryContext tryCreateStarTreeQueryContext(
CompositeIndexFieldInfo compositeIndexFieldInfo,
CompositeDataCubeFieldType compositeFieldType,
QueryBuilder queryBuilder,
int cacheStarTreeValuesSize
) {
Map<String, Long> queryMap;
if (queryBuilder == null || queryBuilder instanceof MatchAllQueryBuilder) {
queryMap = null;
} else if (queryBuilder instanceof TermQueryBuilder) {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

minor: Would it make sense to push this if down into getStarTreePredicates. I guess we can move it whenever we add support for more query types. That said, I think we should be a little opinionated about where the query-matching logic will live.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Wanted to avoid duplicate checks actually. From getStarTreePredicates it would be difficult to classify whether the query shape is something that is supported or not and that is not feasible from just the queryMap returned from getStarTreePredicates - I mean we could do a distinction between null and empty-map but that might not be very readable - so delegating responsibility of getting queryMap as a separate utility.

List<String> supportedDimensions = compositeFieldType.getDimensions()
.stream()
.map(Dimension::getField)
.collect(Collectors.toList());
queryMap = getStarTreePredicates(queryBuilder, supportedDimensions);
if (queryMap == null) {
return null;
}
} else {
return null;
}
return new StarTreeQueryContext(compositeIndexFieldInfo, queryMap, cacheStarTreeValuesSize);
}

/**
* Parse query body to star-tree predicates
* @param queryBuilder to match star-tree supported query shape
* @return predicates to match
*/
private static Map<String, Long> getStarTreePredicates(QueryBuilder queryBuilder, List<String> supportedDimensions) {
TermQueryBuilder tq = (TermQueryBuilder) queryBuilder;
String field = tq.fieldName();
if (!supportedDimensions.contains(field)) {
return null;
}
long inputQueryVal = Long.parseLong(tq.value().toString());
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If tq.value() isn't a number, this will throw an exception, right?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It fails at a very early step itself when the original (default) query is constructed.

Validated with getting same response with star tree settings enabled & disabled both.

{
    "error": {
        "root_cause": [
            {
                "type": "query_shard_exception",
                "reason": "failed to create query: For input string: \"abc\"",
                "index": "logs-241998",
                "index_uuid": "-hO2g2IDRoO1bFY2bN3MCQ"
            }
        ],
        "type": "search_phase_execution_exception",
        "reason": "all shards failed",
        "phase": "query",
        "grouped": true,
        "failed_shards": [
            {
                "shard": 0,
                "index": "logs-241998",
                "node": "SVlqkJb5R266kvYaGFx5lQ",
                "reason": {
                    "type": "query_shard_exception",
                    "reason": "failed to create query: For input string: \"abc\"",
                    "index": "logs-241998",
                    "index_uuid": "-hO2g2IDRoO1bFY2bN3MCQ",
                    "caused_by": {
                        "type": "number_format_exception",
                        "reason": "For input string: \"abc\""
                    }
                }
            }
        ]
    },
    "status": 400
}

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we have a check to make sure we're querying a numeric field? If the field has a keyword type, then a string will parse successfully.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess you kind of have a check on the indexing side of things, since we currently only support dimensions defined over numeric fields, I think. So, the field wouldn't be present in supportedDimensions if it's not numeric. Eventually, we will want to support non-string values, though, right?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right now numeric fields are only supported. Once keyword fields are introduced, will have to take care of it then I guess.
@bharath-techie - how are keyword fields ingested - their values will be ingested be converting to some numeric fields only, right?

Copy link
Contributor

@bharath-techie bharath-techie Oct 9, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey keyword support will be available quite soon as code changes are complete and tests are being added.

No the user input will be a string, first you need to convert to bytesRef, then you need to look up the ord for the bytesRef in the associated dimension starTreeDocValues field. [ SortedSetDocValues field for keyword fields compared to SortedNumericDocValues of numeric fields ]

Something like below:

BytesRef bytesRef = getBytesRefForField(val, keyword.getKey(), context);
long ord = keywordMap.get(keyword.getKey()).lookupTerm(bytesRef);
long fieldOrd = field.lookupTerm(bytesRef);
...

...
if (ord > 0) {
  Predicate<Long> predicate = dimVal -> dimVal == ord;
  predicates.add(predicate);
}
}

BytesRef getBytesRefForField(final String term, final String field, final LeafReaderContext context) {
    if (field.contains("ip")) {
        return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(term)));
    }
    return BytesRef.deepCopyOf(BytesRefs.toBytesRef(term));
}
                   

Poc code reference


// Create a map with the field and the value
Map<String, Long> predicateMap = new HashMap<>();
predicateMap.put(field, inputQueryVal);
return predicateMap;
}

private static MetricStat validateStarTreeMetricSupport(
CompositeDataCubeFieldType compositeIndexFieldInfo,
AggregatorFactory aggregatorFactory
) {
if (aggregatorFactory instanceof MetricAggregatorFactory && aggregatorFactory.getSubFactories().getFactories().length == 0) {
String field;
Map<String, List<MetricStat>> supportedMetrics = compositeIndexFieldInfo.getMetrics()
.stream()
.collect(Collectors.toMap(Metric::getField, Metric::getMetrics));

MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat();
field = ((MetricAggregatorFactory) aggregatorFactory).getField();

if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) {
return metricStat;
}
}
return null;
}

public static CompositeIndexFieldInfo getSupportedStarTree(SearchContext context) {
StarTreeQueryContext starTreeQueryContext = context.getStarTreeQueryContext();
return (starTreeQueryContext != null) ? starTreeQueryContext.getStarTree() : null;
}

public static StarTreeValues getStarTreeValues(LeafReaderContext context, CompositeIndexFieldInfo starTree) throws IOException {
SegmentReader reader = Lucene.segmentReader(context.reader());
if (!(reader.getDocValuesReader() instanceof CompositeIndexReader)) {
return null;
}
CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader();
return (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(starTree);
}

/**
* Get the star-tree leaf collector
* This collector computes the aggregation prematurely and invokes an early termination collector
*/
public static LeafBucketCollector getStarTreeLeafCollector(
SearchContext context,
ValuesSource.Numeric valuesSource,
LeafReaderContext ctx,
LeafBucketCollector sub,
CompositeIndexFieldInfo starTree,
String metric,
Consumer<Long> valueConsumer,
Runnable finalConsumer
) throws IOException {
StarTreeValues starTreeValues = getStarTreeValues(ctx, starTree);
assert starTreeValues != null;
String fieldName = ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName();
String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues(starTree.getField(), fieldName, metric);

assert starTreeValues != null;
SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator(
metricName
);
// Obtain a FixedBitSet of matched star tree document IDs
FixedBitSet filteredValues = getStarTreeFilteredValues(context, ctx, starTreeValues);
assert filteredValues != null;

int numBits = filteredValues.length(); // Get the number of the filtered values (matching docs)
if (numBits > 0) {
// Iterate over the filtered values
for (int bit = filteredValues.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits)
? filteredValues.nextSetBit(bit + 1)
: DocIdSetIterator.NO_MORE_DOCS) {
// Advance to the entryId in the valuesIterator
if (valuesIterator.advanceExact(bit) == false) {
continue; // Skip if no more entries
}

// Iterate over the values for the current entryId
for (int i = 0, count = valuesIterator.valuesCount(); i < count; i++) {
long value = valuesIterator.nextValue();
valueConsumer.accept(value); // Apply the consumer operation (e.g., max, sum)
}
}
}

// Call the final consumer after processing all entries
finalConsumer.run();

// Return a LeafBucketCollector that terminates collection
return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) {
@Override
public void collect(int doc, long bucket) {
throw new CollectionTerminatedException();
}
};
}

/**
* Get the filtered values for the star-tree query
* Cache the results in case of multiple aggregations (if cache is initialized)
* @return FixedBitSet of matched document IDs
*/
public static FixedBitSet getStarTreeFilteredValues(SearchContext context, LeafReaderContext ctx, StarTreeValues starTreeValues)
throws IOException {
Comment on lines +232 to +233
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This method might get called from multiple threads if we use concurrent segment search. I suspect that you would get a ConcurrentModificationException with enough segments.

You could avoid that by initializing an array of FixedBitSet (of size context.searcher().getLeafContexts().size() and using ctx.ord as the key.

Since Lucene 10 introduces intra-segment concurrency, I think we'll eventually need a way to guarantee that only one thread per segment initializes this value.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Refactored the value cache map to startree context. Defining it as volatile should ensure thread visibility and initializing it with ConcurrentHashMap to avoid ConcurrentModificationException.

When upgrading to Lucene 10, I think that will warrant some work to resolve intra-segment concurrency. I think that calls out for a broader discussion in general. Most likely, only thread per [group of threads operating on same shard] will have to compute the aggregation result while the remaining will just do nothing in this case since the star-tree index presently won't be able to split as the default index in its current implementation.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Refactored the value cache map to startree context. Defining it as volatile should ensure thread visibility and initializing it with ConcurrentHashMap to avoid ConcurrentModificationException.

Ideally, we should be able to avoid any atomics (including ConcurrentHashMap). That's why I suggested preallocating an array whose size corresponds to the number of segments. If only one thread operates on each segment (which is likely also what we would want once we have intra-segment concurrency), then you can safely let each thread overwrite a null value in the array without worrying about ConcurrentModificationException, since no two threads will try touching the same value.

FixedBitSet result = context.getStarTreeQueryContext().getStarTreeValues(ctx);
if (result == null) {
StarTreeFilter filter = new StarTreeFilter(starTreeValues, context.getStarTreeQueryContext().getQueryMap());
result = filter.getStarTreeResult();
context.getStarTreeQueryContext().setStarTreeValues(ctx, result);
}
return result;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -29,4 +29,12 @@ public SortedNumericStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) {
/**
 * Returns the next metric value for the current entry by delegating to the
 * backing {@code SortedNumericDocValues}. Callers are expected to have
 * positioned the iterator (e.g. via {@code advanceExact}) first.
 */
public long nextValue() throws IOException {
return ((SortedNumericDocValues) docIdSetIterator).nextValue();
}

/**
 * Returns the number of values for the current entry, delegating to
 * {@code SortedNumericDocValues#docValueCount()}.
 */
public int valuesCount() throws IOException {
return ((SortedNumericDocValues) docIdSetIterator).docValueCount();
}

/**
 * Advances the iterator to exactly {@code target}, delegating to
 * {@code SortedNumericDocValues#advanceExact(int)}.
 *
 * @param target the entry id to advance to
 * @return true if the target entry has values, false otherwise
 */
public boolean advanceExact(int target) throws IOException {
return ((SortedNumericDocValues) docIdSetIterator).advanceExact(target);
}
}
26 changes: 21 additions & 5 deletions server/src/main/java/org/opensearch/search/SearchService.java
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.mapper.DerivedFieldResolver;
import org.opensearch.index.mapper.DerivedFieldResolverFactory;
Expand Down Expand Up @@ -137,6 +138,7 @@
import org.opensearch.search.sort.SortAndFormats;
import org.opensearch.search.sort.SortBuilder;
import org.opensearch.search.sort.SortOrder;
import org.opensearch.search.startree.StarTreeQueryContext;
import org.opensearch.search.suggest.Suggest;
import org.opensearch.search.suggest.completion.CompletionSuggestion;
import org.opensearch.tasks.TaskResourceTrackingService;
Expand Down Expand Up @@ -164,6 +166,7 @@
import static org.opensearch.common.unit.TimeValue.timeValueHours;
import static org.opensearch.common.unit.TimeValue.timeValueMillis;
import static org.opensearch.common.unit.TimeValue.timeValueMinutes;
import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED;

/**
* The main search service
Expand Down Expand Up @@ -1357,6 +1360,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc
context.evaluateRequestShouldUseConcurrentSearch();
return;
}

SearchShardTarget shardTarget = context.shardTarget();
QueryShardContext queryShardContext = context.getQueryShardContext();
context.from(source.from());
Expand All @@ -1370,7 +1374,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc
InnerHitContextBuilder.extractInnerHits(source.postFilter(), innerHitBuilders);
context.parsedPostFilter(queryShardContext.toQuery(source.postFilter()));
}
if (innerHitBuilders.size() > 0) {
if (!innerHitBuilders.isEmpty()) {
for (Map.Entry<String, InnerHitContextBuilder> entry : innerHitBuilders.entrySet()) {
try {
entry.getValue().build(context, context.innerHits());
Expand All @@ -1382,9 +1386,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc
if (source.sorts() != null) {
try {
Optional<SortAndFormats> optionalSort = SortBuilder.buildSort(source.sorts(), context.getQueryShardContext());
if (optionalSort.isPresent()) {
context.sort(optionalSort.get());
}
optionalSort.ifPresent(context::sort);
} catch (IOException e) {
throw new SearchException(shardTarget, "failed to create sort elements", e);
}
Expand Down Expand Up @@ -1539,6 +1541,20 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc
if (source.profile()) {
context.setProfilers(new Profilers(context.searcher(), context.shouldUseConcurrentSearch()));
}

if (this.indicesService.getCompositeIndexSettings() != null
&& this.indicesService.getCompositeIndexSettings().isStarTreeIndexCreationEnabled()
&& StarTreeQueryHelper.isStarTreeSupported(context)) {
try {
StarTreeQueryContext starTreeQueryContext = StarTreeQueryHelper.getStarTreeQueryContext(context, source);
if (starTreeQueryContext != null) {
context.starTreeQueryContext(starTreeQueryContext);
logger.debug("can use star tree");
} else {
logger.debug("cannot use star tree");
}
} catch (IOException ignored) {}
}
}

/**
Expand Down Expand Up @@ -1698,7 +1714,7 @@ public static boolean canMatchSearchAfter(
&& minMax != null
&& primarySortField != null
&& primarySortField.missing() == null
&& Objects.equals(trackTotalHitsUpto, SearchContext.TRACK_TOTAL_HITS_DISABLED)) {
&& Objects.equals(trackTotalHitsUpto, TRACK_TOTAL_HITS_DISABLED)) {
final Object searchAfterPrimary = searchAfter.fields[0];
if (primarySortField.order() == SortOrder.DESC) {
if (minMax.compareMin(searchAfterPrimary) > 0) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -661,4 +661,8 @@ public PipelineTree buildPipelineTree() {
return new PipelineTree(subTrees, aggregators);
}
}

/**
 * Returns the aggregator factories held by this instance.
 * NOTE(review): exposes the internal array directly — callers must treat
 * the returned array as read-only.
 */
public AggregatorFactory[] getFactories() {
return factories;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -127,4 +127,8 @@ protected boolean supportsConcurrentSegmentSearch() {
public boolean evaluateChildFactories() {
return factories.allFactoriesSupportConcurrentSearch();
}

/**
 * Returns the sub-aggregation factories of this factory; used to detect
 * leaf aggregations (no sub-aggregations) eligible for star-tree resolution.
 */
public AggregatorFactories getSubFactories() {
return factories;
}
}
Loading
Loading