diff --git a/CHANGELOG.md b/CHANGELOG.md index f86f173c64add..e4e96d222f74d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,10 +29,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Upgrade crypto kms plugin dependencies for AWS SDK v2.x. ([#18268](https://github.com/opensearch-project/OpenSearch/pull/18268)) - Add support for `matched_fields` with the unified highlighter ([#18164](https://github.com/opensearch-project/OpenSearch/issues/18164)) - [repository-s3] Add support for SSE-KMS and S3 bucket owner verification ([#18312](https://github.com/opensearch-project/OpenSearch/pull/18312)) +- Added File Cache Stats - Involves Block level as well as full file level stats ([#17538](https://github.com/opensearch-project/OpenSearch/issues/17479)) ### Changed - Create generic DocRequest to better categorize ActionRequests ([#18269](https://github.com/opensearch-project/OpenSearch/pull/18269))) - + ### Dependencies - Update Apache Lucene from 10.1.0 to 10.2.1 ([#17961](https://github.com/opensearch-project/OpenSearch/pull/17961)) - Bump `com.google.code.gson:gson` from 2.12.1 to 2.13.1 ([#17923](https://github.com/opensearch-project/OpenSearch/pull/17923), [#18266](https://github.com/opensearch-project/OpenSearch/pull/18266)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index 3ba885812bcfc..44e53836108de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -51,7 +51,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.plugins.ActionPlugin; @@ -193,7 +193,7 @@ public void testClusterInfoServiceCollectsInformation() { assertThat("shard size is greater than 0", size, greaterThanOrEqualTo(0L)); } - final Map nodeFileCacheStats = info.nodeFileCacheStats; + final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); assertThat("file cache is empty on non warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(0)); @@ -227,12 +227,12 @@ public void testClusterInfoServiceCollectsFileCacheInformation() { infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); ClusterInfo info = infoService.refresh(); assertNotNull("info should not be null", info); - final Map nodeFileCacheStats = info.nodeFileCacheStats; + final Map nodeFileCacheStats = info.nodeFileCacheStats; assertNotNull(nodeFileCacheStats); assertThat("file cache is enabled on both warm nodes", nodeFileCacheStats.size(), Matchers.equalTo(2)); - for (FileCacheStats fileCacheStats : nodeFileCacheStats.values()) { - assertThat("file cache is non empty", fileCacheStats.getTotal().getBytes(), greaterThan(0L)); + for (AggregateFileCacheStats aggregateFileCacheStats : nodeFileCacheStats.values()) { + assertThat("file cache is non empty", aggregateFileCacheStats.getTotal().getBytes(), greaterThan(0L)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index f258822ddd426..6674bbb5afd24 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -12,6 +12,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; @@ -27,6 +30,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.CompositeDirectory; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.utils.FileTypeUtils; import org.opensearch.indices.IndicesService; @@ -36,7 +40,9 @@ import java.util.Arrays; import java.util.HashSet; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -49,6 +55,7 @@ public class WritableWarmIT extends RemoteStoreBaseIntegTestCase { protected static final String INDEX_NAME = "test-idx-1"; + protected static final String INDEX_NAME_2 = "test-idx-2"; protected static final int NUM_DOCS_IN_BULK = 1000; /* @@ -172,4 +179,82 @@ public void testWritableWarmBasic() throws Exception { assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); fileCache.prune(); } + + public void testFullFileAndFileCacheStats() throws ExecutionException, InterruptedException { + + InternalTestCluster internalTestCluster = internalCluster(); + internalTestCluster.startClusterManagerOnlyNode(); + internalTestCluster.startDataAndWarmNodes(1); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name()) + .build(); + + assertAcked(client().admin().indices().prepareCreate(INDEX_NAME_2).setSettings(settings).get()); + + // Verify from the cluster settings if the data locality is partial + GetIndexResponse getIndexResponse = client().admin() + .indices() + .getIndex(new GetIndexRequest().indices(INDEX_NAME_2).includeDefaults(true)) + .get(); + + Settings indexSettings = getIndexResponse.settings().get(INDEX_NAME_2); + assertEquals(IndexModule.DataLocalityType.PARTIAL.name(), indexSettings.get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())); + + // Ingesting docs again before force merge + indexBulk(INDEX_NAME_2, NUM_DOCS_IN_BULK); + flushAndRefresh(INDEX_NAME_2); + + // ensuring cluster is green + ensureGreen(); + + SearchResponse searchResponse = client().prepareSearch(INDEX_NAME_2).setQuery(QueryBuilders.matchAllQuery()).get(); + // Asserting that search returns same number of docs as ingested + assertHitCount(searchResponse, NUM_DOCS_IN_BULK); + + // Ingesting docs again before force merge + indexBulk(INDEX_NAME_2, 
NUM_DOCS_IN_BULK); + flushAndRefresh(INDEX_NAME_2); + + FileCache fileCache = internalTestCluster.getDataNodeInstance(Node.class).fileCache(); + + // TODO: Make these validation more robust, when SwitchableIndexInput is implemented. + + NodesStatsResponse nodesStatsResponse = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); + + AggregateFileCacheStats fileCacheStats = nodesStatsResponse.getNodes() + .stream() + .filter(n -> n.getNode().isDataNode()) + .toList() + .getFirst() + .getFileCacheStats(); + + if (Objects.isNull(fileCacheStats)) { + fail("File Cache Stats should not be null"); + } + + // Deleting the index (so that ref count drops to zero for all the files) and then pruning the cache to clear it to avoid any file + // leaks + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME_2)).get()); + fileCache.prune(); + + NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); + int nonEmptyFileCacheNodes = 0; + for (NodeStats stats : response.getNodes()) { + AggregateFileCacheStats fcStats = stats.getFileCacheStats(); + if (Objects.isNull(fcStats) == false) { + if (isFileCacheEmpty(fcStats) == false) { + nonEmptyFileCacheNodes++; + } + } + } + assertEquals(0, nonEmptyFileCacheNodes); + + } + + private boolean isFileCacheEmpty(AggregateFileCacheStats stats) { + return stats.getUsed().getBytes() == 0L && stats.getActive().getBytes() == 0L; + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 5cc4d0706888e..6001e5636014b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -44,7 +44,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.node.Node; import org.opensearch.repositories.fs.FsRepository; @@ -711,7 +711,7 @@ private void assertIndexDirectoryDoesNotExist(String... 
indexNames) { private void assertAllNodesFileCacheEmpty() { NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); for (NodeStats stats : response.getNodes()) { - FileCacheStats fcstats = stats.getFileCacheStats(); + AggregateFileCacheStats fcstats = stats.getFileCacheStats(); if (fcstats != null) { assertTrue(isFileCacheEmpty(fcstats)); } @@ -722,7 +722,7 @@ private void assertNodesFileCacheNonEmpty(int numNodes) { NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet(); int nonEmptyFileCacheNodes = 0; for (NodeStats stats : response.getNodes()) { - FileCacheStats fcStats = stats.getFileCacheStats(); + AggregateFileCacheStats fcStats = stats.getFileCacheStats(); if (stats.getNode().isWarmNode()) { if (!isFileCacheEmpty(fcStats)) { nonEmptyFileCacheNodes++; @@ -735,7 +735,7 @@ private void assertNodesFileCacheNonEmpty(int numNodes) { assertEquals(numNodes, nonEmptyFileCacheNodes); } - private boolean isFileCacheEmpty(FileCacheStats stats) { + private boolean isFileCacheEmpty(AggregateFileCacheStats stats) { return stats.getUsed().getBytes() == 0L && stats.getActive().getBytes() == 0L; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index eb79e3403a25c..e8f0ea56cff8d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -50,7 +50,7 @@ import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.ingest.IngestStats; import org.opensearch.monitor.fs.FsInfo; @@ -143,7 +143,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { private WeightedRoutingStats weightedRoutingStats; @Nullable - private FileCacheStats fileCacheStats; + private AggregateFileCacheStats fileCacheStats; @Nullable private TaskCancellationStats taskCancellationStats; @@ -208,7 +208,7 @@ public NodeStats(StreamInput in) throws IOException { weightedRoutingStats = null; } if (in.getVersion().onOrAfter(Version.V_2_7_0)) { - fileCacheStats = in.readOptionalWriteable(FileCacheStats::new); + fileCacheStats = in.readOptionalWriteable(AggregateFileCacheStats::new); } else { fileCacheStats = null; } @@ -277,7 +277,7 @@ public NodeStats( @Nullable SearchBackpressureStats searchBackpressureStats, @Nullable ClusterManagerThrottlingStats clusterManagerThrottlingStats, @Nullable WeightedRoutingStats weightedRoutingStats, - @Nullable FileCacheStats fileCacheStats, + @Nullable AggregateFileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, @Nullable SearchPipelineStats searchPipelineStats, @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, @@ -444,7 +444,7 @@ public WeightedRoutingStats getWeightedRoutingStats() { return weightedRoutingStats; } - public FileCacheStats getFileCacheStats() { + public AggregateFileCacheStats getFileCacheStats() { return fileCacheStats; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java 
b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 7216c447acc3e..5d286d1af4a62 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -43,7 +43,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import java.io.IOException; import java.util.Collections; @@ -68,7 +68,7 @@ public class ClusterInfo implements ToXContentFragment, Writeable { public static final ClusterInfo EMPTY = new ClusterInfo(); final Map routingToDataPath; final Map reservedSpace; - final Map nodeFileCacheStats; + final Map nodeFileCacheStats; private long avgTotalBytes; private long avgFreeByte; @@ -92,7 +92,7 @@ public ClusterInfo( final Map shardSizes, final Map routingToDataPath, final Map reservedSpace, - final Map nodeFileCacheStats + final Map nodeFileCacheStats ) { this.leastAvailableSpaceUsage = leastAvailableSpaceUsage; this.shardSizes = shardSizes; @@ -117,7 +117,7 @@ public ClusterInfo(StreamInput in) throws IOException { this.routingToDataPath = Collections.unmodifiableMap(routingMap); this.reservedSpace = Collections.unmodifiableMap(reservedSpaceMap); if (in.getVersion().onOrAfter(Version.V_2_10_0)) { - this.nodeFileCacheStats = in.readMap(StreamInput::readString, FileCacheStats::new); + this.nodeFileCacheStats = in.readMap(StreamInput::readString, AggregateFileCacheStats::new); } else { this.nodeFileCacheStats = Map.of(); } @@ -242,7 +242,7 @@ public Map getNodeMostAvailableDiskUsages() { /** * Returns a node id to file cache stats mapping for the nodes that have search roles assigned to it. 
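* <p>
* A minimal sketch of a consumer-side lookup, assuming {@code clusterInfo} and {@code nodeId} are
* available in the calling context (this mirrors the lookup done in {@code WarmDiskThresholdDecider}
* later in this change):
* <pre>{@code
* AggregateFileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(nodeId, null);
* long nodeCacheSize = fileCacheStats != null ? fileCacheStats.getTotal().getBytes() : 0;
* }</pre>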
*/ - public Map getNodeFileCacheStats() { + public Map getNodeFileCacheStats() { return Collections.unmodifiableMap(this.nodeFileCacheStats); } diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 804325dc1f565..2024cc0504891 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -58,7 +58,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.store.StoreStats; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ReceiveTimeoutTransportException; @@ -112,7 +112,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private volatile Map leastAvailableSpaceUsages; private volatile Map mostAvailableSpaceUsages; - private volatile Map nodeFileCacheStats; + private volatile Map nodeFileCacheStats; private volatile IndicesStatsSummary indicesStatsSummary; // null if this node is not currently the cluster-manager private final AtomicReference refreshAndRescheduleRunnable = new AtomicReference<>(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDecider.java index c5db09fcbd608..5d1b970c8a5b9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDecider.java @@ -43,8 +43,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.index.store.remote.filecache.FileCacheSettings; -import org.opensearch.index.store.remote.filecache.FileCacheStats; import java.util.List; import java.util.stream.Collectors; @@ -280,7 +280,7 @@ private long calculateTotalAddressableSpace(RoutingNode node, RoutingAllocation ClusterInfo clusterInfo = allocation.clusterInfo(); // TODO: Change the default value to 5 instead of 0 final double dataToFileCacheSizeRatio = fileCacheSettings.getRemoteDataRatio(); - final FileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); + final AggregateFileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); final long nodeCacheSize = fileCacheStats != null ? 
fileCacheStats.getTotal().getBytes() : 0; return (long) dataToFileCacheSizeRatio * nodeCacheSize; } @@ -309,7 +309,7 @@ private Decision earlyTerminate(RoutingNode node, RoutingAllocation allocation) } // Fail open if there are no file cache stats available - final FileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); + final AggregateFileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); if (fileCacheStats == null) { if (logger.isTraceEnabled()) { logger.trace("unable to get file cache stats for node [{}], allowing allocation", node.nodeId()); diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java index 7434283ff6f41..1978a2bee4cf0 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -111,7 +111,7 @@ private void resetHelper(Node current) { @Override public long count() { - // Include this here so caches don't have to create an entire CacheStats object to run count(). + // Include this here so caches don't have to create an entire AggregateRefCountedCacheStats object to run count(). return statsRoot.getEntries(); } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index db23e7b877596..68472562fa976 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -20,7 +20,7 @@ import java.util.Objects; /** - * An immutable snapshot of CacheStats. + * An immutable snapshot of AggregateRefCountedCacheStats. * * @opensearch.experimental */ diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStats.java new file mode 100644 index 0000000000000..237724ed7a6a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStats.java @@ -0,0 +1,177 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.filecache; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.EnumSet; + +/** + * Statistics for the file cache system that tracks memory usage and performance metrics. + * {@link FileCache} internally uses a {@link org.opensearch.index.store.remote.utils.cache.SegmentedCache} + * to manage cached file data in memory segments. 
+ * This class aggregates statistics across all cache segments including: + * - Memory usage (total, active, used) + * - Cache performance (hits, misses, evictions) + * - Utilization percentages + * The statistics are exposed via {@link org.opensearch.action.admin.cluster.node.stats.NodeStats} + * to provide visibility into cache behavior and performance. + * + * @opensearch.api + */ +@ExperimentalApi +public class AggregateFileCacheStats implements Writeable, ToXContentFragment { + + private final long timestamp; + private final FileCacheStats overallFileCacheStats; + private final FileCacheStats fullFileCacheStats; + private final FileCacheStats blockFileCacheStats; + + public AggregateFileCacheStats( + final long timestamp, + final FileCacheStats overallFileCacheStats, + final FileCacheStats fullFileCacheStats, + final FileCacheStats blockFileCacheStats + ) { + this.timestamp = timestamp; + this.overallFileCacheStats = overallFileCacheStats; + this.fullFileCacheStats = fullFileCacheStats; + this.blockFileCacheStats = blockFileCacheStats; + } + + public AggregateFileCacheStats(final StreamInput in) throws IOException { + this.timestamp = in.readLong(); + this.overallFileCacheStats = new FileCacheStats(in); + this.fullFileCacheStats = new FileCacheStats(in); + this.blockFileCacheStats = new FileCacheStats(in); + } + + public static short calculatePercentage(long used, long max) { + return max <= 0 ? 0 : (short) (Math.round((100d * used) / max)); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeLong(timestamp); + overallFileCacheStats.writeTo(out); + fullFileCacheStats.writeTo(out); + blockFileCacheStats.writeTo(out); + } + + public long getTimestamp() { + return timestamp; + } + + public ByteSizeValue getTotal() { + return new ByteSizeValue(overallFileCacheStats.getTotal()); + } + + public ByteSizeValue getActive() { + return new ByteSizeValue(overallFileCacheStats.getActive()); + } + + public short getActivePercent() { + return calculatePercentage(overallFileCacheStats.getActive(), overallFileCacheStats.getUsed()); + } + + public ByteSizeValue getUsed() { + return new ByteSizeValue(overallFileCacheStats.getUsed()); + } + + public short getUsedPercent() { + return calculatePercentage(getUsed().getBytes(), getTotal().getBytes()); + } + + public ByteSizeValue getEvicted() { + return new ByteSizeValue(overallFileCacheStats.getEvicted()); + } + + public long getCacheHits() { + return overallFileCacheStats.getCacheHits(); + } + + public long getCacheMisses() { + return overallFileCacheStats.getCacheMisses(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.AGGREGATE_FILE_CACHE); + builder.field(Fields.TIMESTAMP, getTimestamp()); + builder.humanReadableField(Fields.ACTIVE_IN_BYTES, Fields.ACTIVE, getActive()); + builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal()); + builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, getUsed()); + builder.humanReadableField(Fields.EVICTIONS_IN_BYTES, Fields.EVICTIONS, getEvicted()); + builder.field(Fields.ACTIVE_PERCENT, getActivePercent()); + builder.field(Fields.USED_PERCENT, getUsedPercent()); + builder.field(Fields.HIT_COUNT, getCacheHits()); + builder.field(Fields.MISS_COUNT, getCacheMisses()); + overallFileCacheStats.toXContent(builder, params); + fullFileCacheStats.toXContent(builder, params); + blockFileCacheStats.toXContent(builder, params); + + builder.endObject(); + return 
builder; + } + + static final class Fields { + static final String AGGREGATE_FILE_CACHE = "aggregate_file_cache"; + static final String TIMESTAMP = "timestamp"; + static final String ACTIVE = "active"; + static final String ACTIVE_IN_BYTES = "active_in_bytes"; + static final String USED = "used"; + static final String USED_IN_BYTES = "used_in_bytes"; + static final String EVICTIONS = "evictions"; + static final String EVICTIONS_IN_BYTES = "evictions_in_bytes"; + static final String TOTAL = "total"; + static final String TOTAL_IN_BYTES = "total_in_bytes"; + + static final String ACTIVE_PERCENT = "active_percent"; + static final String USED_PERCENT = "used_percent"; + + static final String HIT_COUNT = "hit_count"; + static final String MISS_COUNT = "miss_count"; + } + + /** + * File Cache Stats Type. + */ + @ExperimentalApi + public enum FileCacheStatsType { + FULL_FILE_STATS("full_file_stats"), + BLOCK_FILE_STATS("block_file_stats"), + OVER_ALL_STATS("over_all_stats"); + + private final String fileCacheStatsType; + + FileCacheStatsType(String fileCacheStatsType) { + this.fileCacheStatsType = fileCacheStatsType; + } + + @Override + public String toString() { + return fileCacheStatsType; + } + + public static FileCacheStatsType fromString(String fileCacheStatsType) { + return EnumSet.allOf(FileCacheStatsType.class) + .stream() + .filter(f -> f.fileCacheStatsType.equals(fileCacheStatsType)) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException("Found invalid fileCacheStatsType.")); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 4ddde7f850ba3..c43cc7d1f3b18 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -14,10 +14,12 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; -import org.opensearch.index.store.remote.utils.cache.CacheUsage; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import org.opensearch.index.store.remote.utils.cache.RefCountedCache; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.AggregateRefCountedCacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.IRefCountedCacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.RefCountedCacheStats; import java.io.IOException; import java.io.UncheckedIOException; @@ -138,20 +140,27 @@ public long prune(Predicate keyPredicate) { } @Override - public CacheUsage usage() { - return theCache.usage(); + public long usage() { + long l = theCache.usage(); + return l; } @Override - public CacheStats stats() { + public long activeUsage() { + long l = theCache.activeUsage(); + return l; + } + + @Override + public IRefCountedCacheStats stats() { return theCache.stats(); } // To be used only for debugging purposes public void logCurrentState() { logger.trace("CURRENT STATE OF FILE CACHE \n"); - CacheUsage cacheUsage = theCache.usage(); - logger.trace("Total Usage: " + cacheUsage.usage() + " , Active Usage: " + cacheUsage.activeUsage()); + long cacheUsage = theCache.usage(); + logger.trace("Total Usage: " + 
cacheUsage + " , Active Usage: " + theCache.activeUsage()); theCache.logCurrentState(); } @@ -208,19 +217,43 @@ public void restoreFromDirectory(List fileCacheDataPaths) { } /** - * Returns the current {@link FileCacheStats} + * Returns the current {@link AggregateFileCacheStats} */ - public FileCacheStats fileCacheStats() { - CacheStats stats = stats(); - CacheUsage usage = usage(); - return new FileCacheStats( + public AggregateFileCacheStats fileCacheStats() { + final AggregateRefCountedCacheStats stats = (AggregateRefCountedCacheStats) stats(); + + final RefCountedCacheStats overallCacheStats = stats.getOverallCacheStats(); + final RefCountedCacheStats fullFileCacheStats = stats.getFullFileCacheStats(); + final RefCountedCacheStats blockFileCacheStats = stats.getBlockFileCacheStats(); + return new AggregateFileCacheStats( System.currentTimeMillis(), - usage.activeUsage(), - capacity(), - usage.usage(), - stats.evictionWeight(), - stats.hitCount(), - stats.missCount() + new FileCacheStats( + overallCacheStats.activeUsage(), + capacity(), + overallCacheStats.usage(), + overallCacheStats.evictionWeight(), + overallCacheStats.hitCount(), + overallCacheStats.missCount(), + FileCacheStatsType.OVER_ALL_STATS + ), + new FileCacheStats( + fullFileCacheStats.activeUsage(), + capacity(), + fullFileCacheStats.usage(), + fullFileCacheStats.evictionWeight(), + fullFileCacheStats.hitCount(), + fullFileCacheStats.missCount(), + FileCacheStatsType.FULL_FILE_STATS + ), + new FileCacheStats( + blockFileCacheStats.activeUsage(), + capacity(), + blockFileCacheStats.usage(), + blockFileCacheStats.evictionWeight(), + blockFileCacheStats.hitCount(), + blockFileCacheStats.missCount(), + FileCacheStatsType.BLOCK_FILE_STATS + ) ); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java index 070fd663896a3..ee3410914fd92 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java @@ -8,52 +8,61 @@ package org.opensearch.index.store.remote.filecache; -import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import java.io.IOException; +import static org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.calculatePercentage; + /** - * Statistics on file cache + * Statistics for the file cache system that tracks memory usage and performance metrics. + * Aggregates statistics across all cache segments including: + * - Memory usage: active and used bytes. + * - Cache performance: hit counts and eviction counts. + * - Utilization: active percentage of total used memory. + * The statistics are exposed as part of {@link AggregateFileCacheStats} and via {@link org.opensearch.action.admin.cluster.node.stats.NodeStats} + * to provide visibility into cache behavior and performance. 
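+ * <p>
+ * A hypothetical construction, mirroring how {@link FileCache#fileCacheStats()} assembles the per-type
+ * entries later in this change (the argument names are placeholders, not real measurements):
+ * <pre>{@code
+ * FileCacheStats blockStats = new FileCacheStats(
+ *     activeBytes, capacityBytes, usedBytes, evictedBytes, hitCount, missCount,
+ *     FileCacheStatsType.BLOCK_FILE_STATS);
+ * }</pre>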
* * @opensearch.api */ -@PublicApi(since = "2.7.0") +@ExperimentalApi public class FileCacheStats implements Writeable, ToXContentFragment { - private final long timestamp; private final long active; private final long total; private final long used; private final long evicted; private final long hits; private final long misses; + private final FileCacheStatsType statsType; public FileCacheStats( - final long timestamp, final long active, - final long total, + long total, final long used, final long evicted, final long hits, - final long misses + long misses, + FileCacheStatsType statsType ) { - this.timestamp = timestamp; this.active = active; this.total = total; this.used = used; this.evicted = evicted; this.hits = hits; this.misses = misses; + this.statsType = statsType; } public FileCacheStats(final StreamInput in) throws IOException { - this.timestamp = in.readLong(); + this.statsType = FileCacheStatsType.fromString(in.readString()); this.active = in.readLong(); this.total = in.readLong(); this.used = in.readLong(); @@ -62,13 +71,9 @@ public FileCacheStats(final StreamInput in) throws IOException { this.misses = in.readLong(); } - public static short calculatePercentage(long used, long max) { - return max <= 0 ? 0 : (short) (Math.round((100d * used) / max)); - } - @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeLong(timestamp); + out.writeString(statsType.toString()); out.writeLong(active); out.writeLong(total); out.writeLong(used); @@ -77,32 +82,32 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeLong(misses); } - public long getTimestamp() { - return timestamp; + public long getActive() { + return active; } - public ByteSizeValue getTotal() { - return new ByteSizeValue(total); + public long getUsed() { + return used; } - public ByteSizeValue getActive() { - return new ByteSizeValue(active); + public long getEvicted() { + return evicted; + } + + public long getHits() { + return hits; } public short getActivePercent() { return calculatePercentage(active, used); } - public ByteSizeValue getUsed() { - return new ByteSizeValue(used); + public long getTotal() { + return total; } public short getUsedPercent() { - return calculatePercentage(getUsed().getBytes(), total); - } - - public ByteSizeValue getEvicted() { - return new ByteSizeValue(evicted); + return calculatePercentage(getUsed(), total); } public long getCacheHits() { @@ -113,38 +118,30 @@ public long getCacheMisses() { return misses; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.FILE_CACHE); - builder.field(Fields.TIMESTAMP, getTimestamp()); - builder.humanReadableField(Fields.ACTIVE_IN_BYTES, Fields.ACTIVE, getActive()); - builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal()); - builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, getUsed()); - builder.humanReadableField(Fields.EVICTIONS_IN_BYTES, Fields.EVICTIONS, getEvicted()); - builder.field(Fields.ACTIVE_PERCENT, getActivePercent()); - builder.field(Fields.USED_PERCENT, getUsedPercent()); - builder.field(Fields.HIT_COUNT, getCacheHits()); - builder.field(Fields.MISS_COUNT, getCacheMisses()); - builder.endObject(); - return builder; - } - static final class Fields { - static final String FILE_CACHE = "file_cache"; - static final String TIMESTAMP = "timestamp"; static final String ACTIVE = "active"; static final String ACTIVE_IN_BYTES = "active_in_bytes"; static final String USED = "used"; 
static final String USED_IN_BYTES = "used_in_bytes"; static final String EVICTIONS = "evictions"; static final String EVICTIONS_IN_BYTES = "evictions_in_bytes"; - static final String TOTAL = "total"; - static final String TOTAL_IN_BYTES = "total_in_bytes"; - static final String ACTIVE_PERCENT = "active_percent"; - static final String USED_PERCENT = "used_percent"; - static final String HIT_COUNT = "hit_count"; - static final String MISS_COUNT = "miss_count"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(statsType.toString()); + builder.humanReadableField(FileCacheStats.Fields.ACTIVE_IN_BYTES, FileCacheStats.Fields.ACTIVE, new ByteSizeValue(getActive())); + builder.humanReadableField(FileCacheStats.Fields.USED_IN_BYTES, FileCacheStats.Fields.USED, new ByteSizeValue(getUsed())); + builder.humanReadableField( + FileCacheStats.Fields.EVICTIONS_IN_BYTES, + FileCacheStats.Fields.EVICTIONS, + new ByteSizeValue(getEvicted()) + ); + builder.field(FileCacheStats.Fields.ACTIVE_PERCENT, getActivePercent()); + builder.field(FileCacheStats.Fields.HIT_COUNT, getHits()); + builder.endObject(); + return builder; } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index 2b619c26f49c0..d7f484bb26a79 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -111,13 +111,13 @@ private static FileCachedIndexInput createIndexInput(FileCache fileCache, Stream try { // This local file cache is ref counted and may not strictly enforce configured capacity. // If we find available capacity is exceeded, deny further BlobFetchRequests. - if (fileCache.capacity() < fileCache.usage().usage()) { + if (fileCache.capacity() < fileCache.usage()) { fileCache.prune(); throw new IOException( "Local file cache capacity (" + fileCache.capacity() + ") exceeded (" - + fileCache.usage().usage() + + fileCache.usage() + ") - BlobFetchRequest failed: " + request.getFilePath() ); diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java deleted file mode 100644 index 0b5480d3ca978..0000000000000 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.store.remote.utils.cache; - -import org.opensearch.common.annotation.PublicApi; - -/** - * Usage metrics for {@link RefCountedCache} - * - * @opensearch.internal - */ -@PublicApi(since = "2.7.0") -public class CacheUsage { - /** - * Cache usage of the system - */ - private final long usage; - - /** - * Cache usage by entries which are referenced - */ - private final long activeUsage; - - public CacheUsage(long usage, long activeUsage) { - this.usage = usage; - this.activeUsage = activeUsage; - } - - public long usage() { - return usage; - } - - public long activeUsage() { - return activeUsage; - } -} diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java index 18c3ba70d7bfb..c0ce53ff51471 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java @@ -14,8 +14,8 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.Weigher; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; -import org.opensearch.index.store.remote.utils.cache.stats.DefaultStatsCounter; +import org.opensearch.index.store.remote.utils.cache.stats.FileStatsCounter; +import org.opensearch.index.store.remote.utils.cache.stats.IRefCountedCacheStats; import org.opensearch.index.store.remote.utils.cache.stats.StatsCounter; import java.util.HashMap; @@ -29,7 +29,7 @@ /** * LRU implementation of {@link RefCountedCache}. As long as {@link Node#refCount} greater than 0 then node is not eligible for eviction. - * So this is best effort lazy cache to maintain capacity.
+ * So this is a best-effort lazy cache to maintain capacity.
* For more context why in-house cache implementation exist look at * this comment and * this ticket for future plans @@ -58,20 +58,10 @@ class LRUCache implements RefCountedCache { private final Weigher weigher; - private final StatsCounter statsCounter; + private final StatsCounter statsCounter; private final ReentrantLock lock; - /** - * this tracks cache usage on the system (as long as cache entry is in the cache) - */ - private long usage; - - /** - * this tracks cache usage only by entries which are being referred ({@link Node#refCount > 0}) - */ - private long activeUsage; - static class Node { final K key; @@ -100,7 +90,7 @@ public LRUCache(long capacity, RemovalListener listener, Weigher weighe this.data = new HashMap<>(); this.lru = new LinkedHashMap<>(); this.lock = new ReentrantLock(); - this.statsCounter = new DefaultStatsCounter<>(); + this.statsCounter = new FileStatsCounter<>(); } @@ -117,7 +107,7 @@ public V get(K key) { } // hit incRef(key); - statsCounter.recordHits(key, 1); + statsCounter.recordHits(key, node.value, 1); return node.value; } finally { lock.unlock(); @@ -168,7 +158,7 @@ public V compute(K key, BiFunction remappingF removeNode(key); return null; } else { - statsCounter.recordHits(key, 1); + statsCounter.recordHits(key, node.value, 1); replaceNode(node, newValue); return newValue; } @@ -193,16 +183,16 @@ public void remove(K key) { public void clear() { lock.lock(); try { - usage = 0L; - activeUsage = 0L; lru.clear(); final Iterator> iterator = data.values().iterator(); while (iterator.hasNext()) { Node node = iterator.next(); iterator.remove(); - statsCounter.recordRemoval(node.weight); + statsCounter.recordRemoval(node.value, node.weight); listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); } + statsCounter.resetUsage(); + statsCounter.resetActiveUsage(); } finally { lock.unlock(); } @@ -222,7 +212,7 @@ public void incRef(K key) { if (node != null) { if (node.refCount == 0) { // if it was inactive, we should add the weight to active usage from now - activeUsage += node.weight; + statsCounter.recordActiveUsage(node.value, node.weight, false); } if (node.evictable()) { @@ -254,7 +244,7 @@ public void decRef(K key) { if (node.refCount == 0) { // if it was active, we should remove its weight from active usage - activeUsage -= node.weight; + statsCounter.recordActiveUsage(node.value, node.weight, true); } } } finally { @@ -291,10 +281,9 @@ public long prune(Predicate keyPredicate) { iterator.remove(); data.remove(node.key, node); sum += node.weight; - statsCounter.recordRemoval(node.weight); + statsCounter.recordRemoval(node.value, node.weight); listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); } - usage -= sum; } finally { lock.unlock(); } @@ -302,17 +291,28 @@ public long prune(Predicate keyPredicate) { } @Override - public CacheUsage usage() { + public long usage() { + lock.lock(); + try { + return statsCounter.usage(); + } finally { + lock.unlock(); + } + } + + @Override + + public long activeUsage() { lock.lock(); try { - return new CacheUsage(usage, activeUsage); + return statsCounter.activeUsage(); } finally { lock.unlock(); } } @Override - public CacheStats stats() { + public IRefCountedCacheStats stats() { lock.lock(); try { return statsCounter.snapshot(); @@ -348,7 +348,7 @@ private void addNode(K key, V value) { final long weight = weigher.weightOf(value); Node newNode = new Node<>(key, value, weight); data.put(key, newNode); - usage += weight; + 
statsCounter.recordUsage(value, weight, false); incRef(key); evict(); } @@ -361,13 +361,9 @@ private void replaceNode(Node node, V newValue) { // update the value and weight node.value = newValue; node.weight = newWeight; - // update usage - final long weightDiff = newWeight - oldWeight; - if (node.refCount > 0) { - activeUsage += weightDiff; - } - usage += weightDiff; - statsCounter.recordReplacement(); + + // update stats + statsCounter.recordReplacement(oldValue, newValue, oldWeight, newWeight, node.refCount > 0); listener.onRemoval(new RemovalNotification<>(node.key, oldValue, RemovalReason.REPLACED)); } incRef(node.key); @@ -378,19 +374,18 @@ private void removeNode(K key) { Node node = data.remove(key); if (node != null) { if (node.refCount > 0) { - activeUsage -= node.weight; + statsCounter.recordActiveUsage(node.value, node.weight, true); } - usage -= node.weight; if (node.evictable()) { lru.remove(node.key); } - statsCounter.recordRemoval(node.weight); + statsCounter.recordRemoval(node.value, node.weight); listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); } } private boolean hasOverflowed() { - return usage >= capacity; + return statsCounter.usage() >= capacity; } private void evict() { @@ -402,8 +397,7 @@ private void evict() { iterator.remove(); // Notify the listener only if the entry was evicted data.remove(node.key, node); - usage -= node.weight; - statsCounter.recordEviction(node.weight); + statsCounter.recordEviction(node.value, node.weight); listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.CAPACITY)); } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java index 4b16218641acc..ff72b190932aa 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java @@ -8,7 +8,7 @@ package org.opensearch.index.store.remote.utils.cache; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.IRefCountedCacheStats; import java.util.function.BiFunction; import java.util.function.Predicate; @@ -103,7 +103,14 @@ default long prune() { * * @return the combined weight of the values in this cache */ - CacheUsage usage(); + long usage(); + + /** + * Returns the active usage of this cache. + * + * @return the combined active weight of the values in this cache. + */ + long activeUsage(); /** * Returns a current snapshot of this cache's cumulative statistics. 
All statistics are @@ -114,5 +121,5 @@ default long prune() { * * @return the current snapshot of the statistics of this cache */ - CacheStats stats(); + IRefCountedCacheStats stats(); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java index 735916788f875..feb9c3dd4fcdd 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java @@ -13,7 +13,9 @@ import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.Weigher; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.AggregateRefCountedCacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.IRefCountedCacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.RefCountedCacheStats; import java.util.Objects; import java.util.function.BiFunction; @@ -158,38 +160,42 @@ public long prune(Predicate keyPredicate) { } @Override - public CacheUsage usage() { - long usage = 0L; - long activeUsage = 0L; + public long usage() { + long totalUsage = 0L; for (RefCountedCache cache : table) { - CacheUsage c = cache.usage(); - usage += c.usage(); - activeUsage += c.activeUsage(); + IRefCountedCacheStats c = cache.stats(); + totalUsage += c.usage(); + } - return new CacheUsage(usage, activeUsage); + return totalUsage; } @Override - public CacheStats stats() { - long hitCount = 0L; - long missCount = 0L; - long removeCount = 0L; - long removeWeight = 0L; - long replaceCount = 0L; - long evictionCount = 0L; - long evictionWeight = 0L; + public long activeUsage() { + long totalActiveUsage = 0L; + for (RefCountedCache cache : table) { + IRefCountedCacheStats c = cache.stats(); + totalActiveUsage += c.activeUsage(); + } + return totalActiveUsage; + } + + @Override + public IRefCountedCacheStats stats() { + + final RefCountedCacheStats totalOverallCacheStats = new RefCountedCacheStats(0, 0, 0, 0, 0, 0, 0, 0, 0); + final RefCountedCacheStats totalFullFileCacheStats = new RefCountedCacheStats(0, 0, 0, 0, 0, 0, 0, 0, 0); + final RefCountedCacheStats totalBlockFileCacheStats = new RefCountedCacheStats(0, 0, 0, 0, 0, 0, 0, 0, 0); for (RefCountedCache cache : table) { - CacheStats c = cache.stats(); - hitCount += c.hitCount(); - missCount += c.missCount(); - removeCount += c.removeCount(); - removeWeight += c.removeWeight(); - replaceCount += c.replaceCount(); - evictionCount += c.evictionCount(); - evictionWeight += c.evictionWeight(); + AggregateRefCountedCacheStats aggregateStats = (AggregateRefCountedCacheStats) cache.stats(); + + totalOverallCacheStats.accumulate(aggregateStats.getOverallCacheStats()); + totalFullFileCacheStats.accumulate(aggregateStats.getFullFileCacheStats()); + totalBlockFileCacheStats.accumulate(aggregateStats.getBlockFileCacheStats()); } - return new CacheStats(hitCount, missCount, removeCount, removeWeight, replaceCount, evictionCount, evictionWeight); + + return new AggregateRefCountedCacheStats(totalOverallCacheStats, totalFullFileCacheStats, totalBlockFileCacheStats); } // To be used only for debugging purposes diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/AggregateRefCountedCacheStats.java 
b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/AggregateRefCountedCacheStats.java new file mode 100644 index 0000000000000..2ad0dd7428628 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/AggregateRefCountedCacheStats.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.utils.cache.stats; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.index.store.remote.utils.cache.RefCountedCache; + +import java.util.Objects; + +/** + * Statistics about the Cumulative performance of a {@link RefCountedCache}. + * + * @opensearch.api + */ +@PublicApi(since = "2.7.0") +public final class AggregateRefCountedCacheStats implements IRefCountedCacheStats { + + private final RefCountedCacheStats overallCacheStats; + private final RefCountedCacheStats fullFileCacheStats; + private final RefCountedCacheStats blockFileCacheStats; + + /** + * Constructs a new {@code AggregateRefCountedCacheStats} instance. + *

+ * Many parameters of the same type in a row is a bad thing, but this class is not constructed + * by end users and is too fine-grained for a builder. + * + */ + public AggregateRefCountedCacheStats( + RefCountedCacheStats overallCacheStats, + RefCountedCacheStats fullFileCacheStats, + RefCountedCacheStats blockFileCacheStats + ) { + this.overallCacheStats = overallCacheStats; + this.fullFileCacheStats = fullFileCacheStats; + this.blockFileCacheStats = blockFileCacheStats; + } + + /** + * Getter for OverallCacheStats. + * @return {@link RefCountedCacheStats} overallCacheStats. + */ + public RefCountedCacheStats getOverallCacheStats() { + return overallCacheStats; + } + + /** + * Getter for blockFileCacheStats. + * @return {@link RefCountedCacheStats} blockFileCacheStats. + */ + public RefCountedCacheStats getBlockFileCacheStats() { + return blockFileCacheStats; + } + + /** + * Getter for fullFileCacheStats. + * @return {@link RefCountedCacheStats} fullFileCacheStats. + */ + public RefCountedCacheStats getFullFileCacheStats() { + return fullFileCacheStats; + } + + /** + * Returns the number of times {@link RefCountedCache} lookup methods have returned either a cached or + * uncached value. This is defined as {@code hitCount + missCount}. + * + * @return the {@code hitCount + missCount} + */ + public long requestCount() { + return this.overallCacheStats.requestCount(); + } + + /** + * Returns the number of times {@link RefCountedCache} lookup methods have returned a cached value. + * + * @return the number of times {@link RefCountedCache} lookup methods have returned a cached value + */ + public long hitCount() { + return this.overallCacheStats.hitCount(); + } + + /** + * Returns the ratio of cache requests which were hits. This is defined as + * {@code hitCount / requestCount}, or {@code 1.0} when {@code requestCount == 0}. Note that + * {@code hitRate + missRate =~ 1.0}. + * + * @return the ratio of cache requests which were hits + */ + public double hitRate() { + long requestCount = requestCount(); + return (requestCount == 0) ? 1.0 : (double) hitCount() / requestCount; + } + + /** + * Returns the number of times {@link RefCountedCache} lookup methods have returned an uncached (newly + * loaded) value, or null. Multiple concurrent calls to {@link RefCountedCache} lookup methods on an absent + * value can result in multiple misses, all returning the results of a single cache load + * operation. + * + * @return the number of times {@link RefCountedCache} lookup methods have returned an uncached (newly + * loaded) value, or null + */ + public long missCount() { + return this.overallCacheStats.missCount(); + } + + /** + * Returns the ratio of cache requests which were misses. This is defined as + * {@code missCount / requestCount}, or {@code 0.0} when {@code requestCount == 0}. + * Note that {@code hitRate + missRate =~ 1.0}. Cache misses include all requests which + * weren't cache hits, including requests which resulted in either successful or failed loading + * attempts, and requests which waited for other threads to finish loading. It is thus the case + * that {@code missCount >= loadSuccessCount + loadFailureCount}. Multiple + * concurrent misses for the same key will result in a single load operation. + * + * @return the ratio of cache requests which were misses + */ + public double missRate() { + long requestCount = requestCount(); + return (requestCount == 0) ? 0.0 : (double) missCount() / requestCount; + } + + /** + * Returns the number of times an entry has been removed explicitly. 
+ * + * @return the number of times an entry has been removed + */ + public long removeCount() { + return this.overallCacheStats.removeCount(); + } + + /** + * Returns the sum of weights of explicitly removed entries. + * + * @return the sum of weights of explicitly removed entries + */ + public long removeWeight() { + return this.overallCacheStats.removeWeight(); + } + + /** + * Returns the number of times an entry has been replaced. + * + * @return the number of times an entry has been replaced + */ + public long replaceCount() { + return this.overallCacheStats.replaceCount(); + } + + /** + * Returns the number of times an entry has been evicted. This count does not include manual + * {@linkplain RefCountedCache#remove removals}. + * + * @return the number of times an entry has been evicted + */ + public long evictionCount() { + return this.overallCacheStats.evictionCount(); + } + + /** + * Returns the sum of weights of evicted entries. This total does not include manual + * {@linkplain RefCountedCache#remove removals}. + * + * @return the sum of weights of evicted entities + */ + public long evictionWeight() { + return this.overallCacheStats.evictionWeight(); + } + + /** + * Returns the total weight of the cache. + * + * @return the total weight of the cache + */ + public long usage() { + return this.overallCacheStats.usage(); + } + + /** + * Returns the total active weight of the cache. + * + * @return the total active weight of the cache + */ + public long activeUsage() { + return this.overallCacheStats.activeUsage(); + } + + /** + * Accumulates the values of another {@link IRefCountedCacheStats} into this one. + * + * @param other another {@link IRefCountedCacheStats} + * @return result of accumulation of the other {@link IRefCountedCacheStats} into this one. 
+ */ + @Override + public IRefCountedCacheStats accumulate(IRefCountedCacheStats other) { + + if (other instanceof AggregateRefCountedCacheStats == false) { + throw new IllegalArgumentException("Invalid Argument passed for Accumulating AggregateRefCountedCacheStats"); + } + + final AggregateRefCountedCacheStats otherStats = (AggregateRefCountedCacheStats) other; + + this.overallCacheStats.accumulate(otherStats.overallCacheStats); + this.fullFileCacheStats.accumulate(otherStats.fullFileCacheStats); + this.blockFileCacheStats.accumulate(otherStats.blockFileCacheStats); + + return this; + } + + @Override + public int hashCode() { + return Objects.hash(overallCacheStats, fullFileCacheStats, blockFileCacheStats); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } else if (!(o instanceof AggregateRefCountedCacheStats)) { + return false; + } + AggregateRefCountedCacheStats other = (AggregateRefCountedCacheStats) o; + return overallCacheStats.equals(other.overallCacheStats) + && fullFileCacheStats.equals(other.fullFileCacheStats) + && blockFileCacheStats.equals(other.blockFileCacheStats); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + '{' + + "overallRefCountedCacheStats=" + + overallCacheStats.toString() + + ", " + + "fullRefCountedCacheStats=" + + fullFileCacheStats.toString() + + ", " + + "blockRefCountedCacheStats=" + + blockFileCacheStats.toString() + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/DefaultStatsCounter.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/DefaultStatsCounter.java index 60fe5223ef37e..9d3b144cd5a11 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/DefaultStatsCounter.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/DefaultStatsCounter.java @@ -13,7 +13,7 @@ * * @opensearch.internal */ -public class DefaultStatsCounter implements StatsCounter { +public class DefaultStatsCounter implements StatsCounter { private long hitCount; private long missCount; private long removeCount; @@ -21,6 +21,15 @@ public class DefaultStatsCounter implements StatsCounter { private long replaceCount; private long evictionCount; private long evictionWeight; + /** + * this tracks cache usage on the system (as long as cache entry is in the cache) + */ + private long usage; + + /** + * this tracks cache usage only by entries which are being referred. 
+ */ + private long activeUsage; public DefaultStatsCounter() { this.hitCount = 0L; @@ -30,10 +39,12 @@ public DefaultStatsCounter() { this.replaceCount = 0L; this.evictionCount = 0L; this.evictionWeight = 0L; + this.usage = 0L; + this.activeUsage = 0L; } @Override - public void recordHits(K key, int count) { + public void recordHits(K key, V value, int count) { hitCount += count; } @@ -43,29 +54,77 @@ public void recordMisses(K key, int count) { } @Override - public void recordRemoval(long weight) { + public void recordRemoval(V value, long weight) { removeCount++; removeWeight += weight; + usage -= weight; } @Override - public void recordReplacement() { + public void recordReplacement(V oldValue, V newValue, long oldWeight, long newWeight, boolean shouldUpdateActiveUsage) { replaceCount++; + if (shouldUpdateActiveUsage) activeUsage = activeUsage - oldWeight + newWeight; + usage = usage - oldWeight + newWeight; + } @Override - public void recordEviction(long weight) { + public void recordEviction(V value, long weight) { evictionCount++; evictionWeight += weight; + usage -= weight; + } + + @Override + public void recordUsage(V value, long weight, boolean shouldDecrease) { + weight = shouldDecrease ? -1 * weight : weight; + usage += weight; + } + + @Override + public void recordActiveUsage(V value, long weight, boolean shouldDecrease) { + weight = shouldDecrease ? -1 * weight : weight; + activeUsage += weight; + } + + @Override + public void resetActiveUsage() { + this.activeUsage = 0; } @Override - public CacheStats snapshot() { - return new CacheStats(hitCount, missCount, removeCount, removeWeight, replaceCount, evictionCount, evictionWeight); + public void resetUsage() { + this.usage = 0; + } + + @Override + public long activeUsage() { + return this.activeUsage; + } + + @Override + public long usage() { + return this.usage; + } + + @Override + public IRefCountedCacheStats snapshot() { + return new RefCountedCacheStats( + hitCount, + missCount, + removeCount, + removeWeight, + replaceCount, + evictionCount, + evictionWeight, + usage, + activeUsage + ); } @Override public String toString() { return snapshot().toString(); } + } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/FileStatsCounter.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/FileStatsCounter.java new file mode 100644 index 0000000000000..0589a6f34006c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/FileStatsCounter.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.utils.cache.stats; + +import org.opensearch.index.store.remote.filecache.CachedFullFileIndexInput; + +/** + * A non thread-safe {@link StatsCounter} implementation which aggregates multiple {@link DefaultStatsCounter}. 
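FileStatsCounter, whose body follows, keeps three DefaultStatsCounter instances and routes every event to the overall counter plus exactly one typed counter, chosen by an instanceof check against CachedFullFileIndexInput. A minimal sketch of that route-by-entry-type pattern, using hypothetical stand-in types rather than the OpenSearch classes:

// Sketch of the routing pattern (stand-in types, hit counting only).
final class RoutingStatsSketch {
    interface CacheEntry { boolean isFullFile(); }

    private long overallHits;
    private long fullFileHits;
    private long blockHits;

    void recordHit(CacheEntry value, int count) {
        overallHits += count;          // every hit lands in the overall bucket
        if (value.isFullFile()) {
            fullFileHits += count;     // ...and in exactly one typed bucket
        } else {
            blockHits += count;
        }
    }
}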
+ * + * @opensearch.internal + */ +public class FileStatsCounter implements StatsCounter { + + private final DefaultStatsCounter overallStatsCounter; + private final DefaultStatsCounter fullFileStatsCounter; + private final DefaultStatsCounter blockFileStatsCounter; + + public FileStatsCounter() { + overallStatsCounter = new DefaultStatsCounter<>(); + fullFileStatsCounter = new DefaultStatsCounter<>(); + blockFileStatsCounter = new DefaultStatsCounter<>(); + } + + @Override + public void recordHits(K key, V value, int count) { + overallStatsCounter.recordHits(key, value, count); + if (isFullFile(value)) fullFileStatsCounter.recordHits(key, value, count); + else blockFileStatsCounter.recordHits(key, value, count); + } + + @Override + public void recordMisses(K key, int count) { + overallStatsCounter.recordMisses(key, count); + // we haven't added a check for full file here because we don't expect full file to ever have misses. + blockFileStatsCounter.recordMisses(key, count); + } + + @Override + public void recordRemoval(V value, long weight) { + + overallStatsCounter.recordRemoval(value, weight); + if (isFullFile(value)) fullFileStatsCounter.recordRemoval(value, weight); + else blockFileStatsCounter.recordRemoval(value, weight); + } + + @Override + public void recordReplacement(V oldValue, V newValue, long oldWeight, long newWeight, boolean shouldUpdateActiveUsage) { + + boolean isOldFullFile = isFullFile(oldValue); + boolean isNewFullFile = isFullFile(newValue); + + overallStatsCounter.recordReplacement(oldValue, newValue, oldWeight, newWeight, shouldUpdateActiveUsage); + fullFileStatsCounter.recordReplacement( + oldValue, + newValue, + isOldFullFile ? oldWeight : 0, + isNewFullFile ? newWeight : 0, + shouldUpdateActiveUsage + ); + blockFileStatsCounter.recordReplacement( + oldValue, + newValue, + isOldFullFile ? 0 : oldWeight, + isNewFullFile ? 
0 : newWeight, + shouldUpdateActiveUsage + ); + } + + @Override + public void recordEviction(V value, long weight) { + + overallStatsCounter.recordEviction(value, weight); + if (isFullFile(value)) fullFileStatsCounter.recordEviction(value, weight); + else blockFileStatsCounter.recordEviction(value, weight); + } + + @Override + public void recordUsage(V value, long weight, boolean shouldDecrease) { + + overallStatsCounter.recordUsage(value, weight, shouldDecrease); + if (isFullFile(value)) fullFileStatsCounter.recordUsage(value, weight, shouldDecrease); + else blockFileStatsCounter.recordUsage(value, weight, shouldDecrease); + } + + @Override + public void recordActiveUsage(V value, long weight, boolean shouldDecrease) { + + overallStatsCounter.recordActiveUsage(value, weight, shouldDecrease); + if (isFullFile(value)) fullFileStatsCounter.recordActiveUsage(value, weight, shouldDecrease); + else blockFileStatsCounter.recordActiveUsage(value, weight, shouldDecrease); + } + + @Override + public void resetActiveUsage() { + overallStatsCounter.resetActiveUsage(); + fullFileStatsCounter.resetActiveUsage(); + blockFileStatsCounter.resetActiveUsage(); + + } + + @Override + public void resetUsage() { + overallStatsCounter.resetUsage(); + fullFileStatsCounter.resetUsage(); + blockFileStatsCounter.resetUsage(); + } + + @Override + public long activeUsage() { + return overallStatsCounter.activeUsage(); + } + + @Override + public long usage() { + return overallStatsCounter.usage(); + } + + @Override + public IRefCountedCacheStats snapshot() { + return new AggregateRefCountedCacheStats( + (RefCountedCacheStats) overallStatsCounter.snapshot(), + (RefCountedCacheStats) fullFileStatsCounter.snapshot(), + (RefCountedCacheStats) blockFileStatsCounter.snapshot() + ); + } + + @Override + public String toString() { + return snapshot().toString(); + } + + private boolean isFullFile(V value) { + return value instanceof CachedFullFileIndexInput; + } +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/IRefCountedCacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/IRefCountedCacheStats.java new file mode 100644 index 0000000000000..ed74d0542dd79 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/IRefCountedCacheStats.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.utils.cache.stats; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.store.remote.utils.cache.RefCountedCache; + +/** + * Statistics about the performance of a {@link RefCountedCache}. + */ +@ExperimentalApi +public interface IRefCountedCacheStats { + + /** + * Returns the number of times {@link IRefCountedCacheStats} lookup methods have returned either a cached or + * uncached value. This is defined as {@code hitCount + missCount}. + * + * @return the {@code hitCount + missCount} + */ + public long requestCount(); + + /** + * Returns the number of times {@link IRefCountedCacheStats} lookup methods have returned a cached value. + * + * @return the number of times {@link IRefCountedCacheStats} lookup methods have returned a cached value + */ + public long hitCount(); + + /** + * Returns the ratio of cache requests which were hits. 
This is defined as + * {@code hitCount / requestCount}, or {@code 1.0} when {@code requestCount == 0}. Note that + * {@code hitRate + missRate =~ 1.0}. + * + * @return the ratio of cache requests which were hits + */ + public double hitRate(); + + /** + * Returns the number of times {@link IRefCountedCacheStats} lookup methods have returned an uncached (newly + * loaded) value, or null. Multiple concurrent calls to {@link IRefCountedCacheStats} lookup methods on an absent + * value can result in multiple misses, all returning the results of a single cache load + * operation. + * + * @return the number of times {@link IRefCountedCacheStats} lookup methods have returned an uncached (newly + * loaded) value, or null + */ + public long missCount(); + + /** + * Returns the ratio of cache requests which were misses. This is defined as + * {@code missCount / requestCount}, or {@code 0.0} when {@code requestCount == 0}. + * Note that {@code hitRate + missRate =~ 1.0}. Cache misses include all requests which + * weren't cache hits, including requests which resulted in either successful or failed loading + * attempts, and requests which waited for other threads to finish loading. It is thus the case + * that {@code missCount >= loadSuccessCount + loadFailureCount}. Multiple + * concurrent misses for the same key will result in a single load operation. + * + * @return the ratio of cache requests which were misses + */ + public double missRate(); + + /** + * Returns the number of times an entry has been removed explicitly. + * + * @return the number of times an entry has been removed + */ + public long removeCount(); + + /** + * Returns the sum of weights of explicitly removed entries. + * + * @return the sum of weights of explicitly removed entries + */ + public long removeWeight(); + + /** + * Returns the number of times an entry has been replaced. + * + * @return the number of times an entry has been replaced + */ + public long replaceCount(); + + /** + * Returns the number of times an entry has been evicted. + * + * @return the number of times an entry has been evicted + */ + public long evictionCount(); + + /** + * Returns the sum of weights of evicted entries. + * + * @return the sum of weights of evicted entities + */ + public long evictionWeight(); + + /** + * Returns the total weight of the cache. + * + * @return the total weight of the cache + */ + public long usage(); + + /** + * Returns the total active weight of the cache. + * + * @return the total active weight of the cache + */ + public long activeUsage(); + + /** + * Accumulates the values of another {@link IRefCountedCacheStats} into this one. + * @param other another {@link IRefCountedCacheStats} + * @return result of accumulation of the other {@link IRefCountedCacheStats} into this one. 
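The accumulate contract documented here (and implemented by RefCountedCacheStats and AggregateRefCountedCacheStats later in this diff) is what allows callers to sum snapshots component-wise. A hypothetical folding helper, shown only to illustrate the intended call pattern against this interface:

// Hypothetical helper (not part of this change): fold several snapshots into one total.
import java.util.List;
import org.opensearch.index.store.remote.utils.cache.stats.IRefCountedCacheStats;

final class StatsFolding {
    static IRefCountedCacheStats sum(List<? extends IRefCountedCacheStats> snapshots) {
        IRefCountedCacheStats total = snapshots.get(0);
        for (int i = 1; i < snapshots.size(); i++) {
            // accumulate adds into the receiver and returns it; it throws
            // IllegalArgumentException if the concrete types do not match.
            total = total.accumulate(snapshots.get(i));
        }
        return total;
    }
}

Because accumulate mutates its receiver, a caller that wants to keep the first snapshot untouched would start the fold from a fresh zero-valued instance instead.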
+ */ + public IRefCountedCacheStats accumulate(IRefCountedCacheStats other) throws IllegalArgumentException; + +} diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/RefCountedCacheStats.java similarity index 70% rename from server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java rename to server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/RefCountedCacheStats.java index 55893752669a8..0f2895a8d7727 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/RefCountedCacheStats.java @@ -19,17 +19,19 @@ * @opensearch.api */ @PublicApi(since = "2.7.0") -public final class CacheStats { - private final long hitCount; - private final long missCount; - private final long removeCount; - private final long removeWeight; - private final long replaceCount; - private final long evictionCount; - private final long evictionWeight; +public final class RefCountedCacheStats implements IRefCountedCacheStats { + private long hitCount; + private long missCount; + private long removeCount; + private long removeWeight; + private long replaceCount; + private long evictionCount; + private long evictionWeight; + private long usage; + private long activeUsage; /** - * Constructs a new {@code CacheStats} instance. + * Constructs a new {@code AggregateRefCountedCacheStats} instance. *

* Many parameters of the same type in a row is a bad thing, but this class is not constructed * by end users and is too fine-grained for a builder. @@ -42,14 +44,16 @@ public final class CacheStats { * @param evictionCount the number of entries evicted from the cache * @param evictionWeight the sum of weights of entries evicted from the cache */ - public CacheStats( + public RefCountedCacheStats( long hitCount, long missCount, long removeCount, long removeWeight, long replaceCount, long evictionCount, - long evictionWeight + long evictionWeight, + long usage, + long activeUsage ) { if ((hitCount < 0) || (missCount < 0) @@ -67,6 +71,8 @@ public CacheStats( this.replaceCount = replaceCount; this.evictionCount = evictionCount; this.evictionWeight = evictionWeight; + this.usage = usage; + this.activeUsage = activeUsage; } /** @@ -75,6 +81,7 @@ public CacheStats( * * @return the {@code hitCount + missCount} */ + @Override public long requestCount() { return hitCount + missCount; } @@ -84,6 +91,7 @@ public long requestCount() { * * @return the number of times {@link RefCountedCache} lookup methods have returned a cached value */ + @Override public long hitCount() { return hitCount; } @@ -95,6 +103,7 @@ public long hitCount() { * * @return the ratio of cache requests which were hits */ + @Override public double hitRate() { long requestCount = requestCount(); return (requestCount == 0) ? 1.0 : (double) hitCount / requestCount; @@ -109,6 +118,7 @@ public double hitRate() { * @return the number of times {@link RefCountedCache} lookup methods have returned an uncached (newly * loaded) value, or null */ + @Override public long missCount() { return missCount; } @@ -124,6 +134,7 @@ public long missCount() { * * @return the ratio of cache requests which were misses */ + @Override public double missRate() { long requestCount = requestCount(); return (requestCount == 0) ? 0.0 : (double) missCount / requestCount; @@ -134,6 +145,7 @@ public double missRate() { * * @return the number of times an entry has been removed */ + @Override public long removeCount() { return removeCount; } @@ -143,6 +155,7 @@ public long removeCount() { * * @return the sum of weights of explicitly removed entries */ + @Override public long removeWeight() { return removeWeight; } @@ -152,6 +165,7 @@ public long removeWeight() { * * @return the number of times an entry has been replaced */ + @Override public long replaceCount() { return replaceCount; } @@ -162,6 +176,7 @@ public long replaceCount() { * * @return the number of times an entry has been evicted */ + @Override public long evictionCount() { return evictionCount; } @@ -172,30 +187,90 @@ public long evictionCount() { * * @return the sum of weights of evicted entities */ + @Override public long evictionWeight() { return evictionWeight; } + /** + * Returns the total weight of the cache. + * + * @return the total weight of the cache + */ + @Override + public long usage() { + return usage; + } + + /** + * Returns the total active weight of the cache. + * + * @return the total active weight of the cache + */ + @Override + public long activeUsage() { + return activeUsage; + } + + /** + * Accumulates the values of another {@link RefCountedCacheStats} into this one. + * + * @param other another {@link RefCountedCacheStats} + * @return result of accumulation of the other {@link RefCountedCacheStats} into this one. 
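As a quick sanity check of the rate arithmetic defined in this class, here is a small snippet using the widened constructor introduced by this rename (hitCount, missCount, removeCount, removeWeight, replaceCount, evictionCount, evictionWeight, usage, activeUsage); it assumes RefCountedCacheStats is on the classpath:

// Worked example of requestCount()/hitRate()/missRate().
RefCountedCacheStats stats = new RefCountedCacheStats(8, 2, 0, 0, 0, 0, 0, 100, 40);
long requests = stats.requestCount(); // 10  (8 hits + 2 misses)
double hitRate = stats.hitRate();     // 0.8 (8 / 10); an empty counter reports 1.0
double missRate = stats.missRate();   // 0.2 (2 / 10); an empty counter reports 0.0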
+ */ + @Override + public IRefCountedCacheStats accumulate(IRefCountedCacheStats other) { + if (other instanceof RefCountedCacheStats == false) { + throw new IllegalArgumentException("Invalid Argument passed for Accumulating RefCountedCacheStats"); + } + + final RefCountedCacheStats otherStats = (RefCountedCacheStats) other; + + this.hitCount += otherStats.hitCount(); + this.missCount += otherStats.missCount(); + this.removeCount += otherStats.removeCount(); + this.removeWeight += otherStats.removeWeight(); + this.replaceCount += otherStats.replaceCount(); + this.evictionCount += otherStats.evictionCount(); + this.evictionWeight += otherStats.evictionWeight(); + this.usage += otherStats.usage(); + this.activeUsage += otherStats.activeUsage(); + + return this; + } + @Override public int hashCode() { - return Objects.hash(hitCount, missCount, removeCount, removeWeight, replaceCount, evictionCount, evictionWeight); + return Objects.hash( + hitCount, + missCount, + removeCount, + removeWeight, + replaceCount, + evictionCount, + evictionWeight, + usage, + activeUsage + ); } @Override public boolean equals(Object o) { if (o == this) { return true; - } else if (!(o instanceof CacheStats)) { + } else if (!(o instanceof RefCountedCacheStats)) { return false; } - CacheStats other = (CacheStats) o; + RefCountedCacheStats other = (RefCountedCacheStats) o; return hitCount == other.hitCount && missCount == other.missCount && removeCount == other.removeCount && removeWeight == other.removeWeight && replaceCount == other.replaceCount && evictionCount == other.evictionCount - && evictionWeight == other.evictionWeight; + && evictionWeight == other.evictionWeight + && usage == other.usage + && activeUsage == other.activeUsage; } @Override @@ -222,6 +297,12 @@ public String toString() { + ", " + "evictionWeight=" + evictionWeight + + ", " + + "usage=" + + usage + + ", " + + "activeUsage=" + + activeUsage + '}'; } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java index b096bb8d652ae..aeb910869904e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java @@ -18,14 +18,14 @@ * * @opensearch.internal */ -public interface StatsCounter { +public interface StatsCounter { /** * Records cache hits. This should be called when a cache request returns a cached value. * * @param count the number of hits to record */ - void recordHits(K key, int count); + void recordHits(K key, V value, int count); /** * Records cache misses. This should be called when a cache request returns a value that was not @@ -47,7 +47,7 @@ public interface StatsCounter { * * @param weight the weight of the removed entry */ - void recordRemoval(long weight); + void recordRemoval(V value, long weight); /** * Records the replacement of an entry from the cache. This should only been called when an entry is @@ -55,7 +55,7 @@ public interface StatsCounter { * {@link RefCountedCache#put(Object, Object)} * {@link RefCountedCache#compute(Object, BiFunction)} */ - void recordReplacement(); + void recordReplacement(V oldValue, V newValue, long oldWeight, long newWeight, boolean shouldUpdateActiveUsage); /** * Records the eviction of an entry from the cache. 
This should only been called when an entry is @@ -64,7 +64,49 @@ public interface StatsCounter { * * @param weight the weight of the evicted entry */ - void recordEviction(long weight); + void recordEviction(V value, long weight); + + /** + * Records the usage of the cache. This should be called when an entry is created/removed/replaced in the cache. + * + * @param value Entry of the cache. + * @param weight Weight of the entry. + * @param shouldDecrease Should the usage of the cache be decreased or not. + */ + void recordUsage(V value, long weight, boolean shouldDecrease); + + /** + * Records the cache usage by entries which are active (being referenced). + * This should be called when an active entry is created/removed/replaced in the cache. + * @param value Entry of the cache. + * @param weight Weight of the entry. + * @param shouldDecrease Should the active usage of the cache be decreased or not. + */ + void recordActiveUsage(V value, long weight, boolean shouldDecrease); + + /** + * Resets the cache usage by entries which are active (being referenced). + * This should be called when cache is cleared. + */ + void resetActiveUsage(); + + /** + * Resets the cache usage. + * This should be called when cache is cleared. + */ + void resetUsage(); + + /** + * Returns the active usage of the cache. + * @return Active usage of the cache. + */ + long activeUsage(); + + /** + * Returns the usage of the cache. + * @return Usage of the cache. + */ + long usage(); /** * Returns a snapshot of this counter's values. Note that this may be an inconsistent view, as it @@ -73,5 +115,5 @@ public interface StatsCounter { * @return a snapshot of this counter's values */ - CacheStats snapshot(); + IRefCountedCacheStats snapshot(); } diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index db77ec7628e76..0b446443d4b25 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -80,7 +80,7 @@ public FsInfo stats(FsInfo previous) throws IOException { paths[i] = getFSInfo(dataLocations[i]); if (fileCache != null && dataLocations[i].fileCacheReservedSize != ByteSizeValue.ZERO) { paths[i].fileCacheReserved = adjustForHugeFilesystems(dataLocations[i].fileCacheReservedSize.getBytes()); - paths[i].fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage().usage()); + paths[i].fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage()); // fileCacheFree will be less than zero if the cache being over-subscribed long fileCacheFree = paths[i].fileCacheReserved - paths[i].fileCacheUtilized; if (fileCacheFree > 0) { diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 5ecd90dda1dea..e3cf2f4d43a3f 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -89,7 +89,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; -import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.replication.common.ReplicationType; @@ -889,7 +889,7 @@ 
private IndexMetadata updateIndexSettings( private void validateSearchableSnapshotRestorable(long totalRestorableRemoteIndexesSize) { ClusterInfo clusterInfo = clusterInfoSupplier.get(); final double remoteDataToFileCacheRatio = dataToFileCacheSizeRatioSupplier.get(); - Map nodeFileCacheStats = clusterInfo.getNodeFileCacheStats(); + Map nodeFileCacheStats = clusterInfo.getNodeFileCacheStats(); if (nodeFileCacheStats.isEmpty() || remoteDataToFileCacheRatio <= 0.01f) { return; } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java b/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java index 4ec7db2f3d552..a218e9d50c11e 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java @@ -36,6 +36,8 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.test.OpenSearchTestCase; @@ -82,19 +84,40 @@ private static Map randomDiskUsage() { return builder; } - private static Map randomFileCacheStats() { + private static Map randomFileCacheStats() { int numEntries = randomIntBetween(0, 16); - final Map builder = new HashMap<>(numEntries); + final Map builder = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { String key = randomAlphaOfLength(16); - FileCacheStats fileCacheStats = new FileCacheStats( + AggregateFileCacheStats fileCacheStats = new AggregateFileCacheStats( randomLong(), - randomLong(), - randomLong(), - randomLong(), - randomLong(), - randomLong(), - randomLong() + new FileCacheStats( + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + FileCacheStatsType.FULL_FILE_STATS + ), + new FileCacheStats( + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + FileCacheStatsType.FULL_FILE_STATS + ), + new FileCacheStats( + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + randomLong(), + FileCacheStatsType.BLOCK_FILE_STATS + ) ); builder.put(key, fileCacheStats); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index ceb6a8ec4c087..f8f4ee169f36e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -69,6 +69,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.EmptySnapshotsInfoService; @@ -300,10 +302,34 @@ public void testDiskThresholdForRemoteShards() { shardSizes.put("[test][0][p]", 10L); // 10 bytes shardSizes.put("[test][0][r]", 10L); - 
Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node3", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + 0, + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 0, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + 0, + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 0, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + ) + ); + fileCacheStatsMap.put( + "node3", + new AggregateFileCacheStats( + 0, + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 0, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 0, 1000, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + ) + ); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -1546,7 +1572,7 @@ static class DevNullClusterInfo extends ClusterInfo { final Map leastAvailableSpaceUsage, final Map mostAvailableSpaceUsage, final Map shardSizes, - final Map nodeFileCacheStats + final Map nodeFileCacheStats ) { this(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, Map.of(), nodeFileCacheStats); } @@ -1556,7 +1582,7 @@ static class DevNullClusterInfo extends ClusterInfo { final Map mostAvailableSpaceUsage, final Map shardSizes, Map reservedSpace, - final Map nodeFileCacheStats + final Map nodeFileCacheStats ) { super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null, reservedSpace, nodeFileCacheStats); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDeciderTests.java index 86fe1333715cd..27220b07b3549 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/WarmDiskThresholdDeciderTests.java @@ -55,6 +55,8 @@ import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import org.opensearch.index.store.remote.filecache.FileCacheSettings; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.snapshots.EmptySnapshotsInfoService; @@ -92,9 +94,26 @@ public void testCanAllocateSufficientFreeSpace() { shardSizes.put("[test2][0][p]", 1000L); // 1000 bytes shardSizes.put("[test2][0][r]", 1000L); - Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 
1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + ) + ); final Map usages = new HashMap<>(); final ClusterInfo clusterInfo = new DiskThresholdDeciderTests.DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); @@ -159,9 +178,27 @@ public void testCanAllocateInSufficientFreeSpace() { shardSizes.put("[test2][0][p]", 1000L); // 1000 bytes shardSizes.put("[test2][0][r]", 1000L); - Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); final Map usages = new HashMap<>(); final ClusterInfo clusterInfo = new DiskThresholdDeciderTests.DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); @@ -232,9 +269,27 @@ public void testCanRemainSufficientSpace() { shardSizes.put("[test2][0][p]", 1000L); // 1000 bytes shardSizes.put("[test2][0][r]", 1000L); - Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); final Map usages = new HashMap<>(); final ClusterInfo clusterInfo = new DiskThresholdDeciderTests.DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); @@ -298,9 +353,27 @@ public void testCanRemainInsufficientSpace() { shardSizes.put("[test2][0][p]", 1000L); // 1000 bytes 
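These decider tests repeatedly construct the same three-part AggregateFileCacheStats (overall, full-file, block) for every node. A hypothetical helper along the following lines could cut that repetition; the constructor shapes are taken from this diff (FileCacheStats(active, total, used, evicted, hits, misses, type) and AggregateFileCacheStats(timestamp, overallStats, fullFileStats, blockFileStats)), but the helper itself is not part of the change and relies on the imports the diff already adds to these test classes.

// Hypothetical test helper, sketch only.
private static AggregateFileCacheStats warmNodeFileCacheStats(long totalBytes) {
    return new AggregateFileCacheStats(
        0,
        new FileCacheStats(0, totalBytes, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS),
        new FileCacheStats(0, totalBytes, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS),
        new FileCacheStats(0, totalBytes, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS)
    );
}

// Usage would then reduce to: fileCacheStatsMap.put("node1", warmNodeFileCacheStats(1000L));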
shardSizes.put("[test2][0][r]", 1000L); - Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); final Map usages = new HashMap<>(); final ClusterInfo clusterInfo = new DiskThresholdDeciderTests.DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); @@ -364,9 +437,27 @@ public void testCanRemainSufficientSpaceAfterRelocation() { shardSizes.put("[test2][0][r]", 1000L); shardSizes.put("[test3][0][p]", 1500L); - Map fileCacheStatsMap = new HashMap<>(); - fileCacheStatsMap.put("node1", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); - fileCacheStatsMap.put("node2", new FileCacheStats(0, 0, 1000, 0, 0, 0, 0)); + Map fileCacheStatsMap = new HashMap<>(); + fileCacheStatsMap.put( + "node1", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); + fileCacheStatsMap.put( + "node2", + new AggregateFileCacheStats( + randomNonNegativeInt(), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(0, 1000, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + + ) + ); final Map usages = new HashMap<>(); final ClusterInfo clusterInfo = new DiskThresholdDeciderTests.DevNullClusterInfo(usages, usages, shardSizes, fileCacheStatsMap); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStatsTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStatsTests.java new file mode 100644 index 0000000000000..56998cd59bf34 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/AggregateFileCacheStatsTests.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store.remote.filecache; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; +import org.opensearch.index.store.remote.utils.cache.stats.AggregateRefCountedCacheStats; +import org.opensearch.index.store.remote.utils.cache.stats.RefCountedCacheStats; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class AggregateFileCacheStatsTests extends OpenSearchTestCase { + private static final long BYTES_IN_GB = 1024 * 1024 * 1024; + + public static AggregateRefCountedCacheStats getMockCacheStats() { + final long evicted = randomLongBetween(10000, BYTES_IN_GB); + final long removed = randomLongBetween(10000, BYTES_IN_GB); + final long replaced = randomLongBetween(0, 10000); + final long hits = randomLongBetween(0, 10000); + final long miss = randomLongBetween(0, 10000); + final long usage = randomLongBetween(10000, BYTES_IN_GB); + final long activeUsage = randomLongBetween(10000, BYTES_IN_GB); + final long fullFileHitCount = randomLongBetween(0, 10000); + final long fullFileRemoveCount = randomLongBetween(0, 10000); + final long fullFileRemoveWeight = randomLongBetween(10000, BYTES_IN_GB); + final long fullFileReplaceCount = randomLongBetween(0, 10000); + final long fullFileEvictionCount = randomLongBetween(0, 10000); + final long fullFileEvictionWeight = randomLongBetween(10000, BYTES_IN_GB); + final long fullFileUsage = randomLongBetween(0, 10000); + final long fullFileActiveUsage = randomLongBetween(0, 10000); + + return new AggregateRefCountedCacheStats( + + new RefCountedCacheStats(hits, miss, 0, removed, replaced, 0, evicted, usage, activeUsage), + new RefCountedCacheStats( + fullFileHitCount, + 0, + fullFileRemoveCount, + fullFileRemoveWeight, + fullFileReplaceCount, + fullFileEvictionCount, + fullFileEvictionWeight, + fullFileUsage, + fullFileActiveUsage + ), + new RefCountedCacheStats( + fullFileHitCount, + 0, + fullFileRemoveCount, + fullFileRemoveWeight, + fullFileReplaceCount, + fullFileEvictionCount, + fullFileEvictionWeight, + fullFileUsage, + fullFileActiveUsage + ) + + ); + } + + public static long getMockCacheCapacity() { + return randomLongBetween(10 * BYTES_IN_GB, 1000 * BYTES_IN_GB); + } + + public static AggregateFileCacheStats getFileCacheStats(final long fileCacheCapacity, final AggregateRefCountedCacheStats stats) + throws IOException { + return new AggregateFileCacheStats( + System.currentTimeMillis(), + new FileCacheStats( + stats.activeUsage(), + fileCacheCapacity, + stats.usage(), + stats.evictionWeight(), + stats.hitCount(), + stats.missCount(), + FileCacheStatsType.OVER_ALL_STATS + ), + new FileCacheStats( + stats.activeUsage(), + fileCacheCapacity, + stats.usage(), + stats.evictionWeight(), + stats.hitCount(), + stats.missCount(), + FileCacheStatsType.FULL_FILE_STATS + ), + new FileCacheStats( + stats.activeUsage(), + fileCacheCapacity, + stats.usage(), + stats.evictionWeight(), + stats.hitCount(), + stats.missCount(), + FileCacheStatsType.BLOCK_FILE_STATS + ) + ); + } + + public static FileCacheStats getMockFullFileCacheStats() { + final long active = randomLongBetween(100000, BYTES_IN_GB); + final long total = randomLongBetween(100000, BYTES_IN_GB); + final long used = 
randomLongBetween(100000, BYTES_IN_GB); + final long evicted = randomLongBetween(0, getMockCacheStats().getFullFileCacheStats().evictionWeight()); + final long hit = randomLongBetween(0, 10); + final long misses = randomLongBetween(0, 10); + return new FileCacheStats(active, total, used, evicted, hit, misses, FileCacheStatsType.OVER_ALL_STATS); + } + + public static AggregateFileCacheStats getMockFileCacheStats() throws IOException { + final long fcSize = getMockCacheCapacity(); + return getFileCacheStats(fcSize, getMockCacheStats()); + } + + public static void validateFullFileStats(FileCacheStats original, FileCacheStats deserialized) { + assertEquals(original.getHits(), deserialized.getHits()); + assertEquals(original.getActive(), deserialized.getActive()); + assertEquals(original.getUsed(), deserialized.getUsed()); + assertEquals(original.getEvicted(), deserialized.getEvicted()); + assertEquals(original.getActivePercent(), deserialized.getActivePercent()); + } + + public static void validateFileCacheStats(AggregateFileCacheStats original, AggregateFileCacheStats deserialized) throws IOException { + assertEquals(original.getTotal(), deserialized.getTotal()); + assertEquals(original.getUsed(), deserialized.getUsed()); + assertEquals(original.getUsedPercent(), deserialized.getUsedPercent()); + assertEquals(original.getActive(), deserialized.getActive()); + assertEquals(original.getActivePercent(), deserialized.getActivePercent()); + assertEquals(original.getEvicted(), deserialized.getEvicted()); + assertEquals(original.getCacheHits(), deserialized.getCacheHits()); + assertEquals(original.getCacheMisses(), deserialized.getCacheMisses()); + assertEquals(original.getTimestamp(), deserialized.getTimestamp()); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder = original.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + XContentBuilder deserializedBuilder = XContentFactory.jsonBuilder(); + deserializedBuilder.startObject(); + deserializedBuilder = deserialized.toXContent(deserializedBuilder, ToXContent.EMPTY_PARAMS); + deserializedBuilder.endObject(); + + assertTrue(builder.toString().equals(deserializedBuilder.toString())); + } + + public void testFileCacheStatsSerialization() throws IOException { + final AggregateFileCacheStats fileCacheStats = getMockFileCacheStats(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + fileCacheStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + // Validate original object against deserialized values + validateFileCacheStats(fileCacheStats, new AggregateFileCacheStats(in)); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheStatsTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheStatsTests.java index 7931c6fec5414..6f0d676b46090 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheStatsTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheStatsTests.java @@ -10,70 +10,42 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.store.remote.utils.cache.CacheUsage; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; public class FileCacheStatsTests extends OpenSearchTestCase { - private static final long BYTES_IN_GB = 1024 * 
1024 * 1024; - - public static CacheStats getMockCacheStats() { - final long evicted = randomLongBetween(10000, BYTES_IN_GB); - final long removed = randomLongBetween(10000, BYTES_IN_GB); - final long replaced = randomLongBetween(0, 10000); - final long hits = randomLongBetween(0, 10000); - final long miss = randomLongBetween(0, 10000); - return new CacheStats(hits, miss, 0, removed, replaced, 0, evicted); - } - public static CacheUsage getMockCacheUsage(long total) { - final long used = randomLongBetween(100, total); - final long active = randomLongBetween(10, used); - return new CacheUsage(used, active); - } + private static final long BYTES_IN_GB = 1024 * 1024 * 1024; - public static long getMockCacheCapacity() { - return randomLongBetween(10 * BYTES_IN_GB, 1000 * BYTES_IN_GB); - } + public static FileCacheStats getMockFullFileCacheStats() { + final long active = randomLongBetween(100000, BYTES_IN_GB); + final long total = randomLongBetween(100000, BYTES_IN_GB); + final long used = randomLongBetween(100000, BYTES_IN_GB); + final long evicted = randomLongBetween(0, active); + final long hits = randomLongBetween(0, 10); + final long misses = randomLongBetween(0, 10); - public static FileCacheStats getFileCacheStats(final long fileCacheCapacity, final CacheStats stats, final CacheUsage usage) { - return new FileCacheStats( - System.currentTimeMillis(), - usage.activeUsage(), - fileCacheCapacity, - usage.usage(), - stats.evictionWeight(), - stats.hitCount(), - stats.missCount() - ); + return new FileCacheStats(active, total, used, evicted, hits, misses, AggregateFileCacheStats.FileCacheStatsType.OVER_ALL_STATS); } - public static FileCacheStats getMockFileCacheStats() { - final long fcSize = getMockCacheCapacity(); - return getFileCacheStats(fcSize, getMockCacheStats(), getMockCacheUsage(fcSize)); + public static void validateFullFileCacheStats(FileCacheStats expected, FileCacheStats actual) { + assertEquals(expected.getActive(), actual.getActive()); + assertEquals(expected.getUsed(), actual.getUsed()); + assertEquals(expected.getEvicted(), actual.getEvicted()); + assertEquals(expected.getHits(), actual.getHits()); + assertEquals(expected.getActivePercent(), actual.getActivePercent()); } - public static void validateFileCacheStats(FileCacheStats original, FileCacheStats deserialized) { - assertEquals(original.getTotal(), deserialized.getTotal()); - assertEquals(original.getUsed(), deserialized.getUsed()); - assertEquals(original.getUsedPercent(), deserialized.getUsedPercent()); - assertEquals(original.getActive(), deserialized.getActive()); - assertEquals(original.getActivePercent(), deserialized.getActivePercent()); - assertEquals(original.getEvicted(), deserialized.getEvicted()); - assertEquals(original.getCacheHits(), deserialized.getCacheHits()); - assertEquals(original.getCacheMisses(), deserialized.getCacheMisses()); - } + public void testFullFileCacheStatsSerialization() throws IOException { + final FileCacheStats fileCacheStats = getMockFullFileCacheStats(); - public void testFileCacheStatsSerialization() throws IOException { - final FileCacheStats fileCacheStats = getMockFileCacheStats(); try (BytesStreamOutput out = new BytesStreamOutput()) { fileCacheStats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - // Validate original object against deserialized values - validateFileCacheStats(fileCacheStats, new FileCacheStats(in)); + validateFullFileCacheStats(fileCacheStats, new FileCacheStats(in)); } } + } } diff --git 
a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 55f1d9dc6c0ea..7964077eb04ae 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store.remote.filecache; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.breaker.TestCircuitBreaker; @@ -16,7 +18,6 @@ import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory; -import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -77,6 +78,69 @@ public void testGet() { } } + public void testGetWithCachedFullFileIndexInput() throws IOException { + FileCache fileCache = createFileCache(1 * 1000); + for (int i = 0; i < 4; i++) { + Path filePath = path.resolve(NodeEnvironment.CACHE_FOLDER) + .resolve("indexName") + .resolve("shardId") + .resolve(Integer.toString(i)) + .resolve(RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION); + createFile("indexName", "shardId/".concat(Integer.toString(i)), "test_file"); + FSDirectory fsDirectory = FSDirectory.open(filePath); + FileCachedIndexInput fileCachedIndexInput = new FileCachedIndexInput( + fileCache, + filePath, + fsDirectory.openInput("test_file", IOContext.DEFAULT) + ); + fileCache.put(filePath.resolve("test_file"), new CachedFullFileIndexInput(fileCache, filePath, fileCachedIndexInput)); + } + // verify all files are put into file cache + for (int i = 0; i < 4; i++) { + Path filePath = path.resolve(NodeEnvironment.CACHE_FOLDER) + .resolve("indexName") + .resolve("shardId") + .resolve(Integer.toString(i)) + .resolve(RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION); + assertNotNull(fileCache.get(filePath.resolve("test_file"))); + + fileCache.decRef(filePath); + fileCache.decRef(filePath); + } + + // Test eviction by adding more files to exceed cache capacity + for (int i = 4; i < 8000; i++) { + Path filePath = path.resolve(NodeEnvironment.CACHE_FOLDER) + .resolve("indexName") + .resolve("shardId") + .resolve(Integer.toString(i)) + .resolve(RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION); + createFile("indexName", "shardId/".concat(Integer.toString(i)), "test_file"); + FSDirectory fsDirectory = FSDirectory.open(filePath); + FileCachedIndexInput fileCachedIndexInput = new FileCachedIndexInput( + fileCache, + filePath, + fsDirectory.openInput("test_file", IOContext.DEFAULT) + ); + fileCache.put(filePath.resolve("test_file"), new CachedFullFileIndexInput(fileCache, filePath, fileCachedIndexInput)); + } + + // Verify some of the original files were evicted + boolean someEvicted = false; + for (int i = 0; i < 8000; i++) { + Path filePath = path.resolve(NodeEnvironment.CACHE_FOLDER) + .resolve("indexName") + .resolve("shardId") + .resolve(Integer.toString(i)) + .resolve(RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION); + if (fileCache.get(filePath) == null) { + someEvicted = true; + break; + } + } + assertTrue("Expected some files to be evicted", someEvicted); + } + public void testGetThrowException() { 
assertThrows(NullPointerException.class, () -> { FileCache fileCache = createFileCache(MEGA_BYTES); @@ -249,10 +313,13 @@ public void testUsage() { ); putAndDecRef(fileCache, 0, 16 * MEGA_BYTES); - CacheUsage expectedCacheUsage = new CacheUsage(16 * MEGA_BYTES, 0); - CacheUsage realCacheUsage = fileCache.usage(); - assertEquals(expectedCacheUsage.activeUsage(), realCacheUsage.activeUsage()); - assertEquals(expectedCacheUsage.usage(), realCacheUsage.usage()); + long expectedCacheUsage = 16 * MEGA_BYTES; + long expectedActiveCacheUsage = 0; + long realCacheUsage = fileCache.usage(); + long realActiveCacheUsage = fileCache.activeUsage(); + + assertEquals(expectedCacheUsage, realCacheUsage); + assertEquals(expectedActiveCacheUsage, realActiveCacheUsage); } public void testStats() { @@ -283,11 +350,11 @@ public void testCacheRestore() throws IOException { String shardId = "0"; createFile(indexName, shardId, "test.0"); FileCache fileCache = createFileCache(MEGA_BYTES); - assertEquals(0, fileCache.usage().usage()); + assertEquals(0, fileCache.usage()); Path fileCachePath = path.resolve(NodeEnvironment.CACHE_FOLDER).resolve(indexName).resolve(shardId); fileCache.restoreFromDirectory(List.of(fileCachePath)); - assertTrue(fileCache.usage().usage() > 0); - assertEquals(0, fileCache.usage().activeUsage()); + assertTrue(fileCache.usage() > 0); + assertEquals(0, fileCache.activeUsage()); } private void putAndDecRef(FileCache cache, int path, long indexInputSize) { diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java index 258bc2db4c5d0..ce0a4d7bf3c02 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java @@ -74,6 +74,6 @@ public void testSlice() throws IOException { } protected boolean isActiveAndTotalUsageSame() { - return fileCache.usage().activeUsage() == fileCache.usage().usage(); + return fileCache.activeUsage() == fileCache.usage(); } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTestCase.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTestCase.java index 1eae5119ab462..668eac51b1b81 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTestCase.java @@ -67,10 +67,10 @@ protected static byte[] createData() { public void testSingleAccess() throws Exception { try (IndexInput i = fetchBlobWithName("file")) { assertIndexInputIsFunctional(i); - MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo((long) EIGHT_MB)); + MatcherAssert.assertThat(fileCache.activeUsage(), equalTo((long) EIGHT_MB)); } - MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); - MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB)); + MatcherAssert.assertThat(fileCache.activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage(), equalTo((long) EIGHT_MB)); } public void testConcurrentAccess() throws Exception { @@ -152,8 +152,8 @@ public void testDownloadFails() throws Exception { IOException.class, () -> transferManager.fetchBlob(BlobFetchRequest.builder().fileName("file").directory(directory).blobParts(blobParts).build()) ); - 
MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); - MatcherAssert.assertThat(fileCache.usage().usage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage(), equalTo(0L)); } public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java index abff2ec923ddd..0d81f023544d4 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java @@ -27,22 +27,23 @@ public void testBasicGetAndPutAndRemove() { assertNull(refCountedCache.get("1")); } public void testUsageWithIncrementAndDecrement() { refCountedCache.put("1", 10L); - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(10L, refCountedCache.usage().activeUsage()); + assertEquals(10L, refCountedCache.usage()); + assertEquals(10L, refCountedCache.activeUsage()); assertNotNull(refCountedCache.getRef("1")); assertEquals(1, (int) refCountedCache.getRef("1")); refCountedCache.decRef("1"); - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); + assertEquals(10L, refCountedCache.usage()); + assertEquals(0L, refCountedCache.activeUsage()); assertNotNull(refCountedCache.getRef("1")); assertEquals(0, (int) refCountedCache.getRef("1")); refCountedCache.incRef("1"); - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(10L, refCountedCache.usage().activeUsage()); + assertEquals(10L, refCountedCache.usage()); + assertEquals(10L, refCountedCache.activeUsage()); assertNotNull(refCountedCache.getRef("1")); assertEquals(1, (int) refCountedCache.getRef("1")); @@ -64,20 +65,20 @@ public void testEviction() { assertNotNull(refCountedCache.get("4")); assertNotNull(refCountedCache.get("5")); - assertEquals(75L, refCountedCache.usage().usage()); - assertEquals(75L, refCountedCache.usage().activeUsage()); + assertEquals(75L, refCountedCache.usage()); + assertEquals(75L, refCountedCache.activeUsage()); } public void testComputeRemoveWhenExists() { refCountedCache.put("1", 25L); refCountedCache.decRef("1"); - assertEquals(0, refCountedCache.usage().activeUsage()); - assertEquals(25L, refCountedCache.usage().usage()); + assertEquals(0, refCountedCache.activeUsage()); + assertEquals(25L, refCountedCache.usage()); assertNull(refCountedCache.compute("1", (k, v) -> null)); assertNull(refCountedCache.get("1")); - assertEquals(0, refCountedCache.usage().activeUsage()); - assertEquals(0L, refCountedCache.usage().usage()); + assertEquals(0, refCountedCache.activeUsage()); + assertEquals(0L, refCountedCache.usage()); } public void testComputeRemoveWhenNotExists() { @@ -120,8 +121,8 @@ public void testActiveUsageGreaterThanCapacity() { final String key = Integer.toString(i); refCountedCache.put(key, 25L); } - assertEquals(125L, refCountedCache.usage().usage()); - assertEquals(125L, refCountedCache.usage().activeUsage()); + assertEquals(125L, refCountedCache.usage()); + assertEquals(125L, refCountedCache.activeUsage()); } public void testReferenceCountingItemsThatDoNotExist() { @@ -132,8 +133,8 @@ public void testReferenceCountingItemsThatDoNotExist() { assertUsage(0, 0); refCountedCache.decRef("1");
assertNull(refCountedCache.get("1")); - assertEquals(0L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); + assertEquals(0L, refCountedCache.usage()); + assertEquals(0L, refCountedCache.activeUsage()); } public void testPrune() { @@ -226,13 +227,13 @@ public void testClear() { refCountedCache.put("1", 10L); refCountedCache.put("2", 10L); refCountedCache.put("3", 10L); - assertEquals(30L, refCountedCache.usage().usage()); + assertEquals(30L, refCountedCache.usage()); refCountedCache.clear(); - assertEquals(0L, refCountedCache.usage().usage()); + assertEquals(0L, refCountedCache.usage()); } private void assertUsage(long usage, long activeUsage) { - assertEquals(usage, refCountedCache.usage().usage()); - assertEquals(activeUsage, refCountedCache.usage().activeUsage()); + assertEquals(usage, refCountedCache.usage()); + assertEquals(activeUsage, refCountedCache.activeUsage()); } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 63fba6726716a..aa30eda474ef1 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -195,6 +195,8 @@ import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats; +import org.opensearch.index.store.remote.filecache.AggregateFileCacheStats.FileCacheStatsType; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; @@ -459,9 +461,17 @@ public void testSearchableSnapshotOverSubscription() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - Map nodeFileCacheStats = new HashMap<>(); + Map nodeFileCacheStats = new HashMap<>(); for (TestClusterNodes.TestClusterNode node : testClusterNodes.nodes.values()) { - nodeFileCacheStats.put(node.node.getId(), new FileCacheStats(0, 1, 0, 0, 0, 0, 0)); + nodeFileCacheStats.put( + node.node.getId(), + new AggregateFileCacheStats( + 0, + new FileCacheStats(1, 0, 0, 0, 0, 0, FileCacheStatsType.OVER_ALL_STATS), + new FileCacheStats(0, 0, 0, 0, 0, 0, FileCacheStatsType.FULL_FILE_STATS), + new FileCacheStats(1, 0, 0, 0, 0, 0, FileCacheStatsType.BLOCK_FILE_STATS) + ) + ); } ClusterInfo clusterInfo = new ClusterInfo(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), nodeFileCacheStats); testClusterNodes.nodes.values().forEach(node -> when(node.getMockClusterInfoService().getClusterInfo()).thenReturn(clusterInfo));
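One recurring change across this diff is worth calling out for downstream callers: FileCache no longer hands back a CacheUsage wrapper, so usage and active usage are read directly as longs. A minimal before/after sketch of the call-site migration seen in FsProbe, FileCacheTests, and TransferManagerTestCase (the fileCache variable is assumed to be an existing org.opensearch.index.store.remote.filecache.FileCache instance):

// Call-site migration pattern shown throughout this diff.
long utilized = fileCache.usage();        // previously: fileCache.usage().usage()
long active   = fileCache.activeUsage();  // previously: fileCache.usage().activeUsage()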