Add shard write-load to cluster info #131496
ClusterInfo.java

@@ -41,7 +41,7 @@
 /**
  * ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
- * and a map of shard ids to shard sizes, see
+ * and a map of shard ids to shard sizes and shard write-loads, see
  * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
  * for the key used in the shardSizes map
  */
@@ -59,9 +59,10 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
     final Map<NodeAndPath, ReservedSpace> reservedSpace;
     final Map<String, EstimatedHeapUsage> estimatedHeapUsages;
     final Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools;
+    final Map<ShardId, Double> shardWriteLoads;

     protected ClusterInfo() {
-        this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of());
+        this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of());
     }

Reviewer: The class comment further up ^ could use an update at this point.
Author: Fixed in cfe5759.
@@ -85,7 +86,8 @@ public ClusterInfo(
         Map<NodeAndShard, String> dataPath,
         Map<NodeAndPath, ReservedSpace> reservedSpace,
         Map<String, EstimatedHeapUsage> estimatedHeapUsages,
-        Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools
+        Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools,
+        Map<ShardId, Double> shardWriteLoads
     ) {
         this.leastAvailableSpaceUsage = Map.copyOf(leastAvailableSpaceUsage);
         this.mostAvailableSpaceUsage = Map.copyOf(mostAvailableSpaceUsage);

@@ -95,6 +97,7 @@ public ClusterInfo(
         this.reservedSpace = Map.copyOf(reservedSpace);
         this.estimatedHeapUsages = Map.copyOf(estimatedHeapUsages);
         this.nodeUsageStatsForThreadPools = Map.copyOf(nodeUsageStatsForThreadPools);
+        this.shardWriteLoads = Map.copyOf(shardWriteLoads);
     }

     public ClusterInfo(StreamInput in) throws IOException {
@@ -116,6 +119,11 @@ public ClusterInfo(StreamInput in) throws IOException {
         } else {
             this.nodeUsageStatsForThreadPools = Map.of();
         }
+        if (in.getTransportVersion().onOrAfter(TransportVersions.SHARD_WRITE_LOAD_IN_CLUSTER_INFO)) {
+            this.shardWriteLoads = in.readImmutableMap(ShardId::new, StreamInput::readDouble);
+        } else {
+            this.shardWriteLoads = Map.of();
+        }
     }

     @Override
@@ -136,6 +144,9 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.NODE_USAGE_STATS_FOR_THREAD_POOLS_IN_CLUSTER_INFO)) {
             out.writeMap(this.nodeUsageStatsForThreadPools, StreamOutput::writeWriteable);
         }
+        if (out.getTransportVersion().onOrAfter(TransportVersions.SHARD_WRITE_LOAD_IN_CLUSTER_INFO)) {
+            out.writeMap(this.shardWriteLoads, StreamOutput::writeWriteable, StreamOutput::writeDouble);
+        }
     }
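Both ends of the stream are gated on the same TransportVersions.SHARD_WRITE_LOAD_IN_CLUSTER_INFO constant, so mixed-version clusters agree on the wire layout: an older sender never writes the new map, and a newer reader defaults it to an empty map when the stream predates the version. A hedged, test-style round-trip sketch of that behaviour (the helper method and the choice of older version are illustrative, not part of this PR):

    import java.io.IOException;
    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.cluster.ClusterInfo;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    // Sketch: pin the stream to a version before SHARD_WRITE_LOAD_IN_CLUSTER_INFO
    // and verify the new map is silently dropped on the old wire format.
    static void assertWriteLoadsDroppedOnOldWire(ClusterInfo clusterInfo) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // Any transport version older than SHARD_WRITE_LOAD_IN_CLUSTER_INFO works here.
            out.setTransportVersion(TransportVersions.NODE_USAGE_STATS_FOR_THREAD_POOLS_IN_CLUSTER_INFO);
            clusterInfo.writeTo(out);
            StreamInput in = out.bytes().streamInput();
            in.setTransportVersion(out.getTransportVersion());
            // The reader takes the else-branch above and substitutes Map.of().
            assert new ClusterInfo(in).getShardWriteLoads().isEmpty();
        }
    }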
@@ -216,7 +227,7 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params
                 return builder.endObject(); // NodeAndPath
             }),
             endArray() // end "reserved_sizes"
-            // NOTE: We don't serialize estimatedHeapUsages/nodeUsageStatsForThreadPools at this stage, to avoid
+            // NOTE: We don't serialize estimatedHeapUsages/nodeUsageStatsForThreadPools/shardWriteLoads at this stage, to avoid
             // committing to API payloads until the features are settled
         );
     }
@@ -255,6 +266,16 @@ public Map<String, DiskUsage> getNodeMostAvailableDiskUsages() {
         return this.mostAvailableSpaceUsage;
     }

+    /**
+     * Returns a map of shard IDs to the write-loads for use in balancing. The write-loads can be interpreted
+     * as the average number of threads that ingestion to the shard will consume.
+     * This information may be partial or missing altogether under some circumstances. The absence of a shard
+     * write load from the map should be interpreted as "unknown".
+     */
+    public Map<ShardId, Double> getShardWriteLoads() {
+        return shardWriteLoads;
+    }
+
     /**
      * Returns the shard size for the given shardId or <code>null</code> if that metric is not available.
      */

Reviewer: Update the equals and hashCode methods too?
Author: I don't like that we add equals and hashCode just for testing, but I updated them in cd3169f.
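Because absence from the map means "unknown" rather than zero, callers should branch on a missing entry explicitly. A minimal consumer sketch (the helper class and fallback policy are illustrative, not part of this PR):

    import org.elasticsearch.cluster.ClusterInfo;
    import org.elasticsearch.index.shard.ShardId;

    // Illustrative helper: read a shard's write-load, distinguishing
    // "unknown" (absent from the map) from a genuine zero load.
    final class ShardWriteLoadReader {
        static double writeLoadOrDefault(ClusterInfo info, ShardId shardId, double fallback) {
            // getShardWriteLoads() is immutable; a missing key means the load
            // was not reported, so fall back rather than assuming 0.0.
            final Double load = info.getShardWriteLoads().get(shardId);
            return load == null ? fallback : load;
        }
    }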
@@ -331,7 +352,9 @@ public boolean equals(Object o) {
             && shardDataSetSizes.equals(that.shardDataSetSizes)
             && dataPath.equals(that.dataPath)
             && reservedSpace.equals(that.reservedSpace)
-            && nodeUsageStatsForThreadPools.equals(that.nodeUsageStatsForThreadPools);
+            && estimatedHeapUsages.equals(that.estimatedHeapUsages)
+            && nodeUsageStatsForThreadPools.equals(that.nodeUsageStatsForThreadPools)
+            && shardWriteLoads.equals(that.shardWriteLoads);
     }

     @Override
@@ -343,7 +366,9 @@ public int hashCode() {
             shardDataSetSizes,
             dataPath,
             reservedSpace,
-            nodeUsageStatsForThreadPools
+            estimatedHeapUsages,
+            nodeUsageStatsForThreadPools,
+            shardWriteLoads
         );
     }
@@ -466,6 +491,7 @@ public static class Builder {
         private Map<NodeAndPath, ReservedSpace> reservedSpace = Map.of();
         private Map<String, EstimatedHeapUsage> estimatedHeapUsages = Map.of();
         private Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools = Map.of();
+        private Map<ShardId, Double> shardWriteLoads = Map.of();

         public ClusterInfo build() {
             return new ClusterInfo(
@@ -476,7 +502,8 @@ public ClusterInfo build() {
             dataPath,
             reservedSpace,
             estimatedHeapUsages,
-            nodeUsageStatsForThreadPools
+            nodeUsageStatsForThreadPools,
+            shardWriteLoads
         );
     }
@@ -519,5 +546,10 @@ public Builder nodeUsageStatsForThreadPools(Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools) {
         this.nodeUsageStatsForThreadPools = nodeUsageStatsForThreadPools;
         return this;
     }
+
+    public Builder shardWriteLoads(Map<ShardId, Double> shardWriteLoads) {
+        this.shardWriteLoads = shardWriteLoads;
+        return this;
+    }
 }
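For reference, a caller (for example a test) could thread write-loads through the new builder method roughly as below. The index name, UUID, and load value are made up, and this assumes the Builder's default constructor is accessible as declared above:

    import java.util.Map;
    import org.elasticsearch.cluster.ClusterInfo;
    import org.elasticsearch.index.shard.ShardId;

    // Sketch: construct a ClusterInfo carrying a single shard write-load.
    static ClusterInfo clusterInfoWithWriteLoad() {
        return new ClusterInfo.Builder()
            .shardWriteLoads(Map.of(new ShardId("my-index", "index-uuid", 0), 1.5))
            .build();
    }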
ClusterInfoSimulator.java

@@ -159,7 +159,8 @@ public ClusterInfo getClusterInfo() {
             dataPath,
             Map.of(),
             estimatedHeapUsages,
-            nodeThreadPoolUsageStats
+            nodeThreadPoolUsageStats,
+            allocation.clusterInfo().getShardWriteLoads()
         );
     }
 }

Reviewer: Should this be saved as a private variable initialized in the ClusterInfoSimulator constructor, similar to estimatedHeapUsages and nodeThreadPoolUsageStats?
Author: No, we won't use it at this level in the simulator; it is just passed through.
InternalClusterInfoService.java

@@ -38,6 +38,7 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.shard.IndexingStats;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -215,7 +216,7 @@ void execute() {
         logger.trace("starting async refresh");

         try (var ignoredRefs = fetchRefs) {
-            maybeFetchIndicesStats(diskThresholdEnabled);
+            maybeFetchIndicesStats(diskThresholdEnabled || writeLoadConstraintEnabled == WriteLoadDeciderStatus.ENABLED);
             maybeFetchNodeStats(diskThresholdEnabled || estimatedHeapThresholdEnabled);
             maybeFetchNodesEstimatedHeapUsage(estimatedHeapThresholdEnabled);
             maybeFetchNodesUsageStatsForThreadPools(writeLoadConstraintEnabled);
@@ -301,7 +302,14 @@ public void onFailure(Exception e) {
     private void fetchIndicesStats() {
         final IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
         indicesStatsRequest.clear();
-        indicesStatsRequest.store(true);
+        if (diskThresholdEnabled) {
+            // This returns the shard sizes on disk
+            indicesStatsRequest.store(true);
+        }
+        if (writeLoadConstraintEnabled == WriteLoadDeciderStatus.ENABLED) {
+            // This returns the shard write-loads
+            indicesStatsRequest.indexing(true);
+        }
         indicesStatsRequest.indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_CLOSED_HIDDEN);
         indicesStatsRequest.timeout(fetchTimeout);
         client.admin()
@@ -350,13 +358,15 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
         }

         final ShardStats[] stats = indicesStatsResponse.getShards();
+        final Map<ShardId, Double> shardWriteLoadByIdentifierBuilder = new HashMap<>();
         final Map<String, Long> shardSizeByIdentifierBuilder = new HashMap<>();
         final Map<ShardId, Long> shardDataSetSizeBuilder = new HashMap<>();
         final Map<ClusterInfo.NodeAndShard, String> dataPath = new HashMap<>();
         final Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace.Builder> reservedSpaceBuilders =
             new HashMap<>();
         buildShardLevelInfo(
             adjustShardStats(stats),
+            shardWriteLoadByIdentifierBuilder,
             shardSizeByIdentifierBuilder,
             shardDataSetSizeBuilder,
             dataPath,
@@ -370,7 +380,8 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
             Map.copyOf(shardSizeByIdentifierBuilder),
             Map.copyOf(shardDataSetSizeBuilder),
             Map.copyOf(dataPath),
-            Map.copyOf(reservedSpace)
+            Map.copyOf(reservedSpace),
+            Map.copyOf(shardWriteLoadByIdentifierBuilder)
         );
     }
@@ -527,8 +538,6 @@ public ClusterInfo getClusterInfo() {
                 estimatedHeapUsages.put(nodeId, new EstimatedHeapUsage(nodeId, maxHeapSize.getBytes(), estimatedHeapUsage));
             }
         });
-        final Map<String, NodeUsageStatsForThreadPools> nodeThreadPoolUsageStats = new HashMap<>();
-        nodeThreadPoolUsageStatsPerNode.forEach((nodeId, nodeWriteLoad) -> { nodeThreadPoolUsageStats.put(nodeId, nodeWriteLoad); });

         return new ClusterInfo(
             leastAvailableSpaceUsages,
             mostAvailableSpaceUsages,

Author: This appeared to be just a copy, which already happens in the ClusterInfo constructor.
@@ -537,7 +546,8 @@ public ClusterInfo getClusterInfo() {
             indicesStatsSummary.dataPath,
             indicesStatsSummary.reservedSpace,
             estimatedHeapUsages,
-            nodeThreadPoolUsageStats
+            nodeThreadPoolUsageStatsPerNode,
+            indicesStatsSummary.shardWriteLoads()
         );
     }
@@ -567,6 +577,7 @@ public void addListener(Consumer<ClusterInfo> clusterInfoConsumer) {

     static void buildShardLevelInfo(
         ShardStats[] stats,
+        Map<ShardId, Double> shardWriteLoads,
         Map<String, Long> shardSizes,
         Map<ShardId, Long> shardDataSetSizeBuilder,
         Map<ClusterInfo.NodeAndShard, String> dataPathByShard,
@@ -577,25 +588,31 @@ static void buildShardLevelInfo(
             dataPathByShard.put(ClusterInfo.NodeAndShard.from(shardRouting), s.getDataPath());

             final StoreStats storeStats = s.getStats().getStore();
-            if (storeStats == null) {
-                continue;
-            }
-            final long size = storeStats.sizeInBytes();
-            final long dataSetSize = storeStats.totalDataSetSizeInBytes();
-            final long reserved = storeStats.reservedSizeInBytes();
-
-            final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting);
-            logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved);
-            shardSizes.put(shardIdentifier, size);
-            if (dataSetSize > shardDataSetSizeBuilder.getOrDefault(shardRouting.shardId(), -1L)) {
-                shardDataSetSizeBuilder.put(shardRouting.shardId(), dataSetSize);
-            }
-            if (reserved != StoreStats.UNKNOWN_RESERVED_BYTES) {
-                final ClusterInfo.ReservedSpace.Builder reservedSpaceBuilder = reservedSpaceByShard.computeIfAbsent(
-                    new ClusterInfo.NodeAndPath(shardRouting.currentNodeId(), s.getDataPath()),
-                    t -> new ClusterInfo.ReservedSpace.Builder()
-                );
-                reservedSpaceBuilder.add(shardRouting.shardId(), reserved);
+            if (storeStats != null) {
+                final long size = storeStats.sizeInBytes();
+                final long dataSetSize = storeStats.totalDataSetSizeInBytes();
+                final long reserved = storeStats.reservedSizeInBytes();
+
+                final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting);
+                logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved);
+                shardSizes.put(shardIdentifier, size);
+                if (dataSetSize > shardDataSetSizeBuilder.getOrDefault(shardRouting.shardId(), -1L)) {
+                    shardDataSetSizeBuilder.put(shardRouting.shardId(), dataSetSize);
+                }
+                if (reserved != StoreStats.UNKNOWN_RESERVED_BYTES) {
+                    final ClusterInfo.ReservedSpace.Builder reservedSpaceBuilder = reservedSpaceByShard.computeIfAbsent(
+                        new ClusterInfo.NodeAndPath(shardRouting.currentNodeId(), s.getDataPath()),
+                        t -> new ClusterInfo.ReservedSpace.Builder()
+                    );
+                    reservedSpaceBuilder.add(shardRouting.shardId(), reserved);
+                }
+            }
+            final IndexingStats indexingStats = s.getStats().getIndexing();
+            if (indexingStats != null) {
+                final double shardWriteLoad = indexingStats.getTotal().getPeakWriteLoad();
+                if (shardWriteLoad > shardWriteLoads.getOrDefault(shardRouting.shardId(), -1.0)) {
+                    shardWriteLoads.put(shardRouting.shardId(), shardWriteLoad);
+                }
             }
         }
@@ -623,9 +640,10 @@ private record IndicesStatsSummary(
         Map<String, Long> shardSizes,
         Map<ShardId, Long> shardDataSetSizes,
         Map<ClusterInfo.NodeAndShard, String> dataPath,
-        Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> reservedSpace
+        Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> reservedSpace,
+        Map<ShardId, Double> shardWriteLoads
     ) {
-        static final IndicesStatsSummary EMPTY = new IndicesStatsSummary(Map.of(), Map.of(), Map.of(), Map.of());
+        static final IndicesStatsSummary EMPTY = new IndicesStatsSummary(Map.of(), Map.of(), Map.of(), Map.of(), Map.of());
     }
Reviewer: Shouldn't this assert be per index, since all the indices received writes?
Author: No, we insert between 1 and 500 documents in indices with between 1 and 3 shards, so it's possible some shards will not be written to. This is just a sanity check anyhow.
Reviewer: I don't think I follow. Some shards won't receive writes, I agree. But all indices will have writes (to at least one shard), and thus maximumLoadRecorded would have a value per index.
Author: Oh sorry, you're right. For some reason I read that as per shard. Indeed it could be tightened up to be per index. I'll put up a small PR to do that.
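A sketch of the tightened per-index check the thread converges on, assuming the test's maximumLoadRecorded map is keyed by ShardId and the test knows which index names it wrote to (both come from the test, which is outside this diff):

    // Hypothetical follow-up: every index received writes, so at least one
    // shard per index should have a recorded write-load.
    for (String indexName : indexNames) {
        final boolean indexHasRecordedLoad = maximumLoadRecorded.keySet()
            .stream()
            .anyMatch(shardId -> shardId.getIndexName().equals(indexName));
        assert indexHasRecordedLoad : "no write-load recorded for index " + indexName;
    }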