make locks specific to tasks (#1072)
Before, it was using synchronized methods and the locks were overly
broad. Now the locks are specific to each publish task, which
allows sending to LWC to happen concurrently with sending
to Atlas.
brharrington authored Aug 14, 2023
1 parent ce22fc4 commit 73e5bab
Showing 1 changed file with 28 additions and 8 deletions.
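
For readers skimming the diff, the locking pattern it introduces can be sketched in isolation roughly as follows. This is a minimal illustration only; the PerTaskLocks class name is made up for the example, while AtlasRegistry keeps the equivalent map and helper as private members (publishTaskLocks and timePublishTask):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch (hypothetical class, not the actual AtlasRegistry code):
// each lock name gets its own ReentrantLock, so tasks with different names
// can run concurrently while repeated runs of the same task still serialize.
class PerTaskLocks {
  private final ConcurrentHashMap<String, Lock> locks = new ConcurrentHashMap<>();

  void run(String lockName, Runnable task) {
    Lock lock = locks.computeIfAbsent(lockName, n -> new ReentrantLock());
    lock.lock();
    try {
      task.run();
    } finally {
      lock.unlock();
    }
  }
}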
@@ -48,6 +48,8 @@
 import java.util.TreeMap;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;

@@ -93,6 +95,8 @@ public final class AtlasRegistry extends AbstractRegistry implements AutoCloseable {
   private long lastFlushTimestamp = -1L;
   private final ConcurrentHashMap<Id, Consolidator> atlasMeasurements = new ConcurrentHashMap<>();
 
+  private final ConcurrentHashMap<String, Lock> publishTaskLocks = new ConcurrentHashMap<>();
+
   /** Create a new instance. */
   @Inject
   public AtlasRegistry(Clock clock, AtlasConfig config) {
@@ -244,8 +248,24 @@ private Timer publishTaskTimer(String id) {
     return debugRegistry.timer(PUBLISH_TASK_TIMER, "id", id);
   }
 
-  synchronized void sendToAtlas() {
-    publishTaskTimer("sendToAtlas").record(() -> {
+  private void timePublishTask(String id, Runnable task) {
+    timePublishTask(id, id, task);
+  }
+
+  private void timePublishTask(String id, String lockName, Runnable task) {
+    publishTaskTimer(id).record(() -> {
+      Lock lock = publishTaskLocks.computeIfAbsent(lockName, n -> new ReentrantLock());
+      lock.lock();
+      try {
+        task.run();
+      } finally {
+        lock.unlock();
+      }
+    });
+  }
+
+  void sendToAtlas() {
+    timePublishTask("sendToAtlas", () -> {
       if (config.enabled()) {
         long t = lastCompletedTimestamp(stepMillis);
         if (t > lastFlushTimestamp) {
@@ -272,8 +292,8 @@ synchronized void sendToAtlas() {
     });
   }
 
-  synchronized void sendToLWC() {
-    publishTaskTimer("sendToLWC").record(() -> {
+  void sendToLWC() {
+    timePublishTask("sendToLWC", () -> {
       long t = lastCompletedTimestamp(lwcStepMillis);
       //if (config.enabled() || config.lwcEnabled()) {
       // If either are enabled we poll the meters for each step interval to flush the
@@ -306,8 +326,8 @@ synchronized void sendToLWC() {
   }
 
   /** Collect measurements from all the meters in the registry. */
-  synchronized void pollMeters(long t) {
-    publishTaskTimer("pollMeters").record(() -> {
+  void pollMeters(long t) {
+    timePublishTask("pollMeters", "atlasMeasurements", () -> {
       if (t > lastPollTimestamp) {
         MeasurementConsumer consumer = (id, timestamp, value) -> {
           // Update the map for data to go to the Atlas storage layer
@@ -365,10 +385,10 @@ private void fetchSubscriptions() {
    * Get a list of all consolidated measurements intended to be sent to Atlas and break them
    * into batches.
    */
-  synchronized List<RollupPolicy.Result> getBatches(long t) {
+  List<RollupPolicy.Result> getBatches(long t) {
     final int n = atlasMeasurements.size();
     final List<RollupPolicy.Result> batches = new ArrayList<>(n / batchSize + 1);
-    publishTaskTimer("getBatches").record(() -> {
+    timePublishTask("getBatches", "atlasMeasurements", () -> {
       debugRegistry.distributionSummary("spectator.registrySize").record(n);
       List<Measurement> input = new ArrayList<>(n);
       Iterator<Map.Entry<Id, Consolidator>> it = atlasMeasurements.entrySet().iterator();
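
As a rough usage sketch of why this enables concurrency (reusing the hypothetical PerTaskLocks class from the sketch above, not the real registry), tasks submitted under different lock names can overlap, whereas the old synchronized methods all contended on a single monitor:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class Example {
  public static void main(String[] args) {
    PerTaskLocks locks = new PerTaskLocks();
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // "sendToAtlas" and "sendToLWC" map to different locks, so these can run at the same time.
    pool.submit(() -> locks.run("sendToAtlas", () -> System.out.println("push to Atlas")));
    pool.submit(() -> locks.run("sendToLWC", () -> System.out.println("push to LWC")));
    pool.shutdown();
  }
}

In the actual change, pollMeters and getBatches both pass "atlasMeasurements" as the lock name, so the two tasks that touch the atlasMeasurements map still serialize with each other while the rest of the Atlas and LWC publishing can proceed in parallel.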
