Skip to content

Commit

Permalink
#12681 atomically retain the buffer with a check that it still is in …
Browse files Browse the repository at this point in the history
…the cache, otherwise the retain can fail; also remove the single-threaded shrinking restriction

Signed-off-by: Ludovic Orban <[email protected]>
  • Loading branch information
lorban committed Jan 15, 2025
1 parent 9e0da24 commit 19214f7
Show file tree
Hide file tree
Showing 2 changed files with 130 additions and 39 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ public class CachingHttpContentFactory implements HttpContent.Factory
private final HttpContent.Factory _authority;
private final ConcurrentHashMap<String, CachingHttpContent> _cache = new ConcurrentHashMap<>();
private final AtomicLong _cachedSize = new AtomicLong();
private final AtomicBoolean _shrinking = new AtomicBoolean();
private final ByteBufferPool _bufferPool;
private int _maxCachedFileSize = DEFAULT_MAX_CACHED_FILE_SIZE;
private int _maxCachedFiles = DEFAULT_MAX_CACHED_FILES;
Expand Down Expand Up @@ -149,46 +148,35 @@ public void setUseDirectByteBuffers(boolean useDirectByteBuffers)

private void shrinkCache()
{
// Only 1 thread shrinking at once
if (_shrinking.compareAndSet(false, true))
// While we need to shrink
int numCacheEntries = _cache.size();
while (numCacheEntries > 0 && (numCacheEntries > _maxCachedFiles || _cachedSize.get() > _maxCacheSize))
{
try
// Scan the entire cache and generate an ordered list by last accessed time.
SortedSet<CachingHttpContent> sorted = new TreeSet<>((c1, c2) ->
{
// While we need to shrink
int numCacheEntries = _cache.size();
while (numCacheEntries > 0 && (numCacheEntries > _maxCachedFiles || _cachedSize.get() > _maxCacheSize))
{
// Scan the entire cache and generate an ordered list by last accessed time.
SortedSet<CachingHttpContent> sorted = new TreeSet<>((c1, c2) ->
{
long delta = NanoTime.elapsed(c2.getLastAccessedNanos(), c1.getLastAccessedNanos());
if (delta != 0)
return delta < 0 ? -1 : 1;

delta = c1.getContentLengthValue() - c2.getContentLengthValue();
if (delta != 0)
return delta < 0 ? -1 : 1;

return c1.getKey().compareTo(c2.getKey());
});
sorted.addAll(_cache.values());

// TODO: Can we remove the buffers from the content before evicting.
// Invalidate least recently used first
for (CachingHttpContent content : sorted)
{
if (_cache.size() <= _maxCachedFiles && _cachedSize.get() <= _maxCacheSize)
break;
removeFromCache(content);
}

numCacheEntries = _cache.size();
}
}
finally
long delta = NanoTime.elapsed(c2.getLastAccessedNanos(), c1.getLastAccessedNanos());
if (delta != 0)
return delta < 0 ? -1 : 1;

delta = c1.getContentLengthValue() - c2.getContentLengthValue();
if (delta != 0)
return delta < 0 ? -1 : 1;

return c1.getKey().compareTo(c2.getKey());
});
sorted.addAll(_cache.values());

// TODO: Can we remove the buffers from the content before evicting.
// Invalidate least recently used first
for (CachingHttpContent content : sorted)
{
_shrinking.set(false);
if (_cache.size() <= _maxCachedFiles && _cachedSize.get() <= _maxCacheSize)
break;
removeFromCache(content);
}

numCacheEntries = _cache.size();
}
}

Expand Down Expand Up @@ -253,7 +241,6 @@ public HttpContent getContent(String path) throws IOException
if (!isCacheable(httpContent))
return httpContent;

// The re-mapping function may be run multiple times by compute.
AtomicBoolean added = new AtomicBoolean();
cachingHttpContent = _cache.computeIfAbsent(path, key ->
{
Expand Down Expand Up @@ -418,7 +405,12 @@ public String getKey()
@Override
public boolean retain()
{
return _referenceCount.tryRetain();
// Retain only if the content is still in the cache.
return _cache.computeIfPresent(_cacheKey, (s, cachingHttpContent) ->
{
_referenceCount.retain();
return cachingHttpContent;
}) != null;
}

@Override
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
//
// ========================================================================
// Copyright (c) 1995 Mort Bay Consulting Pty Ltd and others.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// https://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
// which is available at https://www.apache.org/licenses/LICENSE-2.0.
//
// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
// ========================================================================
//

package org.eclipse.jetty.http.content;

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.eclipse.jetty.http.MimeTypes;
import org.eclipse.jetty.io.ArrayByteBufferPool;
import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.toolchain.test.jupiter.WorkDir;
import org.eclipse.jetty.toolchain.test.jupiter.WorkDirExtension;
import org.eclipse.jetty.util.BufferUtil;
import org.eclipse.jetty.util.resource.ResourceFactory;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

@ExtendWith(WorkDirExtension.class)
public class CachingHttpContentFactoryTest
{
    public WorkDir workDir;
    private ArrayByteBufferPool.Tracking trackingPool;
    private ByteBufferPool.Sized sizedPool;

    @BeforeEach
    public void setUp()
    {
        trackingPool = new ArrayByteBufferPool.Tracking();
        sizedPool = new ByteBufferPool.Sized(trackingPool);
    }

    @AfterEach
    public void tearDown()
    {
        // Fail the test if any pooled buffer acquired during the test was never released.
        assertThat("Leaks: " + trackingPool.dumpLeaks(), trackingPool.getLeaks().size(), is(0));
    }

    /**
     * Creates a {@link CachingHttpContentFactory} backed by a directory
     * containing a single {@code file.txt} with the given content.
     *
     * @param fileContent the content to write into {@code file.txt}
     * @return a caching factory over the created directory
     * @throws Exception if the file or factory cannot be created
     */
    private CachingHttpContentFactory newCachingFactory(String fileContent) throws Exception
    {
        Path file = Files.writeString(workDir.getEmptyPathDir().resolve("file.txt"), fileContent);
        ResourceHttpContentFactory resourceHttpContentFactory = new ResourceHttpContentFactory(ResourceFactory.root().newResource(file.getParent()), MimeTypes.DEFAULTS);
        return new CachingHttpContentFactory(resourceHttpContentFactory, sizedPool);
    }

    @Test
    public void testCannotRetainEvictedContent() throws Exception
    {
        CachingHttpContentFactory cachingHttpContentFactory = newCachingFactory("0123456789abcdefghijABCDEFGHIJ");

        CachingHttpContentFactory.CachingHttpContent content = (CachingHttpContentFactory.CachingHttpContent)cachingHttpContentFactory.getContent("file.txt");

        // Empty the cache so 'content' gets released.
        cachingHttpContentFactory.flushCache();

        // Once evicted from the cache, the content must refuse to be retained.
        assertFalse(content.retain());

        // Even if the content cannot be retained, whatever we got from cachingHttpContentFactory must be released.
        content.release();
    }

    @Test
    public void testRetainThenEvictContent() throws Exception
    {
        CachingHttpContentFactory cachingHttpContentFactory = newCachingFactory("0123456789abcdefghijABCDEFGHIJ");

        CachingHttpContentFactory.CachingHttpContent content = (CachingHttpContentFactory.CachingHttpContent)cachingHttpContentFactory.getContent("file.txt");

        // While still cached, retaining must succeed.
        assertTrue(content.retain());

        // Empty the cache so 'content' gets released; the extra retain must keep it usable.
        cachingHttpContentFactory.flushCache();

        // The retained buffer must still hold the file's bytes after eviction.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BufferUtil.writeTo(content.getByteBuffer(), baos);
        assertThat(baos.toString(StandardCharsets.UTF_8), is("0123456789abcdefghijABCDEFGHIJ"));
        // Pair the explicit retain that succeeded.
        content.release();

        // Whatever we got from cachingHttpContentFactory must also be released.
        content.release();
    }
}

0 comments on commit 19214f7

Please sign in to comment.