|
| 1 | +// Licensed to the .NET Foundation under one or more agreements. |
| 2 | +// The .NET Foundation licenses this file to you under the MIT license. |
| 3 | + |
| 4 | +using System.Collections.Generic; |
| 5 | +using System.Diagnostics; |
| 6 | +using System.Runtime.CompilerServices; |
| 7 | +using System.Threading.Tasks; |
| 8 | + |
| 9 | +namespace System.Threading.RateLimiting |
| 10 | +{ |
| 11 | + /// <summary> |
| 12 | + /// Contains methods to assist with creating a <see cref="PartitionedRateLimiter{TResource}"/>. |
| 13 | + /// </summary> |
| 14 | + public static class PartitionedRateLimiter |
| 15 | + { |
| 16 | + /// <summary> |
| 17 | + /// Method used to create a default implementation of <see cref="PartitionedRateLimiter{TResource}"/>. |
| 18 | + /// </summary> |
| 19 | + /// <typeparam name="TResource">The resource type that is being rate limited.</typeparam> |
| 20 | + /// <typeparam name="TPartitionKey">The type to distinguish partitions with.</typeparam> |
| 21 | + /// <param name="partitioner">Method called every time an Acquire or WaitAsync call is made to figure out what rate limiter to apply to the request. |
| 22 | + /// If the <see cref="RateLimitPartition{TKey}.PartitionKey"/> matches a cached entry then the rate limiter previously used for that key is used. Otherwise, the factory is called to get a new rate limiter.</param> |
| 23 | + /// <param name="equalityComparer">Optional <see cref="IEqualityComparer{T}"/> to customize the comparison logic for <typeparamref name="TPartitionKey"/>.</param> |
| 24 | + /// <returns></returns> |
| 25 | + public static PartitionedRateLimiter<TResource> Create<TResource, TPartitionKey>( |
| 26 | + Func<TResource, RateLimitPartition<TPartitionKey>> partitioner, |
| 27 | + IEqualityComparer<TPartitionKey>? equalityComparer = null) where TPartitionKey : notnull |
| 28 | + { |
| 29 | + return new DefaultPartitionedRateLimiter<TResource, TPartitionKey>(partitioner, equalityComparer); |
| 30 | + } |
| 31 | + } |
| 32 | + |
    /// <summary>
    /// Default implementation backing <see cref="PartitionedRateLimiter.Create{TResource, TPartitionKey}"/>.
    /// Lazily creates one <see cref="RateLimiter"/> per partition key and periodically calls
    /// <see cref="ReplenishingRateLimiter.TryReplenish"/> on the replenishing ones from a shared timer.
    /// </summary>
    internal sealed class DefaultPartitionedRateLimiter<TResource, TKey> : PartitionedRateLimiter<TResource> where TKey : notnull
    {
        // User-supplied callback mapping a resource to its partition (key + limiter factory).
        private readonly Func<TResource, RateLimitPartition<TKey>> _partitioner;

        // TODO: Look at ConcurrentDictionary to try and avoid a global lock
        private Dictionary<TKey, Lazy<RateLimiter>> _limiters;
        private bool _disposed;
        // Completed once the inner limiters have actually been disposed, so that a second
        // concurrent Dispose/DisposeAsync call can wait for the first one to finish.
        // RunContinuationsAsynchronously avoids running waiters' continuations inline on the disposing thread.
        private TaskCompletionSource<object?> _disposeComplete = new(TaskCreationOptions.RunContinuationsAsynchronously);

        // Used by the Timer to call TryReplenish on ReplenishingRateLimiters
        // We use a separate list to avoid running TryReplenish (which might be user code) inside our lock
        // And we cache the list to amortize the allocation cost to as close to 0 as we can get
        private List<Lazy<RateLimiter>> _cachedLimiters = new();
        // Set under Lock whenever _limiters gains an entry; tells the timer to rebuild _cachedLimiters.
        private bool _cacheInvalid;
        private TimerAwaitable _timer;
        // Long-running loop awaiting _timer ticks; Dispose/DisposeAsync wait on this to ensure the timer has fully stopped.
        private Task _timerTask;

        // Use the Dictionary as the lock field so we don't need to allocate another object for a lock and have another field in the object
        private object Lock => _limiters;

        public DefaultPartitionedRateLimiter(Func<TResource, RateLimitPartition<TKey>> partitioner,
            IEqualityComparer<TKey>? equalityComparer = null)
        {
            _limiters = new Dictionary<TKey, Lazy<RateLimiter>>(equalityComparer);
            _partitioner = partitioner;

            // TODO: Figure out what interval we should use
            _timer = new TimerAwaitable(TimeSpan.FromMilliseconds(100), TimeSpan.FromMilliseconds(100));
            _timerTask = RunTimer();
        }

        // Background loop: runs Replenish on every timer tick until the timer is stopped
        // (CommonDispose calls _timer.Stop(), which presumably makes `await _timer` yield false — confirm against TimerAwaitable).
        private async Task RunTimer()
        {
            _timer.Start();
            while (await _timer)
            {
                try
                {
                    Replenish(this);
                }
                // Swallow so one faulty user limiter cannot kill the shared replenish loop.
                // TODO: Can we log to EventSource or somewhere? Maybe dispatch throwing the exception so it is at least an unhandled exception?
                catch { }
            }
            _timer.Dispose();
        }

        public override int GetAvailablePermits(TResource resourceID)
        {
            return GetRateLimiter(resourceID).GetAvailablePermits();
        }

        protected override RateLimitLease AcquireCore(TResource resourceID, int permitCount)
        {
            return GetRateLimiter(resourceID).Acquire(permitCount);
        }

        protected override ValueTask<RateLimitLease> WaitAsyncCore(TResource resourceID, int permitCount, CancellationToken cancellationToken)
        {
            return GetRateLimiter(resourceID).WaitAsync(permitCount, cancellationToken);
        }

        // Resolves (creating on first use) the RateLimiter for the partition that resourceID maps to.
        // Note: the partitioner callback runs outside the lock; only the dictionary lookup/insert is locked.
        private RateLimiter GetRateLimiter(TResource resourceID)
        {
            RateLimitPartition<TKey> partition = _partitioner(resourceID);
            Lazy<RateLimiter>? limiter;
            lock (Lock)
            {
                ThrowIfDisposed();
                if (!_limiters.TryGetValue(partition.PartitionKey, out limiter))
                {
                    // Using Lazy avoids calling user code (partition.Factory) inside the lock
                    limiter = new Lazy<RateLimiter>(() => partition.Factory(partition.PartitionKey));
                    _limiters.Add(partition.PartitionKey, limiter);
                    // Cache is invalid now
                    _cacheInvalid = true;
                }
            }
            // Materialize outside the lock; Lazy's default thread-safety mode ensures the
            // factory runs at most once even if multiple threads race here.
            return limiter.Value;
        }

        protected override void Dispose(bool disposing)
        {
            if (!disposing)
            {
                return;
            }

            bool alreadyDisposed = CommonDispose();

            // Block until the timer loop has exited so Replenish can no longer touch our state.
            _timerTask.GetAwaiter().GetResult();
            _cachedLimiters.Clear();

            if (alreadyDisposed)
            {
                // Another Dispose/DisposeAsync call won the race; wait for it to finish disposing the limiters.
                _disposeComplete.Task.GetAwaiter().GetResult();
                return;
            }

            // Safe to access _limiters outside the lock
            // The timer is no longer running and _disposed is set so anyone trying to access fields will be checking that first
            foreach (KeyValuePair<TKey, Lazy<RateLimiter>> limiter in _limiters)
            {
                limiter.Value.Value.Dispose();
            }
            _limiters.Clear();
            // Release any concurrent disposer waiting in the alreadyDisposed branch above.
            _disposeComplete.TrySetResult(null);
        }

        // Async twin of Dispose(bool); same sequence, but awaits the timer task and each limiter's DisposeAsync.
        protected override async ValueTask DisposeAsyncCore()
        {
            bool alreadyDisposed = CommonDispose();

            await _timerTask.ConfigureAwait(false);
            _cachedLimiters.Clear();

            if (alreadyDisposed)
            {
                await _disposeComplete.Task.ConfigureAwait(false);
                return;
            }

            foreach (KeyValuePair<TKey, Lazy<RateLimiter>> limiter in _limiters)
            {
                await limiter.Value.Value.DisposeAsync().ConfigureAwait(false);
            }
            _limiters.Clear();
            _disposeComplete.TrySetResult(null);
        }

        // This handles the common state changes that Dispose and DisposeAsync need to do, the individual limiters still need to be Disposed after this call
        // Returns true if another caller already started disposal (caller should then wait on _disposeComplete).
        private bool CommonDispose()
        {
            lock (Lock)
            {
                if (_disposed)
                {
                    return true;
                }
                _disposed = true;
                _timer.Stop();
            }
            return false;
        }

        private void ThrowIfDisposed()
        {
            if (_disposed)
            {
                // Report the public factory type's name rather than this internal implementation type.
                throw new ObjectDisposedException(nameof(PartitionedRateLimiter));
            }
        }

        // Timer callback body: rebuilds the cached list of replenishing limiters if needed (under the lock),
        // then invokes TryReplenish on each cached limiter outside the lock.
        private static void Replenish(DefaultPartitionedRateLimiter<TResource, TKey> limiter)
        {
            lock (limiter.Lock)
            {
                if (limiter._disposed)
                {
                    return;
                }

                // If the cache has been invalidated we need to recreate it
                if (limiter._cacheInvalid)
                {
                    limiter._cachedLimiters.Clear();
                    bool cacheStillInvalid = false;
                    foreach (KeyValuePair<TKey, Lazy<RateLimiter>> kvp in limiter._limiters)
                    {
                        if (kvp.Value.IsValueCreated)
                        {
                            // Only ReplenishingRateLimiters need timer-driven replenishment; skip the rest.
                            if (kvp.Value.Value is ReplenishingRateLimiter)
                            {
                                limiter._cachedLimiters.Add(kvp.Value);
                            }
                        }
                        else
                        {
                            // In rare cases the RateLimiter will be added to the storage but not be initialized yet
                            // keep cache invalid if there was a non-initialized RateLimiter
                            // the next time we run the timer the cache will be updated
                            // with the initialized RateLimiter
                            cacheStillInvalid = true;
                        }
                    }
                    limiter._cacheInvalid = cacheStillInvalid;
                }
            }

            // cachedLimiters is safe to use outside the lock because it is only updated by the Timer
            // and the Timer avoids re-entrancy issues via the _executingTimer field
            foreach (Lazy<RateLimiter> rateLimiter in limiter._cachedLimiters)
            {
                Debug.Assert(rateLimiter.IsValueCreated && rateLimiter.Value is ReplenishingRateLimiter);
                ((ReplenishingRateLimiter)rateLimiter.Value).TryReplenish();
            }
        }
    }
| 230 | +} |
0 commit comments