{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE InstanceSigs #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE GADTs #-}

{-|
Module      : Keter.RateLimiter.Cache
Description : Cache abstraction and in-memory store for rate limiting, with convenient and customisable key management
Copyright   : (c) 2025 Oleksandr Zhabenko
License     : MIT
Maintainer  : oleksandr.zhabenko@yahoo.com
Stability   : stable
Portability : portable

This module is a Haskell port, with some simplifications, of the cache component of rack-attack
https://github.com/rack/rack-attack/blob/main/lib/rack/attack/cache.rb
and follows the structure of the original rack-attack code,
Copyright (c) 2016 by Kickstarter, PBC, under the MIT License.

Oleksandr Zhabenko added, with the help of AI chatbots, several window-algorithm implementations alongside the original counting algorithm: sliding window, token bucket, and leaky bucket. Support for multiple IP zones and for combined use of the algorithms is also provided through convenient wrappers.

This implementation is released under the MIT License.

This module provides a unified cache abstraction layer that supports multiple
rate limiting algorithms and storage backends. It uses advanced Haskell type
system features including GADTs, DataKinds, and functional dependencies to
provide type-safe, algorithm-specific storage while maintaining a common interface.

== Architecture Overview

@
┌─────────────────┐    ┌──────────────────┐    ┌─────────────────┐
│   Algorithm     │    │   Cache Layer    │    │  Storage Backend│
│                 │    │                  │    │                 │
│ • FixedWindow   │◄───┤ • Type Safety    │◄───┤ • InMemoryStore │
│ • SlidingWindow │    │ • Key Prefixing  │    │ • Auto Purging  │
│ • TokenBucket   │    │ • Serialization  │    │ • STM Based     │
│ • LeakyBucket   │    │ • Error Handling │    │ • Thread Safe   │
│ • TinyLRU       │    │                  │    │                 │
└─────────────────┘    └──────────────────┘    └─────────────────┘
@

== Type-Level Algorithm Safety

The module uses DataKinds and GADTs to ensure compile-time type safety:

@
-- Algorithm types are promoted to type-level
data Algorithm = FixedWindow | TokenBucket | ...

-- Storage is parameterized by algorithm type
data InMemoryStore (a :: Algorithm) where
  CounterStore :: TVar (C.Cache Text Text) -> InMemoryStore 'FixedWindow
  TokenBucketStore :: TVar (StmMap.Map Text TokenBucketEntry) -> InMemoryStore 'TokenBucket
  -- ... other algorithms
@

This prevents runtime errors like trying to use token bucket operations on
a sliding window cache.
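
For instance (a minimal sketch using only this module's own API), the value type a
cache returns is fixed by its store, so a mismatched read fails at compile time:

@
-- Accepted: store and algorithm agree; result is Maybe TokenBucketState
tbStore <- createInMemoryStore \@'TokenBucket
mState <- readCache (newCache TokenBucket tbStore) "user1"

-- Rejected at compile time: no CacheStore instance yields TokenBucketState
-- for a \@'SlidingWindow\@ store
-- swStore <- createInMemoryStore \@'SlidingWindow
-- readCache (newCache TokenBucket swStore) "user1" :: IO (Maybe TokenBucketState)
@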

== Supported Algorithms

=== Fixed Window
* __Use Case__: Simple request counting per time window
* __Storage__: JSON-serialized counters with TTL
* __Performance__: O(1) read/write operations
* __Memory__: Minimal overhead, automatic expiration

=== Sliding Window  
* __Use Case__: Precise rate limiting with timestamp tracking
* __Storage__: Lists of request timestamps per key
* __Performance__: O(n) where n is requests in window
* __Memory__: Proportional to request frequency

=== Token Bucket
* __Use Case__: Bursty traffic with sustained rate limits
* __Storage__: Worker threads with STM-based state
* __Performance__: O(1) with background token refill
* __Memory__: Fixed per bucket, automatic cleanup

=== Leaky Bucket
* __Use Case__: Smooth rate limiting without bursts
* __Storage__: Continuous drain workers with STM state
* __Performance__: O(1) with background draining
* __Memory__: Fixed per bucket, automatic cleanup

=== TinyLRU
* __Use Case__: Bounded cache with LRU eviction
* __Storage__: In-memory LRU cache with expiration
* __Performance__: O(1) average case operations
* __Memory__: Bounded by cache size limit

== Example Usage

=== Creating Algorithm-Specific Caches

@
import Keter.RateLimiter.Cache
import Data.Proxy

-- Type-safe cache creation
tokenCache <- do
  store <- createInMemoryStore \@'TokenBucket
  return $ newCache TokenBucket store

fixedWindowCache <- do
  store <- createInMemoryStore \@'FixedWindow  
  return $ newCache FixedWindow store
@

=== Basic Operations

@
-- Write/Read operations (type-safe based on algorithm)
writeCache tokenCache "user123" initialTokenState 3600
maybeState <- readCache tokenCache "user123"

-- Increment operations for counter-based algorithms
newCount <- incrementCache fixedWindowCache "api_key" 60
@

=== Advanced Usage with Custom Keys

@
-- Composite key generation
let userKey = makeCacheKey "throttle1" TokenBucket "zone1" "user456"
writeCache tokenCache userKey state 7200

-- Cleanup and reset
cacheReset tokenCache  -- Clear all entries
clearInMemoryStore store  -- Direct store cleanup
@

== Thread Safety and Concurrency

All operations are thread-safe using Software Transactional Memory (STM):

* __Atomic Operations__: All read/write operations are atomic
* __Lock-Free__: No explicit locking, uses STM for coordination  
* __Concurrent Access__: Multiple threads can safely access same cache
* __Worker Threads__: Token/Leaky bucket algorithms use background workers
* __Auto-Purging__: Background threads clean up expired entries
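
As a minimal concurrency sketch (assuming the @async@ package for
@replicateConcurrently_@), several threads can increment the same key and the
final count reflects every call:

@
import Control.Concurrent.Async (replicateConcurrently_)
import Control.Monad (replicateM_, void)

store <- createInMemoryStore \@'FixedWindow
let cache = newCache FixedWindow store
-- 10 threads x 100 increments within one 60-second window => count of 1000
replicateConcurrently_ 10 $
  replicateM_ 100 (void (incrementCache cache "shared_key" 60))
@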

== Performance Characteristics

=== Time Complexity
* __Fixed Window__: O(1) for all operations
* __Sliding Window__: O(n) for timestamp list operations
* __Token Bucket__: O(1) with background O(1) refill
* __Leaky Bucket__: O(1) with background O(1) drain
* __TinyLRU__: O(1) average case, O(n) worst case

=== Space Complexity
* __Fixed Window__: O(k) where k is number of active keys
* __Sliding Window__: O(k*n) where n is requests per window
* __Token Bucket__: O(k) with fixed per-bucket overhead
* __Leaky Bucket__: O(k) with fixed per-bucket overhead  
* __TinyLRU__: O(min(k, cache_size)) bounded by cache limit

== Error Handling

The module provides robust error handling:

* __Serialization Errors__: Graceful handling of JSON encode/decode failures
* __Type Safety__: Compile-time prevention of algorithm mismatches
* __Resource Cleanup__: Automatic cleanup of failed operations
* __Thread Exceptions__: Worker threads handle exceptions gracefully
-}
module Keter.RateLimiter.Cache
  ( -- * Core Types
    -- ** Algorithm Specification
    Algorithm(..)
  , Cache(..)
    -- ** Storage Abstraction
  , CacheStore(..)
  , InMemoryStore(..)
  , ResettableStore(..)
  , CreateStore(..)
    -- * Cache Operations  
    -- ** Basic Operations
  , readCache
  , writeCache
  , deleteCache
  , incrementCache
    -- ** Cache Management
  , newCache
  , createInMemoryStore
  , clearInMemoryStore
  , cacheReset
    -- * Utility Functions
    -- ** Key Management
  , algorithmPrefix
  , makeCacheKey
    -- ** Time Utilities
  , secondsToTimeSpec
    -- * Background Services
    -- ** Auto-Purging
  , startAutoPurge
  , startCustomPurgeTokenBucket
  , startCustomPurgeLeakyBucket
    -- ** Worker Threads
  , startTokenBucketWorker
  , startLeakyBucketWorker
    -- * Entry Creation
  , createTokenBucketEntry
  , createLeakyBucketEntry
    -- * Algorithm utilities
  , algoToText
  , parseAlgoText
  ) where

import Control.Concurrent.STM
import Control.Concurrent.MVar (putMVar, takeMVar, newMVar, readMVar, MVar)
import Control.Monad.IO.Class (MonadIO, liftIO)
import Control.Monad (void, forever)
import Control.Concurrent (forkIO, threadDelay)
import Data.Aeson (ToJSON, FromJSON, decodeStrict, encode)
import Data.Aeson.Types (Parser)
import qualified Data.ByteString.Lazy as LBS
import Data.Text (Text)
import qualified Data.Text as Tx (unpack, toLower)
import qualified Data.TinyLRU as TinyLRU
import Data.Text.Encoding (encodeUtf8, decodeUtf8')
import qualified Data.Cache as C
import System.Clock (TimeSpec(..), Clock(Monotonic), getTime, toNanoSecs)
import Data.Time.Clock.POSIX (getPOSIXTime)
import qualified StmContainers.Map as StmMap
import qualified Control.Concurrent.STM.TQueue as TQueue
import qualified Focus
import Keter.RateLimiter.Types (TokenBucketState(..), LeakyBucketState(..))
import Keter.RateLimiter.AutoPurge
import Keter.RateLimiter.TokenBucketWorker (startTokenBucketWorker)
import Data.Maybe (fromMaybe)

-- | Enumeration of supported rate limiting algorithms.
--
-- Each algorithm represents a different approach to rate limiting with distinct
-- characteristics, use cases, and performance profiles. The type is promoted
-- to the kind level using DataKinds for compile-time algorithm verification.
--
-- ==== Algorithm Characteristics Comparison
--
-- @
-- Algorithm      | Bursts | Precision | Memory    | Use Case
-- ---------------|--------|-----------|-----------|-------------------------
-- FixedWindow    | Yes    | Low       | Minimal   | Simple API rate limiting
-- SlidingWindow  | Smooth | High      | Variable  | Precise traffic shaping
-- TokenBucket    | Yes    | Medium    | Fixed     | Bursty API with sustained limits
-- LeakyBucket    | No     | High      | Fixed     | Smooth streaming/bandwidth
-- TinyLRU        | N/A    | N/A       | Bounded   | General caching with eviction
-- @
--
-- ==== Detailed Algorithm Descriptions
--
-- * __FixedWindow__: Divides time into fixed intervals, counts requests per interval
-- * __SlidingWindow__: Maintains precise timestamps, allows smooth rate distribution  
-- * __TokenBucket__: Accumulates tokens over time, consumes tokens per request
-- * __LeakyBucket__: Continuous leak rate, requests fill the bucket
-- * __TinyLRU__: Least-Recently-Used cache with size bounds and TTL
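--
-- As a small sketch of how the two levels work together: the promoted
-- (type-level) constructor selects the store representation at compile time,
-- while the value-level constructor selects the key prefix at run time.
--
-- @
-- store <- createInMemoryStore \@'LeakyBucket   -- type-level use
-- let cache = newCache LeakyBucket store        -- value-level use
-- @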
data Algorithm = FixedWindow | SlidingWindow | TokenBucket | LeakyBucket | TinyLRU
  deriving (Show, Eq)

-- | Map each algorithm to its unique cache key prefix.
--
-- Prefixes prevent key collisions between different algorithms and provide
-- clear identification of cached data types. Each algorithm uses a distinct
-- namespace within the same storage backend.
--
-- ==== Prefix Usage Pattern
--
-- @
-- -- Fixed window counter for API key "abc123"
-- Key: "rate_limiter:api:abc123"
--
-- -- Token bucket state for user "user456"  
-- Key: "token_bucket:user:user456"
--
-- -- Sliding window timestamps for IP "192.168.1.1"
-- Key: "timestamps:ip:192.168.1.1"
-- @
--
-- ==== Example
--
-- @
-- ghci> algorithmPrefix TokenBucket
-- "token_bucket"
-- ghci> algorithmPrefix FixedWindow  
-- "rate_limiter"
-- @
algorithmPrefix :: Algorithm   -- ^ The rate limiting algorithm
                -> Text        -- ^ Corresponding cache key prefix
algorithmPrefix FixedWindow   = "rate_limiter"
algorithmPrefix SlidingWindow = "timestamps"
algorithmPrefix TokenBucket   = "token_bucket"
algorithmPrefix LeakyBucket   = "leaky_bucket"
algorithmPrefix TinyLRU       = "tiny_lru"

-- | Cache wrapper that combines an algorithm specification with a storage backend.
--
-- The cache type provides a unified interface while maintaining algorithm-specific
-- behavior through the type system. It encapsulates both the algorithm logic
-- and the underlying storage implementation.
--
-- ==== Type Parameters
--
-- The 'store' parameter represents the storage backend type (e.g., 'InMemoryStore', Redis, etc.)
-- The algorithm is captured in the store type for type safety
--
-- ==== Example Usage
--
-- @
-- -- Create a token bucket cache
-- tokenStore <- createInMemoryStore \@'TokenBucket
-- let tokenCache = Cache TokenBucket tokenStore
--
-- -- Create a fixed window cache
-- counterStore <- createInMemoryStore \@'FixedWindow
-- let counterCache = Cache FixedWindow counterStore
-- @
data Cache store = Cache
  { cacheAlgorithm :: Algorithm  -- ^ The rate limiting algorithm this cache implements.
                                 --   Used for key prefixing and operation validation.
  , cacheStore :: store          -- ^ The underlying storage backend. Type determines
                                 --   supported operations and value types.
  }

-- | Typeclass abstracting cache storage backends with functional dependencies.
--
-- This typeclass provides a uniform interface for different storage implementations
-- while allowing each backend to specify its supported value types. The functional
-- dependency @store -> v@ ensures that each store type uniquely determines its
-- value type, providing additional type safety.
--
-- ==== Design Principles
--
-- * __Type Safety__: Functional dependencies prevent type mismatches
-- * __Flexibility__: Support for different storage backends (memory, Redis, etc.)
-- * __Performance__: Allow backend-specific optimizations
-- * __Consistency__: Uniform interface across all implementations
--
-- ==== Default Implementations
--
-- The typeclass provides sensible defaults for increment operations, but backends
-- can override for performance optimizations:
--
-- @
-- -- Default increment: read, modify, write
-- incStore store prefix key expires = do
--   mval <- readStore store prefix key
--   let newVal = maybe 1 (+1) mval
--   writeStore store prefix key newVal expires
--   return newVal
-- @
--
-- ==== Atomicity Guarantees
--
-- Implementations should provide atomicity guarantees appropriate for their
-- backend:
--
-- * __STM-based stores__: Full ACID transactions
-- * __Memory stores__: Process-level atomicity  
-- * __Distributed stores__: Network-level consistency
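--
-- ==== Example: Custom Backend (sketch)
--
-- A hypothetical backend, shown only to illustrate the shape of an instance.
-- It is not part of this module: it keeps an 'Data.IORef.IORef' of a 'Data.Map.Map',
-- ignores TTLs, and relies on the default 'incStore'.
--
-- @
-- newtype IORefStore = IORefStore (IORef (Map.Map Text Int))
--
-- instance CacheStore IORefStore Int IO where
--   readStore (IORefStore ref) _prefix key = do
--     m <- liftIO (readIORef ref)
--     return (Map.lookup key m)
--   writeStore (IORefStore ref) _prefix key val _ttl =
--     liftIO (modifyIORef' ref (Map.insert key val))
--   deleteStore (IORefStore ref) _prefix key =
--     liftIO (modifyIORef' ref (Map.delete key))
-- @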
class MonadIO m => CacheStore store v m | store -> v where
  -- | Read a value from the store.
  --
  -- @
  -- result <- readStore store "rate_limiter" "api_key_123"
  -- -- result: Maybe Int (for counter-based algorithms)
  -- @
  readStore :: store    -- ^ Storage backend instance
            -> Text     -- ^ Key prefix (algorithm-specific)
            -> Text     -- ^ Full cache key
            -> m (Maybe v)  -- ^ Retrieved value, or Nothing if not found

  -- | Write a value to the store with expiration.
  --
  -- @
  -- writeStore store "token_bucket" "user_456" bucketState 3600
  -- -- Stores bucket state with 1-hour TTL
  -- @
  writeStore :: store   -- ^ Storage backend instance
             -> Text    -- ^ Key prefix (algorithm-specific)
             -> Text    -- ^ Full cache key
             -> v       -- ^ Value to store
             -> Int     -- ^ TTL in seconds
             -> m ()

  -- | Delete a key from the store.
  --
  -- @
  -- deleteStore store "timestamps" "ip_192_168_1_1"
  -- -- Removes sliding window timestamps for IP
  -- @
  deleteStore :: store  -- ^ Storage backend instance
              -> Text   -- ^ Key prefix (algorithm-specific)  
              -> Text   -- ^ Full cache key
              -> m ()

  -- | Atomically increment a numeric value.
  --
  -- Provides atomic increment-or-initialize semantics. If the key doesn't exist,
  -- initializes to 1. If it exists, increments by 1. Essential for counter-based
  -- rate limiting algorithms.
  --
  -- @
  -- newCount <- incStore store "rate_limiter" "api_throttle" 60
  -- -- Returns new count after increment, with 60-second TTL
  -- @
  incStore :: (FromJSON v, ToJSON v, Ord v, Num v) 
           => store     -- ^ Storage backend instance
           -> Text      -- ^ Key prefix (algorithm-specific)
           -> Text      -- ^ Full cache key  
           -> Int       -- ^ TTL in seconds for the incremented value
           -> m v       -- ^ New value after increment
  incStore store prefix key expiresIn = do -- Default implementation
    mval <- readStore store prefix key
    let newVal = case mval of
          Nothing -> 1
          Just v  -> if v <= 0 then 1 else v + 1
    writeStore store prefix key newVal expiresIn
    return newVal

-- | Typeclass for storage backends that support complete reset operations.
--
-- Provides a way to clear all data from a store, useful for testing,
-- maintenance, and emergency reset scenarios. Implementations should
-- ensure thread safety and atomic reset behavior.
--
-- ==== Use Cases
--
-- * __Testing__: Clean state between test runs
-- * __Maintenance__: Clear corrupted or stale data
-- * __Memory Management__: Recover from memory pressure
-- * __Configuration Changes__: Reset after algorithm parameter changes
--
-- ==== Example
--
-- @
-- -- Reset all rate limiting data
-- resetStore myTokenBucketStore
--
-- -- Reset entire cache
-- cacheReset myCache
-- @
class ResettableStore store where
  -- | Clear all entries from the store.
  --
  -- Should be atomic and thread-safe. After reset, the store should behave
  -- as if it were newly created.
  resetStore :: store -> IO ()

-- | Algorithm-parameterized in-memory storage using GADTs.
--
-- This type uses GADTs (Generalized Algebraic Data Types) to provide compile-time
-- guarantees that each algorithm uses appropriate storage structures. The phantom
-- type parameter ensures that token bucket operations can't be used on sliding
-- window stores, etc.
--
-- ==== GADT Benefits
--
-- * __Type Safety__: Prevents algorithm/storage mismatches at compile time
-- * __Performance__: Specialized storage for each algorithm's needs
-- * __Extensibility__: Easy to add new algorithms with appropriate storage
-- * __Documentation__: Types serve as executable documentation
--
-- ==== Storage Specialization
--
-- Each algorithm gets optimized storage:
--
-- * __Counters__: Simple key-value cache with TTL
-- * __Timestamps__: STM Map of timestamp lists  
-- * __Token Buckets__: STM Map of worker entries with background threads
-- * __Leaky Buckets__: STM Map with continuous drain workers
-- * __TinyLRU__: Bounded LRU cache with automatic eviction
--
-- ==== Memory Management
--
-- * Token and Leaky bucket stores include automatic purging
-- * TinyLRU provides bounded memory usage
-- * Counter stores use TTL-based expiration
-- * All stores support manual reset for cleanup
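--
-- As a small sketch, pattern matching on the constructors refines the
-- type-level algorithm index (the helper below is hypothetical, not exported):
--
-- @
-- describeStore :: InMemoryStore a -> Text
-- describeStore (CounterStore _)     = "fixed window counters"
-- describeStore (TimestampStore _)   = "sliding window timestamps"
-- describeStore (TokenBucketStore _) = "token buckets"
-- describeStore (LeakyBucketStore _) = "leaky buckets"
-- describeStore (TinyLRUStore _)     = "tiny LRU cache"
-- @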
data InMemoryStore (a :: Algorithm) where
  -- | Counter-based storage for fixed window algorithm.
  --
  -- Uses 'Data.Cache' for automatic TTL-based expiration. Stores JSON-serialized
  -- counter values with precise expiration timing.
  CounterStore :: TVar (C.Cache Text Text) -> InMemoryStore 'FixedWindow

  -- | Timestamp list storage for sliding window algorithm.
  --
  -- Maintains lists of request timestamps per key. Enables precise rate
  -- calculation by examining timestamps within the sliding time window.
  TimestampStore :: TVar (StmMap.Map Text [Double]) -> InMemoryStore 'SlidingWindow

  -- | Token bucket entry storage with worker thread management.
  --
  -- Each entry includes bucket state, request queue, and worker synchronization.
  -- Automatically starts purge threads to clean up inactive buckets.
  TokenBucketStore :: TVar (StmMap.Map Text TokenBucketEntry) -> InMemoryStore 'TokenBucket

  -- | Leaky bucket entry storage with continuous drain workers.
  --
  -- Similar to token buckets but with continuous drain semantics. Each bucket
  -- has a worker thread that continuously drains at the specified rate.
  LeakyBucketStore :: TVar (StmMap.Map Text LeakyBucketEntry) -> InMemoryStore 'LeakyBucket

  -- | Bounded LRU cache storage.
  --
  -- Provides fixed-size cache with least-recently-used eviction. Suitable
  -- for scenarios where memory bounds are more important than precise rate limiting.
  TinyLRUStore :: TVar (TinyLRU.TinyLRUCache s) -> InMemoryStore 'TinyLRU

-- | Typeclass for creating algorithm-specific storage instances.
--
-- Uses type-level programming to ensure each algorithm gets appropriate storage.
-- The phantom type parameter prevents creation of incompatible store types.
--
-- ==== Type-Level Dispatch
--
-- @
-- -- Compiler ensures correct store type
-- tokenStore <- createStore \@'TokenBucket    -- Creates TokenBucketStore
-- counterStore <- createStore \@'FixedWindow  -- Creates CounterStore
-- 
-- -- This would be a compile error:
-- -- tokenStore <- createStore \@'SlidingWindow  -- Type mismatch!
-- @
--
-- ==== Automatic Services
--
-- Some storage types automatically start background services:
--
-- * __Token/Leaky buckets__: Auto-purge threads for inactive entries
-- * __Counter stores__: TTL-based expiration threads
-- * __Timestamp stores__: Manual cleanup required
-- * __TinyLRU__: Built-in eviction on size limits
class CreateStore (a :: Algorithm) where
  -- | Create a new storage instance for the specified algorithm.
  --
  -- Initializes all necessary data structures and background services.
  -- The created store is immediately ready for use.
  createStore :: IO (InMemoryStore a)

-- | Convert seconds to TimeSpec for use with Data.Cache.
--
-- Calculates an absolute future time by adding the specified duration
-- to the current monotonic time. Used for setting TTL values in cache
-- operations.
--
-- ==== Monotonic Time Benefits
--
-- * __Clock Adjustments__: Unaffected by system clock changes
-- * __Precision__: Nanosecond resolution for accurate timing
-- * __Performance__: Fast system call with minimal overhead
-- * __Reliability__: Guaranteed monotonic progression
--
-- ==== Example
--
-- @
-- -- Create 5-minute expiration time
-- expiryTime <- secondsToTimeSpec 300
-- 
-- -- Use with cache operations
-- C.insertSTM key value cache (Just expiryTime)
-- @
secondsToTimeSpec :: Int        -- ^ Duration in seconds from now
                  -> IO TimeSpec -- ^ Absolute future time for expiration
secondsToTimeSpec seconds = do
  now <- getTime Monotonic
  return $ now + TimeSpec (fromIntegral seconds) 0

-- | Create store instances for each Algorithm.
instance CreateStore 'FixedWindow where
  createStore :: IO (InMemoryStore 'FixedWindow)
  createStore = createStoreWith CounterStore

instance CreateStore 'SlidingWindow where
  createStore :: IO (InMemoryStore 'SlidingWindow)
  createStore = do
    emptyMap <- atomically (StmMap.new :: STM (StmMap.Map Text [Double]))
    tvar <- newTVarIO emptyMap
    pure $ TimestampStore tvar

instance CreateStore 'TokenBucket where
  createStore :: IO (InMemoryStore 'TokenBucket)
  createStore = do
    emptyMap <- atomically (StmMap.new :: STM (StmMap.Map Text TokenBucketEntry))
    tvar <- newTVarIO emptyMap
    void $ startCustomPurgeTokenBucket emptyMap (60 :: Integer) (3600 :: Integer)
    pure $ TokenBucketStore tvar

instance CreateStore 'LeakyBucket where
  createStore :: IO (InMemoryStore 'LeakyBucket)
  createStore = do
    emptyMap <- atomically (StmMap.new :: STM (StmMap.Map Text LeakyBucketEntry))
    tvar <- newTVarIO emptyMap
    void $ startCustomPurgeLeakyBucket emptyMap (60 :: Integer) (3600 :: Integer)
    pure $ LeakyBucketStore tvar

instance CreateStore 'TinyLRU where
  createStore :: IO (InMemoryStore 'TinyLRU)
  createStore = TinyLRUStore <$> (atomically $ newTVar =<< TinyLRU.initTinyLRU 100)

createStoreWith :: (TVar (C.Cache Text Text) -> InMemoryStore a) -> IO (InMemoryStore a)
createStoreWith mkStore = do
  rawCache <- C.newCache Nothing
  purgeInterval <- newMVar (60 :: Integer) -- Purge every 60 seconds
  purgeSignal <- newMVar ()                -- Signal to trigger purge
  void $ forkIO $ forever $ do
    takeMVar purgeSignal
    interval <- readMVar purgeInterval
    startTime <- getTime Monotonic
    C.purgeExpired rawCache
    endTime <- getTime Monotonic
    let elapsedMicros = (toNanoSecs endTime - toNanoSecs startTime) `div` 1000
        remainingMicros = max (0 :: Integer) (interval * 1000000 - elapsedMicros)
    waitUntilNextPurge startTime remainingMicros purgeSignal
  tvar <- newTVarIO rawCache
  return $ mkStore tvar

-- | Wait until the next purge interval deterministically
waitUntilNextPurge :: TimeSpec -> Integer -> MVar () -> IO ()
waitUntilNextPurge startTime remainingMicros purgeSignal = do
  currentTime <- getTime Monotonic
  let elapsedMicros = (toNanoSecs currentTime - toNanoSecs startTime) `div` 1000
  if elapsedMicros >= remainingMicros
    then putMVar purgeSignal () -- Signal the next purge
    else do
      let sleepMicros = fromIntegral (min remainingMicros (toInteger (maxBound :: Int))) :: Int
      threadDelay sleepMicros -- Use threadDelay for the remaining time
      putMVar purgeSignal ()  -- Signal after waiting

-- | Create a new in-memory store for a specific rate-limiting algorithm.
--
-- This function provides a convenient, type-safe way to create algorithm-specific
-- storage. It uses TypeApplications to specify which algorithm's store to create,
-- ensuring compile-time correctness.
--
-- ==== Type Safety Example
--
-- @
-- -- These are all valid and type-safe:
-- tokenStore <- createInMemoryStore \@'TokenBucket
-- counterStore <- createInMemoryStore \@'FixedWindow
-- lruStore <- createInMemoryStore \@'TinyLRU
--
-- -- This would be a compile error (typo):
-- -- badStore <- createInMemoryStore \@'TokenBuckett  -- Not a valid algorithm
-- @
--
-- ==== Background Services
--
-- Some algorithms automatically start background services:
--
-- * __TokenBucket/LeakyBucket__: Purge threads for cleanup (60s interval, 1h TTL)
-- * __FixedWindow__: TTL-based expiration threads
-- * __SlidingWindow/TinyLRU__: No automatic background services
--
-- ==== Example Usage
--
-- @
-- import Data.Proxy
--
-- main = do
--   -- Create stores for different algorithms
--   tokenStore <- createInMemoryStore \@'TokenBucket
--   slidingStore <- createInMemoryStore \@'SlidingWindow
--   
--   -- Use in cache creation
--   let tokenCache = newCache TokenBucket tokenStore
--   let slidingCache = newCache SlidingWindow slidingStore
-- @
createInMemoryStore :: forall (a :: Algorithm). CreateStore a => IO (InMemoryStore a)
createInMemoryStore = createStore @a

-- | Create a new cache with a given Algorithm and store.
--
-- This is the primary constructor for cache instances. It combines an algorithm
-- specification with a storage backend to create a fully functional cache.
-- The algorithm parameter is used for key prefixing and operation validation.
--
-- ==== Design Rationale
--
-- * __Separation of Concerns__: Algorithm logic separate from storage implementation
-- * __Flexibility__: Same algorithm can use different storage backends
-- * __Type Safety__: Algorithm-store compatibility enforced by types
-- * __Testability__: Easy to mock storage for testing
--
-- ==== Example
--
-- @
-- -- Create a token bucket cache with in-memory storage
-- store <- createInMemoryStore \@'TokenBucket
-- let cache = newCache TokenBucket store
--
-- -- Later operations use the unified cache interface
-- writeCache cache "user123" initialState 3600
-- result <- readCache cache "user123"
-- @
newCache :: Algorithm   -- ^ The rate limiting algorithm this cache implements
         -> store       -- ^ The storage backend (must be compatible with algorithm)
         -> Cache store -- ^ Complete cache instance ready for use
newCache algo store = Cache
  { cacheAlgorithm = algo
  , cacheStore = store
  }

-- | Read from cache using the algorithm-prefixed key.
--
-- Automatically applies the appropriate key prefix based on the cache's algorithm,
-- then delegates to the storage backend's read operation. This ensures consistent
-- key namespacing across all cache operations.
--
-- ==== Key Transformation
--
-- @
-- -- For a TokenBucket cache with key "user123", the call becomes:
-- -- readStore store "token_bucket" "token_bucket:user123"
-- --                  ^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^
-- --                  prefix         prefixed key
-- @
--
-- ==== Type Safety
--
-- The return type is determined by the storage backend's CacheStore instance,
-- ensuring you get the correct value type for the algorithm:
--
-- * __FixedWindow__: @Maybe Int@ (counter values)
-- * __TokenBucket__: @Maybe TokenBucketState@ (bucket state)
-- * __SlidingWindow__: @Maybe [Double]@ (timestamp lists)
--
-- ==== Example
--
-- @
-- -- Read token bucket state
-- maybeState <- readCache tokenCache "user456"
-- case maybeState of
--   Nothing -> putStrLn "No bucket exists for user"
--   Just state -> putStrLn $ "User has " ++ show (tokens state) ++ " tokens"
-- @
readCache :: (CacheStore store v IO) 
          => Cache store  -- ^ Cache instance with algorithm and storage
          -> Text         -- ^ Unprefixed key (e.g., "user123", "api_key_abc")
          -> IO (Maybe v) -- ^ Retrieved value with algorithm-appropriate type
readCache cache unprefixedKey =
  readStore (cacheStore cache) (algorithmPrefix $ cacheAlgorithm cache)
            (algorithmPrefix (cacheAlgorithm cache) <> ":" <> unprefixedKey)

-- | Write to cache using the algorithm-prefixed key.
--
-- Stores a value with automatic key prefixing and TTL handling. The value type
-- must match what the storage backend expects for the cache's algorithm.
--
-- ==== TTL Behavior
--
-- * __Absolute TTL__: Expiration time calculated from current time
-- * __Background Cleanup__: Most stores have automatic cleanup threads
-- * __Precision__: Uses monotonic clock for accurate timing
-- * __Consistency__: TTL behavior consistent across all algorithms
--
-- ==== Example
--
-- @
-- -- Store token bucket state for 1 hour
-- let initialState = TokenBucketState 100 currentTime
-- writeCache tokenCache "new_user" initialState 3600
--
-- -- Store counter value for 5 minutes
-- writeCache counterCache "api_limit" (42 :: Int) 300
-- @
writeCache :: (CacheStore store v IO) 
           => Cache store  -- ^ Cache instance
           -> Text         -- ^ Unprefixed key
           -> v            -- ^ Value to store (type must match store's expectation)
           -> Int          -- ^ TTL in seconds (time until expiration)
           -> IO ()        -- ^ No return value, throws on error
writeCache cache unprefixedKey val expiresIn =
  writeStore (cacheStore cache) (algorithmPrefix $ cacheAlgorithm cache)
             (algorithmPrefix (cacheAlgorithm cache) <> ":" <> unprefixedKey) val expiresIn

-- | Delete a key from cache using the algorithm-prefixed key.
--
-- Removes an entry from the cache, including any associated resources
-- (worker threads, background tasks, etc.). The operation is atomic
-- and thread-safe.
--
-- ==== Resource Cleanup
--
-- * __Token/Leaky Buckets__: Terminates associated worker threads
-- * __Counters__: Simple key removal
-- * __Timestamps__: Clears timestamp lists
-- * __LRU__: Updates LRU ordering and frees space
--
-- ==== Example
--
-- @
-- -- Remove user's rate limiting state
-- deleteCache tokenCache "inactive_user"
--
-- -- Clear API key's counter
-- deleteCache counterCache "expired_api_key"
-- @
deleteCache :: (CacheStore store v IO) 
            => Cache store  -- ^ Cache instance
            -> Text         -- ^ Unprefixed key to delete
            -> IO ()        -- ^ No return value, silent if key doesn't exist
deleteCache cache unprefixedKey =
  deleteStore (cacheStore cache) (algorithmPrefix $ cacheAlgorithm cache)
              (algorithmPrefix (cacheAlgorithm cache) <> ":" <> unprefixedKey)

-- | Increment a numeric cache value or initialise it if missing.
--
-- Provides atomic increment-or-initialize semantics essential for counter-based
-- rate limiting. If the key doesn't exist, initializes to 1. If it exists,
-- increments by 1. The operation is atomic even under high concurrency.
--
-- ==== Atomicity Guarantees
--
-- * __STM-based stores__: Full transaction isolation
-- * __Memory stores__: Process-level atomicity
-- * __Distributed stores__: Backend-specific consistency
--
-- ==== Error Handling
--
-- * __Type Mismatch__: Returns error if existing value isn't numeric
-- * __Serialization__: Handles JSON encoding/decoding failures gracefully
-- * __Overflow__: Behavior depends on numeric type (Int, Double, etc.)
--
-- ==== Example
--
-- @
-- -- Increment API request counter
-- newCount <- incrementCache apiCache "requests_per_minute" 60
-- when (newCount > 1000) $ throwError "Rate limit exceeded"
--
-- -- Initialize or increment user action count
-- actionCount <- incrementCache userCache "daily_actions" 86400
-- @
incrementCache :: (CacheStore store v IO, FromJSON v, ToJSON v, Ord v, Num v) 
               => Cache store  -- ^ Cache instance (must support numeric values)
               -> Text         -- ^ Unprefixed key to increment
               -> Int          -- ^ TTL in seconds for the incremented value
               -> IO v         -- ^ New value after increment
incrementCache cache unprefixedKey expiresIn = do
  let fullKey = algorithmPrefix (cacheAlgorithm cache) <> ":" <> unprefixedKey
      prefix  = algorithmPrefix (cacheAlgorithm cache)
  incStore (cacheStore cache) prefix fullKey expiresIn

-- | Clear all entries in an in-memory store.
--
-- Provides a direct interface to the ResettableStore functionality.
-- Useful when you need to reset storage without going through the
-- cache wrapper.
--
-- ==== Use Cases
--
-- * __Testing__: Clean slate between test cases
-- * __Maintenance__: Clear corrupted state
-- * __Memory Management__: Free up memory during low usage
-- * __Reconfiguration__: Reset before changing algorithm parameters
--
-- ==== Thread Safety
--
-- The operation is atomic and thread-safe, but concurrent operations
-- may see the reset at different times. Consider coordinating with
-- other threads if precise timing is required.
--
-- ==== Example
--
-- @
-- -- Direct store reset
-- clearInMemoryStore myTokenBucketStore
--
-- -- Conditional reset based on memory usage
-- when memoryPressure $ clearInMemoryStore store
-- @
clearInMemoryStore :: ResettableStore store 
                   => store   -- ^ Storage instance to clear
                   -> IO ()   -- ^ No return value, completes when reset is done
clearInMemoryStore = resetStore

-- | Reset all entries in a cache.
--
-- Clears all data from the cache's storage backend. This is a convenience
-- wrapper around clearInMemoryStore that works at the cache level rather
-- than the storage level.
--
-- ==== Behavior
--
-- * __Complete Reset__: All keys and values are removed
-- * __Background Services__: Worker threads and purge threads continue running
-- * __Algorithm State__: Any algorithm-specific state is cleared
-- * __Immediate Effect__: Reset is visible to all threads immediately
--
-- ==== Example
--
-- @
-- -- Reset entire token bucket cache
-- cacheReset tokenBucketCache
--
-- -- Reset counter cache for new time period
-- cacheReset apiCounterCache
-- @
cacheReset :: ResettableStore store 
           => Cache store  -- ^ Cache instance to reset
           -> IO ()        -- ^ No return value, completes when reset is done
cacheReset (Cache _ store) = resetStore store

-- | Helper function to create a TokenBucketEntry with proper TMVar initialization.
--
-- Creates a complete token bucket entry with all necessary components:
-- state storage, request queue, and worker synchronization. This ensures
-- proper initialization of all STM components.
--
-- ==== Entry Components
--
-- * __State TVar__: Atomic storage for bucket state (tokens, last update)
-- * __Request Queue__: TQueue for client-worker communication
-- * __Worker Lock__: TMVar for coordinating worker thread lifecycle
--
-- ==== Example
--
-- @
-- -- Create entry for new user bucket
-- now <- floor <$> getPOSIXTime
-- let initialState = TokenBucketState 100 now  -- 100 tokens, current time
-- entry <- createTokenBucketEntry initialState
--
-- -- Entry is ready for insertion into STM map
-- atomically $ StmMap.insert entry "user123" bucketMap
-- @
createTokenBucketEntry :: TokenBucketState      -- ^ Initial bucket state (tokens, timestamp)
                       -> IO TokenBucketEntry   -- ^ Complete entry ready for use
createTokenBucketEntry state = do
  stateVar <- newTVarIO state
  queue <- atomically TQueue.newTQueue
  workerLock <- atomically newEmptyTMVar
  return $ TokenBucketEntry stateVar queue workerLock

-- | Helper function to create a LeakyBucketEntry with proper TMVar initialization.
--
-- Similar to createTokenBucketEntry but for leaky bucket algorithm. Creates
-- all necessary STM components for leaky bucket operation with continuous
-- drain semantics.
--
-- ==== Entry Components
--
-- * __State TVar__: Atomic storage for bucket level and last update time
-- * __Request Queue__: TQueue using TMVar for STM-based responses
-- * __Worker Lock__: TMVar for worker thread coordination
--
-- ==== Example
--
-- @
-- -- Create entry for streaming connection
-- now <- realToFrac <$> getPOSIXTime
-- let initialState = LeakyBucketState 0.0 now  -- Empty bucket, current time
-- entry <- createLeakyBucketEntry initialState
--
-- -- Entry ready for continuous drain processing
-- atomically $ StmMap.insert entry "stream123" bucketMap
-- @
createLeakyBucketEntry :: LeakyBucketState      -- ^ Initial bucket state (level, timestamp)
                       -> IO LeakyBucketEntry   -- ^ Complete entry ready for use
createLeakyBucketEntry state = do
  stateVar <- newTVarIO state
  queue <- atomically TQueue.newTQueue
  workerLock <- atomically newEmptyTMVar
  return $ LeakyBucketEntry stateVar queue workerLock

-- | CacheStore instances

-- | CacheStore instance for FixedWindow algorithm using integer counters.
--
-- Implements counter-based rate limiting with JSON serialization and TTL support.
-- Uses Data.Cache for automatic expiration and efficient storage.
--
-- ==== Storage Format
--
-- * __Keys__: Text identifiers (API keys, user IDs, etc.)
-- * __Values__: JSON-serialized integers representing request counts
-- * __Expiration__: Automatic TTL-based cleanup
--
-- ==== Atomicity
--
-- Increment operations use STM for thread-safe atomic updates, ensuring
-- accurate counting even under high concurrency.
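--
-- ==== Example (sketch)
--
-- A direct use of the store-level API for this instance; the prefix argument
-- is ignored here because the key is already fully qualified:
--
-- @
-- store <- createInMemoryStore \@'FixedWindow
-- n1 <- incStore store "rate_limiter" "rate_limiter:api:abc123" 60  -- 1
-- n2 <- incStore store "rate_limiter" "rate_limiter:api:abc123" 60  -- 2
-- @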
instance CacheStore (InMemoryStore 'FixedWindow) Int IO where
  readStore (CounterStore tvar) _prefix key = do
    cache <- readTVarIO tvar
    mval <- C.lookup cache key
    case mval of
      Nothing  -> return Nothing
      Just txt -> return $ decodeStrict (encodeUtf8 txt)
  writeStore (CounterStore tvar) _prefix key val expiresIn = do
    let bs = encode val
        strictBs = LBS.toStrict bs
        jsonTxt = case decodeUtf8' strictBs of
                    Left _    -> ""
                    Right txt -> txt
    expiryTimeSpec <- secondsToTimeSpec expiresIn
    atomically $ do
      cache <- readTVar tvar
      C.insertSTM key jsonTxt cache (Just expiryTimeSpec)
  deleteStore (CounterStore tvar) _prefix key = do
    cache <- readTVarIO tvar
    C.delete cache key
  incStore (CounterStore tvar) _prefix key expiresIn = do
    now <- getTime Monotonic
    expiryTimeSpec <- secondsToTimeSpec expiresIn
    atomically $ do
      cache <- readTVar tvar
      mval <- C.lookupSTM False key cache now
      let currentVal = case mval of
                         Nothing  -> 0
                         Just txt -> fromMaybe 0 (decodeStrict (encodeUtf8 txt))
          newVal = currentVal + 1
          bs = encode newVal
          strictBs = LBS.toStrict bs
          jsonTxt = case decodeUtf8' strictBs of
                      Left _    -> ""
                      Right txt -> txt
      C.insertSTM key jsonTxt cache (Just expiryTimeSpec)
      return newVal

-- | CacheStore instance for SlidingWindow algorithm using timestamp lists.
--
-- Stores lists of request timestamps for precise sliding window calculations.
-- Does not use TTL as timestamps are managed by the sliding window logic.
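--
-- ==== Example (sketch)
--
-- Direct store-level use; the TTL argument is ignored by this instance:
--
-- @
-- store <- createInMemoryStore \@'SlidingWindow
-- writeStore store "timestamps" "timestamps:ip:10.0.0.1" [1717000000.1, 1717000003.4] 0
-- ts <- readStore store "timestamps" "timestamps:ip:10.0.0.1"
-- -- ts == Just [1717000000.1, 1717000003.4]
-- @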
instance CacheStore (InMemoryStore 'SlidingWindow) [Double] IO where
  readStore (TimestampStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    StmMap.lookup key stmMap
  writeStore (TimestampStore tvar) _prefix key val _expiresIn = atomically $ do
    stmMap <- readTVar tvar
    StmMap.insert val key stmMap
  deleteStore (TimestampStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    StmMap.delete key stmMap

-- | CacheStore instance for TokenBucket algorithm.
--
-- Provides access to token bucket state while maintaining the worker thread
-- infrastructure. Read operations return current bucket state, write operations
-- update state and manage worker lifecycle.
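--
-- ==== Example
--
-- A minimal sketch of replacing a bucket's state, assuming a
-- @TokenBucketState@ value @st@ constructed elsewhere (the helper
-- @resetBucket@ is illustrative, not part of this module):
--
-- @
-- resetBucket :: InMemoryStore 'TokenBucket -> Text -> TokenBucketState -> IO ()
-- resetBucket store key st = writeStore store "throttle" key st 0  -- TTL argument is ignored
-- @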
instance CacheStore (InMemoryStore 'TokenBucket) TokenBucketState IO where
  readStore :: InMemoryStore 'TokenBucket -> Text -> Text -> IO (Maybe TokenBucketState)
  readStore (TokenBucketStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    mval <- StmMap.lookup key stmMap
    case mval of
      Nothing    -> return Nothing
      Just entry -> Just <$> readTVar (tbeState entry)
  writeStore :: InMemoryStore 'TokenBucket -> Text -> Text -> TokenBucketState -> Int -> IO ()
  writeStore (TokenBucketStore tvar) _prefix key val _expiresIn = do
    entry <- createTokenBucketEntry val
    atomically $ do
      stmMap <- readTVar tvar
      StmMap.focus (focusInsertOrUpdate tbeState entry val) key stmMap
  deleteStore :: InMemoryStore 'TokenBucket -> Text -> Text -> IO ()
  deleteStore (TokenBucketStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    StmMap.delete key stmMap

-- | CacheStore instance for LeakyBucket algorithm.
--
-- Similar to the TokenBucket instance, but with continuous drain semantics. Manages
-- leaky bucket state and worker thread lifecycle.
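--
-- ==== Example
--
-- A minimal sketch of discarding a client's bucket, e.g. after an operator
-- lifts a block (the helper @forgetClient@ is illustrative, not part of this
-- module):
--
-- @
-- forgetClient :: InMemoryStore 'LeakyBucket -> Text -> IO ()
-- forgetClient store key = deleteStore store "throttle" key
-- @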
instance CacheStore (InMemoryStore 'LeakyBucket) LeakyBucketState IO where
  readStore :: InMemoryStore 'LeakyBucket -> Text -> Text -> IO (Maybe LeakyBucketState)
  readStore (LeakyBucketStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    mval <- StmMap.lookup key stmMap
    case mval of
      Nothing    -> return Nothing
      Just entry -> Just <$> readTVar (lbeState entry)
  writeStore :: InMemoryStore 'LeakyBucket -> Text -> Text -> LeakyBucketState -> Int -> IO ()
  writeStore (LeakyBucketStore tvar) _prefix key val _expiresIn = do
    entry <- createLeakyBucketEntry val
    atomically $ do
      stmMap <- readTVar tvar
      StmMap.focus (focusInsertOrUpdate lbeState entry val) key stmMap
  deleteStore :: InMemoryStore 'LeakyBucket -> Text -> Text -> IO ()
  deleteStore (LeakyBucketStore tvar) _prefix key = atomically $ do
    stmMap <- readTVar tvar
    StmMap.delete key stmMap

-- | Generic helper for Focus insert-or-update operations.
--
-- Provides atomic insert-or-update semantics for STM maps. If the key doesn't
-- exist, inserts the new entry. If it exists, updates the state within the
-- existing entry.
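--
-- ==== Example
--
-- The helper is meant to be combined with 'StmMap.focus', as in the bucket
-- instances above:
--
-- @
-- StmMap.focus (focusInsertOrUpdate tbeState entry newState) key stmMap
-- @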
focusInsertOrUpdate
  :: (entry -> TVar v)    -- ^ Function to extract state TVar from entry
  -> entry                -- ^ New entry to insert if key doesn't exist
  -> v                    -- ^ New value to set in state TVar
  -> Focus.Focus entry STM ()  -- ^ Focus operation for atomic insert-or-update
focusInsertOrUpdate getState entry newVal = Focus.Focus
  (pure ((), Focus.Set entry))
  (\existing -> do
     writeTVar (getState existing) newVal
     pure ((), Focus.Leave))

-- | Leaky bucket worker thread implementation.
--
-- Implements the continuous drain algorithm for leaky buckets. Processes
-- requests from a queue and updates bucket state based on elapsed time
-- and leak rate.
--
-- ==== Algorithm Details
--
-- 1. __Drain Calculation__: level' = max(0, level - elapsed * leakRate)
-- 2. __Request Processing__: level'' = level' + requestSize (typically 1)
-- 3. __Capacity Check__: allowed = level'' <= capacity
-- 4. __State Update__: Apply leak even on denial for accurate timing
--
-- ==== Example
--
-- @
-- -- Start worker for streaming rate limiter
-- startLeakyBucketWorker stateVar queue 100 2.0
-- -- Capacity: 100 requests, Leak rate: 2 requests\/second
-- @
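--
-- ==== Worked Example (illustrative numbers)
--
-- With @capacity = 100@, @leakRate = 2.0@, a current @level@ of 10 and 3
-- seconds elapsed since @lastTime@:
--
-- @
-- leakedLevel = max 0 (10 - 3 * 2.0) = 4.0
-- nextLevel   = 4.0 + 1              = 5.0
-- allowed     = 5.0 <= 100           = True
-- @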
startLeakyBucketWorker
  :: TVar LeakyBucketState          -- ^ Shared bucket state
  -> TQueue.TQueue (TMVar Bool)     -- ^ Request queue with STM responses
  -> Int                            -- ^ Bucket capacity (maximum level)
  -> Double                         -- ^ Leak rate (requests drained per second)
  -> IO ()                          -- ^ Returns immediately, worker runs in background
startLeakyBucketWorker stateVar queue capacity leakRate = void . forkIO $
  forever $ do
    replyVar <- atomically $ readTQueue queue
    now <- realToFrac <$> getPOSIXTime
    result <- atomically $ do
      LeakyBucketState{level, lastTime} <- readTVar stateVar
      let elapsed     = now - lastTime
          leakedLevel = max 0 (level - elapsed * leakRate)
          nextLevel   = leakedLevel + 1
          allowed     = nextLevel <= fromIntegral capacity
          -- Apply the leak even on denial so timing stays accurate.
          finalLevel  = if allowed then nextLevel else leakedLevel
      writeTVar stateVar LeakyBucketState{ level = finalLevel, lastTime = now }
      return allowed
    atomically $ putTMVar replyVar result

-- | CacheStore instance for TinyLRU algorithm.
--
-- Provides bounded cache with least-recently-used eviction and TTL support.
-- Automatically handles expiration and LRU ordering updates.
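--
-- ==== Example
--
-- A minimal sketch of a bounded counter with a 60-second TTL (the helper
-- @bump@ is illustrative, not part of this module):
--
-- @
-- bump :: InMemoryStore 'TinyLRU -> Text -> IO Int
-- bump store key = do
--   mval <- readStore store "throttle" key
--   let next = maybe 1 (+ 1) mval
--   writeStore store "throttle" key next 60
--   pure next
-- @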
instance CacheStore (InMemoryStore 'TinyLRU) Int IO where
  readStore :: InMemoryStore 'TinyLRU -> Text -> Text -> IO (Maybe Int)
  readStore (TinyLRUStore ref) _prefix key = do
    now <- liftIO $ getTime Monotonic
    atomically $ do
      cache <- readTVar ref
      maybeNodeRef <- StmMap.lookup key (TinyLRU.lruCache cache)
      case maybeNodeRef of
        Just nodeRef -> do
          node <- readTVar nodeRef
          let expired = TinyLRU.isExpired now node
          if expired
            then do
              TinyLRU.deleteKey key cache
              return Nothing
            else do
              let decoded :: Maybe Int = decodeStrict (TinyLRU.nodeValue node)
              TinyLRU.moveToFrontInCache cache nodeRef
              return decoded
        Nothing -> return Nothing
  writeStore :: InMemoryStore 'TinyLRU -> Text -> Text -> Int -> Int -> IO ()
  writeStore (TinyLRUStore ref) _prefix key val expiresIn = do
    now <- liftIO $ getTime Monotonic
    atomically $ do
      cache <- readTVar ref
      _ <- TinyLRU.updateValue now key val expiresIn cache
      return ()
  deleteStore :: InMemoryStore 'TinyLRU -> Text -> Text -> IO ()
  deleteStore (TinyLRUStore ref) _prefix key = do
    atomically $ do
      cache <- readTVar ref
      TinyLRU.deleteKey key cache

-- ResettableStore instances

-- | ResettableStore instances for all InMemoryStore variants.
--
-- Provides uniform reset behavior across all algorithm-specific stores.
-- Each implementation handles algorithm-specific cleanup requirements.
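--
-- ==== Example
--
-- A sketch of clearing one store completely, e.g. in a test-suite teardown
-- (the @store@ value is assumed to already exist):
--
-- @
-- resetStore store  -- uniform across all InMemoryStore variants
-- @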
instance ResettableStore (InMemoryStore a) where
  resetStore :: InMemoryStore a -> IO ()
  resetStore (CounterStore tvar) = resetStoreWith tvar
  resetStore (TimestampStore tvar) = atomically $ do
    stmMap <- readTVar tvar
    StmMap.reset stmMap
  resetStore (TokenBucketStore tvar) = atomically $ do
    stmMap <- readTVar tvar
    StmMap.reset stmMap
  resetStore (LeakyBucketStore tvar) = atomically $ do
    stmMap <- readTVar tvar
    StmMap.reset stmMap
  resetStore (TinyLRUStore ref) = atomically $ do
    cache <- readTVar ref
    TinyLRU.resetTinyLRU cache

-- | Reset helper for Data.Cache-based stores.
--
-- Creates a fresh cache instance and atomically replaces the old one.
-- This ensures all TTL timers and cached data are completely cleared.
resetStoreWith :: TVar (C.Cache Text Text) -> IO ()
resetStoreWith tvar = do
  newCache <- C.newCache Nothing
  atomically $ writeTVar tvar newCache

-- | Compose a unique cache key from throttle name, algorithm, IP zone, and user identifier.
--
-- Creates hierarchical cache keys that prevent collisions and enable
-- efficient organization of rate limiting data. The key format follows
-- a consistent pattern across all algorithms.
--
-- ==== Key Format
--
-- @
-- \<algorithm>:\<throttleName>:\<ipZone>:\<userKey>
-- @
--
-- ==== Use Cases
--
-- * __Multi-tenant Applications__: Separate rate limits per tenant
-- * __Geographic Zones__: Different limits for different regions
-- * __Service Tiers__: Varied limits based on user subscription level
-- * __API Versioning__: Separate limits for different API versions
--
-- ==== Benefits
--
-- * __Collision Prevention__: Hierarchical structure prevents key conflicts
-- * __Query Efficiency__: Pattern-based queries and cleanup
-- * __Debugging__: Clear key structure aids troubleshooting
-- * __Monitoring__: Easy to aggregate metrics by zone or user type
--
-- ==== Example
--
-- @
-- -- Create key for API rate limiting
-- let key = makeCacheKey "api_limit" TokenBucket "us-east-1" "user123"
-- -- Result: "TokenBucket:api_limit:us-east-1:user123"
--
-- -- Create key for login attempts
-- let key = makeCacheKey "login_attempts" FixedWindow "global" "192.168.1.1"
-- -- Result: "FixedWindow:login_attempts:global:192.168.1.1"
-- @
makeCacheKey :: Text      -- ^ Throttle name (unique per rule)
             -> Algorithm -- ^ Rate limiting algorithm
             -> Text      -- ^ IP zone or region identifier
             -> Text      -- ^ User key (user ID, IP address, API key, etc.)
             -> Text      -- ^ Complete hierarchical cache key
makeCacheKey throttleName algo ipZone userKey =
  algoToText algo <> ":" <> throttleName <> ":" <> ipZone <> ":" <> userKey

-- | Convert an Algorithm value to its canonical Text representation.
--
-- This function provides a standardized textual representation of rate limiting
-- algorithms for serialization, logging, and configuration files. Each algorithm
-- has exactly one canonical PascalCase representation matching the constructor names.
--
-- ==== Output Format
--
-- @
-- FixedWindow    → "FixedWindow"
-- SlidingWindow  → "SlidingWindow" 
-- TokenBucket    → "TokenBucket"
-- LeakyBucket    → "LeakyBucket"
-- TinyLRU        → "TinyLRU"
-- @
--
-- ==== Examples
--
-- @
-- -- Basic usage
-- algoToText TokenBucket  -- "TokenBucket"
--
-- -- Configuration serialization
-- data RateLimitConfig = RateLimitConfig
--   { algorithm :: Text, limit :: Int, window :: Int }
-- 
-- config = RateLimitConfig (algoToText SlidingWindow) 1000 3600
--
-- -- Logging
-- logInfo $ "Using " <> algoToText currentAlgo <> " rate limiting"
-- @
--
-- This function is pure, thread-safe, and forms a reversible pair with 'parseAlgoText'.
-- Time complexity: O(1), Space complexity: O(1).
algoToText :: Algorithm -- ^ The algorithm to convert
           -> Text      -- ^ Canonical text representation
algoToText FixedWindow   = "FixedWindow"
algoToText SlidingWindow = "SlidingWindow"
algoToText TokenBucket   = "TokenBucket"
algoToText LeakyBucket   = "LeakyBucket"
algoToText TinyLRU       = "TinyLRU"

-- | Parse a Text representation back into an Algorithm value.
--
-- Provides flexible parsing with case-insensitive matching and hyphenated format support.
-- Returns an Aeson Parser for seamless JSON integration with rich error reporting.
--
-- ==== Supported Formats
--
-- @
-- Algorithm      │ Accepted Inputs (case-insensitive)
-- ──────────────┼───────────────────────────────────
-- FixedWindow   │ "fixedwindow", "fixed-window"
-- SlidingWindow │ "slidingwindow", "sliding-window"
-- TokenBucket   │ "tokenbucket", "token-bucket"
-- LeakyBucket   │ "leakybucket", "leaky-bucket"
-- TinyLRU       │ "tinylru", "tiny-lru"
-- @
--
-- ==== Examples
--
-- @
-- -- JSON parsing
-- instance FromJSON RateLimitConfig where
--   parseJSON = withObject "RateLimitConfig" $ \\o ->
--     RateLimitConfig <$> (o .: "algorithm" >>= parseAlgoText)
--                     <*> o .: "limit"
--
-- -- Configuration file (accepts flexible formats)
-- parseAlgoText "token-bucket"    -- Success TokenBucket
-- parseAlgoText "SLIDING-WINDOW"  -- Success SlidingWindow
-- parseAlgoText "invalid"         -- Error "Unknown algorithm: invalid"
-- @
--
-- ==== Error Handling
--
-- Calls 'fail' with descriptive error messages for invalid inputs. In aeson's
-- Parser context, this provides rich error reporting with parsing context.
--
-- ==== Round-trip Property
--
-- @
-- forall algo. parseAlgoText (algoToText algo) == Success algo
-- @
--
-- This function is pure, thread-safe, with O(1) time complexity after O(n) normalization.
parseAlgoText :: Text            -- ^ Text representation to parse
              -> Parser Algorithm -- ^ Parsed algorithm or error
parseAlgoText t =
  case Tx.toLower t of
    "fixedwindow"    -> pure FixedWindow
    "fixed-window"   -> pure FixedWindow
    "slidingwindow"  -> pure SlidingWindow
    "sliding-window" -> pure SlidingWindow
    "tokenbucket"    -> pure TokenBucket
    "token-bucket"   -> pure TokenBucket
    "leakybucket"    -> pure LeakyBucket
    "leaky-bucket"   -> pure LeakyBucket
    "tinylru"        -> pure TinyLRU
    "tiny-lru"       -> pure TinyLRU
    _ -> fail ("Unknown algorithm: " <> Tx.unpack t)