module
LLM::Cache
Defined in:
llm/cache.cr

Class Method Summary
- .cache_dir : String
- .clear : Int32
- .delete(key : String) : Bool
- .disable : Nil
- .disabled_by_env? : Bool
- .enable : Nil
- .enabled? : Bool
- .ensure_dir : Nil
- .fetch(key : String) : String | Nil
- .key(provider : String, model : String, kind : String, format : String, payload : String) : String
  Build a deterministic cache key from inputs
- .path_for(key : String) : String
- .purge_older_than(days : Int32) : Int32
- .stats : Hash(String, Int64)
- .store(key : String, content : String) : Bool
Class Method Detail
def self.key(provider : String, model : String, kind : String, format : String, payload : String) : String
#
Build a deterministic cache key from inputs
- provider: "openai", "ollama", url, etc.
- model: "gpt-4o", "llama3", etc.
- kind: logical operation e.g. "FILTER", "ANALYZE", "BUNDLE_ANALYZE"
- format: response_format string (e.g., "json" or JSON schema string)
- payload: variable content (file list, source code, bundle, etc.)
Returns a hex-encoded SHA256 digest.