module LLM::Cache

Defined in:

llm/cache.cr

Class Method Summary

Class Method Detail

def self.cache_dir : String #

[View source]
def self.clear : Int32 #

[View source]
def self.delete(key : String) : Bool #

[View source]
def self.disable : Nil #

[View source]
def self.disabled_by_env? : Bool #

[View source]
def self.enable : Nil #

[View source]
def self.enabled? : Bool #

[View source]
def self.ensure_dir : Nil #

[View source]
def self.fetch(key : String) : String | Nil #

[View source]
def self.key(provider : String, model : String, kind : String, format : String, payload : String) : String #

Builds a deterministic cache key from the given inputs.

  • provider: "openai", "ollama", url, etc.
  • model: "gpt-4o", "llama3", etc.
  • kind: logical operation, e.g. "FILTER", "ANALYZE", "BUNDLE_ANALYZE"
  • format: response_format string (e.g., "json" or JSON schema string)
  • payload: variable content (file list, source code, bundle, etc.)

Returns a hex-encoded SHA-256 digest of the combined inputs.


[View source]
def self.path_for(key : String) : String #

[View source]
def self.purge_older_than(days : Int32) : Int32 #

[View source]
def self.stats : Hash(String, Int64) #

[View source]
def self.store(key : String, content : String) : Bool #

[View source]