chore: initial public snapshot for github upload

This commit is contained in:
Your Name
2026-03-26 20:06:14 +08:00
commit 0e5ecd930e
3497 changed files with 1586236 additions and 0 deletions

View File

@@ -0,0 +1,170 @@
"""
This file contains the LangFuseHandler class
Used to get the LangFuseLogger for a given request
Handles Key/Team Based Langfuse Logging
"""
from typing import TYPE_CHECKING, Any, Dict, Optional
from litellm.litellm_core_utils.litellm_logging import StandardCallbackDynamicParams
from .langfuse import LangFuseLogger, LangfuseLoggingConfig
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import DynamicLoggingCache
else:
DynamicLoggingCache = Any
class LangFuseHandler:
    """
    Resolves which LangFuseLogger instance should be used for a request.

    Supports Key/Team based Langfuse logging: requests that carry dynamic
    Langfuse credentials get a dedicated, cached logger; all other requests
    fall back to the globally configured logger.
    """

    @staticmethod
    def get_langfuse_logger_for_request(
        standard_callback_dynamic_params: StandardCallbackDynamicParams,
        in_memory_dynamic_logger_cache: DynamicLoggingCache,
        globalLangfuseLogger: Optional[LangFuseLogger] = None,
    ) -> LangFuseLogger:
        """
        Get the LangFuseLogger to use for a given request.

        1. If dynamic credentials are passed:
            - check if a LangFuseLogger is cached for those credentials
            - if no cached logger is found, create a new one and cache it
        2. If dynamic credentials are not passed, return the globalLangfuseLogger.
        """
        if not LangFuseHandler._dynamic_langfuse_credentials_are_passed(
            standard_callback_dynamic_params
        ):
            return LangFuseHandler._return_global_langfuse_logger(
                globalLangfuseLogger=globalLangfuseLogger,
                in_memory_dynamic_logger_cache=in_memory_dynamic_logger_cache,
            )

        # Build the Langfuse logging config for this request from the
        # standard_callback_dynamic_params.
        _credentials = LangFuseHandler.get_dynamic_langfuse_logging_config(
            globalLangfuseLogger=globalLangfuseLogger,
            standard_callback_dynamic_params=standard_callback_dynamic_params,
        )
        credentials_dict = dict(_credentials)

        # Re-use a logger previously created for the same credential set, if any.
        langfuse_logger: Optional[LangFuseLogger] = (
            in_memory_dynamic_logger_cache.get_cache(
                credentials=credentials_dict, service_name="langfuse"
            )
        )
        # If not cached, create a new langfuse logger and cache it.
        if langfuse_logger is None:
            langfuse_logger = LangFuseHandler._create_langfuse_logger_from_credentials(
                credentials=credentials_dict,
                in_memory_dynamic_logger_cache=in_memory_dynamic_logger_cache,
            )
        return langfuse_logger

    @staticmethod
    def _return_global_langfuse_logger(
        globalLangfuseLogger: Optional[LangFuseLogger],
        in_memory_dynamic_logger_cache: DynamicLoggingCache,
    ) -> LangFuseLogger:
        """
        Return the global LangfuseLogger set on litellm (the default logger,
        used when no dynamic credentials are passed).

        If no global LangfuseLogger is set, check in_memory_dynamic_logger_cache
        for a cached logger, creating (and caching) one when necessary.
        """
        if globalLangfuseLogger is not None:
            return globalLangfuseLogger

        # The global langfuse logger reads credentials from environment
        # variables, so it is cached under an empty credentials dict.
        credentials_dict: Dict[str, Any] = {}
        globalLangfuseLogger = in_memory_dynamic_logger_cache.get_cache(
            credentials=credentials_dict,
            service_name="langfuse",
        )
        if globalLangfuseLogger is None:
            globalLangfuseLogger = (
                LangFuseHandler._create_langfuse_logger_from_credentials(
                    credentials=credentials_dict,
                    in_memory_dynamic_logger_cache=in_memory_dynamic_logger_cache,
                )
            )
        return globalLangfuseLogger

    @staticmethod
    def _create_langfuse_logger_from_credentials(
        credentials: Dict,
        in_memory_dynamic_logger_cache: DynamicLoggingCache,
    ) -> LangFuseLogger:
        """
        Create a LangFuseLogger from the given credentials and cache it so the
        same credential set never re-creates a logger.
        """
        langfuse_logger = LangFuseLogger(
            langfuse_public_key=credentials.get("langfuse_public_key"),
            langfuse_secret=credentials.get("langfuse_secret"),
            langfuse_host=credentials.get("langfuse_host"),
        )
        in_memory_dynamic_logger_cache.set_cache(
            credentials=credentials,
            service_name="langfuse",
            logging_obj=langfuse_logger,
        )
        return langfuse_logger

    @staticmethod
    def get_dynamic_langfuse_logging_config(
        standard_callback_dynamic_params: StandardCallbackDynamicParams,
        globalLangfuseLogger: Optional[LangFuseLogger] = None,
    ) -> LangfuseLoggingConfig:
        """
        Build the Langfuse logging config to use for a given request from the
        dynamic parameters in standard_callback_dynamic_params.

        NOTE: ``globalLangfuseLogger`` is currently unused; it is kept for
        backward compatibility with existing callers.
        """
        # "langfuse_secret" and "langfuse_secret_key" are alternate spellings of
        # the same credential; prefer the former when both are present.
        return LangfuseLoggingConfig(
            langfuse_secret=standard_callback_dynamic_params.get("langfuse_secret")
            or standard_callback_dynamic_params.get("langfuse_secret_key"),
            langfuse_public_key=standard_callback_dynamic_params.get(
                "langfuse_public_key"
            ),
            langfuse_host=standard_callback_dynamic_params.get("langfuse_host"),
        )

    @staticmethod
    def _dynamic_langfuse_credentials_are_passed(
        standard_callback_dynamic_params: StandardCallbackDynamicParams,
    ) -> bool:
        """
        Check whether any dynamic Langfuse credential is present in
        standard_callback_dynamic_params.

        Returns:
            bool: True if any dynamic langfuse credential is passed, False otherwise.
        """
        return any(
            standard_callback_dynamic_params.get(key) is not None
            for key in (
                "langfuse_host",
                "langfuse_public_key",
                "langfuse_secret",
                "langfuse_secret_key",
            )
        )

View File

@@ -0,0 +1,42 @@
"""
Mock httpx client for Langfuse integration testing.
This module intercepts Langfuse API calls and returns successful mock responses,
allowing full code execution without making actual network calls.
Usage:
Set LANGFUSE_MOCK=true in environment variables or config to enable mock mode.
"""
import httpx
from litellm.integrations.mock_client_factory import (
MockClientConfig,
create_mock_client_factory,
)
# Create mock client using factory.
# The factory returns a (client-builder, predicate) pair; the predicate reads
# the LANGFUSE_MOCK env var to decide whether mock mode is active.
_config = MockClientConfig(
    name="LANGFUSE",
    env_var="LANGFUSE_MOCK",
    default_latency_ms=100,
    default_status_code=200,
    default_json_data={"status": "success"},
    # Requests whose URL contains any of these substrings are intercepted.
    url_matchers=[
        ".langfuse.com",
        "langfuse.com",
    ],
    # Langfuse uses a synchronous httpx client, so only the sync client is patched.
    patch_async_handler=False,
    patch_sync_client=True,
)
(
    _create_mock_langfuse_client_internal,
    should_use_langfuse_mock,
) = create_mock_client_factory(_config)
# Langfuse needs to return an httpx.Client instance
def create_mock_langfuse_client():
    """Return a plain ``httpx.Client``; the factory's monkey-patch intercepts all calls."""
    # Installing the interception happens inside the factory-built helper; the
    # returned client is an ordinary httpx.Client whose requests get mocked.
    _create_mock_langfuse_client_internal()
    client = httpx.Client()
    return client

View File

@@ -0,0 +1,422 @@
import base64
import json # <--- NEW
import os
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional, Union
from litellm._logging import verbose_logger
from litellm.integrations.arize import _utils
from litellm.integrations.langfuse.langfuse_otel_attributes import (
LangfuseLLMObsOTELAttributes,
)
from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
from litellm.types.integrations.langfuse_otel import (
LangfuseSpanAttributes,
)
from litellm.types.utils import StandardCallbackDynamicParams
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
Span = Union[_Span, Any]
else:
Span = Any
# Default OTLP endpoints for Langfuse Cloud (EU and US regions).
LANGFUSE_CLOUD_EU_ENDPOINT = "https://cloud.langfuse.com/api/public/otel"
LANGFUSE_CLOUD_US_ENDPOINT = "https://us.cloud.langfuse.com/api/public/otel"
class LangfuseOtelLogger(OpenTelemetry):
    """
    OpenTelemetry-based Langfuse logger.

    Exports LLM call spans to Langfuse's OTLP endpoint and enriches them with
    Langfuse-specific attributes (trace/session/user metadata, observation
    input/output). Service-level proxy spans are deliberately suppressed so
    Langfuse only receives actual LLM calls.
    """

    def __init__(self, config=None, *args, **kwargs):
        # Prevent LangfuseOtelLogger from modifying global environment variables
        # by constructing the config manually and passing it to the parent
        # OpenTelemetry class.
        if config is None:
            config = self._create_open_telemetry_config_from_langfuse_env()
        super().__init__(config=config, *args, **kwargs)

    @staticmethod
    def set_langfuse_otel_attributes(span: Span, kwargs, response_obj):
        """
        Sets OpenTelemetry span attributes for Langfuse observability.
        Uses the same attribute setting logic as Arize Phoenix for consistency.
        """
        _utils.set_attributes(span, kwargs, response_obj, LangfuseLLMObsOTELAttributes)

        #########################################################
        # Set Langfuse specific attributes
        #########################################################
        LangfuseOtelLogger._set_langfuse_specific_attributes(
            span=span, kwargs=kwargs, response_obj=response_obj
        )

    @staticmethod
    def _extract_langfuse_metadata(kwargs: dict) -> dict:
        """
        Extracts Langfuse metadata from the standard LiteLLM kwargs structure.
        1. Reads kwargs["litellm_params"]["metadata"] if present and is a dict.
        2. Enriches it with any `langfuse_*` request-header params via the
           existing LangFuseLogger.add_metadata_from_header helper so that proxy
           users get identical behaviour across vanilla and OTEL integrations.
        """
        litellm_params = kwargs.get("litellm_params", {}) or {}
        metadata = litellm_params.get("metadata") or {}
        # Ensure we only work with dicts
        if metadata is None or not isinstance(metadata, dict):
            metadata = {}
        # Re-use header extraction logic from the vanilla logger if available
        try:
            from litellm.integrations.langfuse.langfuse import (
                LangFuseLogger as _LFLogger,
            )

            metadata = _LFLogger.add_metadata_from_header(litellm_params, metadata)  # type: ignore
        except Exception:
            # Fallback silently if import fails; header enrichment just won't happen
            pass
        return metadata

    @staticmethod
    def _set_metadata_attributes(span: Span, metadata: dict):
        """Helper to set metadata attributes from mapping."""
        from litellm.integrations.arize._utils import safe_set_attribute

        # Maps vanilla-Langfuse metadata keys to their OTEL attribute names.
        mapping = {
            "generation_name": LangfuseSpanAttributes.GENERATION_NAME,
            "generation_id": LangfuseSpanAttributes.GENERATION_ID,
            "parent_observation_id": LangfuseSpanAttributes.PARENT_OBSERVATION_ID,
            "version": LangfuseSpanAttributes.GENERATION_VERSION,
            "mask_input": LangfuseSpanAttributes.MASK_INPUT,
            "mask_output": LangfuseSpanAttributes.MASK_OUTPUT,
            "trace_user_id": LangfuseSpanAttributes.TRACE_USER_ID,
            "session_id": LangfuseSpanAttributes.SESSION_ID,
            "tags": LangfuseSpanAttributes.TAGS,
            "trace_name": LangfuseSpanAttributes.TRACE_NAME,
            "trace_id": LangfuseSpanAttributes.TRACE_ID,
            "trace_metadata": LangfuseSpanAttributes.TRACE_METADATA,
            "trace_version": LangfuseSpanAttributes.TRACE_VERSION,
            "trace_release": LangfuseSpanAttributes.TRACE_RELEASE,
            "existing_trace_id": LangfuseSpanAttributes.EXISTING_TRACE_ID,
            "update_trace_keys": LangfuseSpanAttributes.UPDATE_TRACE_KEYS,
            "debug_langfuse": LangfuseSpanAttributes.DEBUG_LANGFUSE,
        }
        for key, enum_attr in mapping.items():
            if key in metadata and metadata[key] is not None:
                value = metadata[key]
                if key == "trace_id" and isinstance(value, str):
                    # trace_id must be 32 hex char no dashes for langfuse : Litellm sends uuid with dashes (might be breaking at some point)
                    value = value.replace("-", "")
                # Complex values must be serialised for OTEL attribute compatibility.
                if isinstance(value, (list, dict)):
                    try:
                        value = json.dumps(value)
                    except Exception:
                        value = str(value)
                safe_set_attribute(span, enum_attr.value, value)

    @staticmethod
    def _set_observation_output(span: Span, response_obj):
        """Helper to set observation output attributes."""
        from litellm.integrations.arize._utils import safe_set_attribute
        from litellm.litellm_core_utils.safe_json_dumps import safe_dumps

        if not response_obj or not hasattr(response_obj, "get"):
            return

        # Chat-completions shaped responses: {"choices": [{"message": ...}]}
        choices = response_obj.get("choices", [])
        if choices:
            first_choice = choices[0]
            message = first_choice.get("message", {})
            tool_calls = message.get("tool_calls")
            if tool_calls:
                transformed_tool_calls = []
                for tool_call in tool_calls:
                    function = tool_call.get("function", {})
                    arguments_str = function.get("arguments", "{}")
                    # Malformed JSON arguments must not abort logging.
                    try:
                        arguments_obj = (
                            json.loads(arguments_str)
                            if isinstance(arguments_str, str)
                            else arguments_str
                        )
                    except json.JSONDecodeError:
                        arguments_obj = {}
                    # NOTE(review): "id" is taken from the response object, not the
                    # tool call; the per-call id is exposed as "call_id" — confirm
                    # this matches the Langfuse schema.
                    langfuse_tool_call = {
                        "id": response_obj.get("id", ""),
                        "name": function.get("name", ""),
                        "call_id": tool_call.get("id", ""),
                        "type": "function_call",
                        "arguments": arguments_obj,
                    }
                    transformed_tool_calls.append(langfuse_tool_call)
                safe_set_attribute(
                    span,
                    LangfuseSpanAttributes.OBSERVATION_OUTPUT.value,
                    safe_dumps(transformed_tool_calls),
                )
            else:
                output_data = {}
                if message.get("role"):
                    output_data["role"] = message.get("role")
                if message.get("content") is not None:
                    output_data["content"] = message.get("content")
                if output_data:
                    safe_set_attribute(
                        span,
                        LangfuseSpanAttributes.OBSERVATION_OUTPUT.value,
                        safe_dumps(output_data),
                    )

        # Responses-API shaped responses: {"output": [...]} with typed items.
        output = response_obj.get("output", [])
        if output:
            output_items_data: list[dict] = []
            for item in output:
                if hasattr(item, "type"):
                    item_type = item.type
                    if item_type == "reasoning" and hasattr(item, "summary"):
                        for summary in item.summary:
                            if hasattr(summary, "text"):
                                output_items_data.append(
                                    {
                                        "role": "reasoning_summary",
                                        "content": summary.text,
                                    }
                                )
                    elif item_type == "message":
                        output_items_data.append(
                            {
                                "role": getattr(item, "role", "assistant"),
                                "content": getattr(
                                    getattr(item, "content", [{}])[0], "text", ""
                                ),
                            }
                        )
                    elif item_type == "function_call":
                        arguments_str = getattr(item, "arguments", "{}")
                        # Bug fix: previously a malformed JSON arguments string
                        # raised and aborted logging; mirror the chat branch above.
                        try:
                            arguments_obj = (
                                json.loads(arguments_str)
                                if isinstance(arguments_str, str)
                                else arguments_str
                            )
                        except json.JSONDecodeError:
                            arguments_obj = {}
                        langfuse_tool_call = {
                            "id": getattr(item, "id", ""),
                            "name": getattr(item, "name", ""),
                            "call_id": getattr(item, "call_id", ""),
                            "type": "function_call",
                            "arguments": arguments_obj,
                        }
                        output_items_data.append(langfuse_tool_call)
            if output_items_data:
                safe_set_attribute(
                    span,
                    LangfuseSpanAttributes.OBSERVATION_OUTPUT.value,
                    safe_dumps(output_items_data),
                )

    @staticmethod
    def _set_langfuse_specific_attributes(span: Span, kwargs, response_obj):
        """
        Sets Langfuse specific metadata attributes onto the OTEL span.

        All keys supported by the vanilla Langfuse integration are mapped to
        OTEL-safe attribute names defined in LangfuseSpanAttributes. Complex
        values (lists/dicts) are serialised to JSON strings for OTEL
        compatibility.
        """
        from litellm.integrations.arize._utils import safe_set_attribute
        from litellm.litellm_core_utils.safe_json_dumps import safe_dumps

        langfuse_environment = os.environ.get("LANGFUSE_TRACING_ENVIRONMENT")
        if langfuse_environment:
            safe_set_attribute(
                span,
                LangfuseSpanAttributes.LANGFUSE_ENVIRONMENT.value,
                langfuse_environment,
            )
        metadata = LangfuseOtelLogger._extract_langfuse_metadata(kwargs)
        LangfuseOtelLogger._set_metadata_attributes(span=span, metadata=metadata)
        messages = kwargs.get("messages")
        if messages:
            safe_set_attribute(
                span,
                LangfuseSpanAttributes.OBSERVATION_INPUT.value,
                safe_dumps(messages),
            )
        LangfuseOtelLogger._set_observation_output(span=span, response_obj=response_obj)

    @staticmethod
    def _get_langfuse_otel_host() -> Optional[str]:
        """
        Returns the Langfuse OTEL host based on environment variables.

        Returned in the following order of precedence:
        1. LANGFUSE_OTEL_HOST
        2. LANGFUSE_HOST
        """
        return os.environ.get("LANGFUSE_OTEL_HOST") or os.environ.get("LANGFUSE_HOST")

    @staticmethod
    def _resolve_langfuse_otel_endpoint() -> str:
        """
        Resolve the OTLP endpoint to use, preferring a host from the
        environment (see _get_langfuse_otel_host) and defaulting to the
        Langfuse US cloud endpoint otherwise.
        """
        langfuse_host = LangfuseOtelLogger._get_langfuse_otel_host()
        if langfuse_host:
            # If LANGFUSE_HOST is provided, construct OTEL endpoint from it
            if not langfuse_host.startswith("http"):
                langfuse_host = "https://" + langfuse_host
            endpoint = f"{langfuse_host.rstrip('/')}/api/public/otel"
            verbose_logger.debug(f"Using Langfuse OTEL endpoint from host: {endpoint}")
        else:
            # Default to US cloud endpoint
            endpoint = LANGFUSE_CLOUD_US_ENDPOINT
            verbose_logger.debug(f"Using Langfuse US cloud endpoint: {endpoint}")
        return endpoint

    def _create_open_telemetry_config_from_langfuse_env(self) -> OpenTelemetryConfig:
        """
        Creates OpenTelemetryConfig from Langfuse environment variables.
        Does NOT modify global environment variables.
        """
        from litellm.integrations.opentelemetry import OpenTelemetryConfig

        public_key = os.environ.get("LANGFUSE_PUBLIC_KEY", None)
        secret_key = os.environ.get("LANGFUSE_SECRET_KEY", None)
        if not public_key or not secret_key:
            # If no keys, return default from env (likely logging to console or something else)
            return OpenTelemetryConfig.from_env()

        endpoint = LangfuseOtelLogger._resolve_langfuse_otel_endpoint()
        auth_header = LangfuseOtelLogger._get_langfuse_authorization_header(
            public_key=public_key, secret_key=secret_key
        )
        otlp_auth_headers = f"Authorization={auth_header}"
        return OpenTelemetryConfig(
            exporter="otlp_http",
            endpoint=endpoint,
            headers=otlp_auth_headers,
        )

    @staticmethod
    def get_langfuse_otel_config() -> "OpenTelemetryConfig":
        """
        Retrieves the Langfuse OpenTelemetry configuration based on environment variables.

        Environment Variables:
            LANGFUSE_PUBLIC_KEY: Required. Langfuse public key for authentication.
            LANGFUSE_SECRET_KEY: Required. Langfuse secret key for authentication.
            LANGFUSE_HOST: Optional. Custom Langfuse host URL. Defaults to US cloud.

        Returns:
            OpenTelemetryConfig: A Pydantic model containing Langfuse OTEL configuration.

        Raises:
            ValueError: If required keys are missing.
        """
        public_key = os.environ.get("LANGFUSE_PUBLIC_KEY", None)
        secret_key = os.environ.get("LANGFUSE_SECRET_KEY", None)
        if not public_key or not secret_key:
            raise ValueError(
                "LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY must be set for Langfuse OpenTelemetry integration."
            )

        endpoint = LangfuseOtelLogger._resolve_langfuse_otel_endpoint()
        auth_header = LangfuseOtelLogger._get_langfuse_authorization_header(
            public_key=public_key, secret_key=secret_key
        )
        otlp_auth_headers = f"Authorization={auth_header}"
        # Prevent modification of global env vars which causes leakage:
        # OTEL_EXPORTER_OTLP_ENDPOINT / OTEL_EXPORTER_OTLP_HEADERS are NOT set here.
        return OpenTelemetryConfig(
            exporter="otlp_http",
            endpoint=endpoint,
            headers=otlp_auth_headers,
        )

    @staticmethod
    def _get_langfuse_authorization_header(public_key: str, secret_key: str) -> str:
        """
        Get the Basic authorization header for Langfuse OpenTelemetry
        (base64 of "public_key:secret_key").
        """
        auth_string = f"{public_key}:{secret_key}"
        auth_header = base64.b64encode(auth_string.encode()).decode()
        return f"Basic {auth_header}"

    def construct_dynamic_otel_headers(
        self, standard_callback_dynamic_params: StandardCallbackDynamicParams
    ) -> Optional[dict]:
        """
        Construct dynamic Langfuse headers from standard callback dynamic params.
        This is used for team/key based logging.

        Returns:
            dict: A dictionary of dynamic Langfuse headers (empty when either
            key is missing).
        """
        dynamic_headers = {}
        dynamic_langfuse_public_key = standard_callback_dynamic_params.get(
            "langfuse_public_key"
        )
        dynamic_langfuse_secret_key = standard_callback_dynamic_params.get(
            "langfuse_secret_key"
        )
        if dynamic_langfuse_public_key and dynamic_langfuse_secret_key:
            auth_header = LangfuseOtelLogger._get_langfuse_authorization_header(
                public_key=dynamic_langfuse_public_key,
                secret_key=dynamic_langfuse_secret_key,
            )
            dynamic_headers["Authorization"] = auth_header
        return dynamic_headers

    def create_litellm_proxy_request_started_span(
        self,
        start_time: datetime,
        headers: dict,
    ) -> Optional[Span]:
        """
        Override to prevent creating empty proxy request spans.

        Langfuse should only receive spans for actual LLM calls, not for
        internal proxy operations (auth, postgres, proxy_pre_call, etc.).

        By returning None, we prevent the parent span from being created,
        which in turn prevents empty traces from being sent to Langfuse.
        """
        return None

    async def async_service_success_hook(self, *args, **kwargs):
        """
        Langfuse should not receive service success logs.
        """
        pass

    async def async_service_failure_hook(self, *args, **kwargs):
        """
        Langfuse should not receive service failure logs.
        """
        pass

View File

@@ -0,0 +1,108 @@
"""
If the LLM Obs has any specific attributes to log request or response, we can add them here.
Relevant Issue: https://github.com/BerriAI/litellm/issues/13764
"""
import json
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from pydantic import BaseModel
from typing_extensions import override
from litellm.integrations.opentelemetry_utils.base_otel_llm_obs_attributes import (
BaseLLMObsOTELAttributes,
safe_set_attribute,
)
from litellm.types.llms.openai import HttpxBinaryResponseContent, ResponsesAPIResponse
from litellm.types.utils import (
EmbeddingResponse,
ImageResponse,
ModelResponse,
RerankResponse,
TextCompletionResponse,
TranscriptionResponse,
)
if TYPE_CHECKING:
from opentelemetry.trace import Span
def get_output_content_by_type(
    response_obj: Union[
        None,
        dict,
        EmbeddingResponse,
        ModelResponse,
        TextCompletionResponse,
        ImageResponse,
        TranscriptionResponse,
        RerankResponse,
        HttpxBinaryResponseContent,
        ResponsesAPIResponse,
        list,
    ],
    kwargs: Optional[Dict[str, Any]] = None,
) -> str:
    """
    Extract output content from response objects based on their type.

    This utility function handles the type-specific logic for converting
    various response objects into appropriate output formats for Langfuse logging.

    Args:
        response_obj: The response object returned by the function
        kwargs: Optional keyword arguments containing call_type and other metadata

    Returns:
        str: The formatted output content suitable for Langfuse logging; an
        empty string when there is nothing loggable (fixes the previous
        docstring, which wrongly claimed ``None`` could be returned).
    """
    if response_obj is None:
        return ""
    kwargs = kwargs or {}
    call_type = kwargs.get("call_type", None)

    # Embedding responses carry no human-readable output content.
    if call_type == "embedding" or isinstance(response_obj, EmbeddingResponse):
        return "embedding-output"
    # Binary/speech responses (e.g. TTS audio) cannot be serialised as text.
    if isinstance(response_obj, HttpxBinaryResponseContent):
        return "speech-output"
    # Pydantic models (ModelResponse, ResponsesAPIResponse, ...) serialise themselves.
    if isinstance(response_obj, BaseModel):
        return response_obj.model_dump_json()
    # Non-empty plain containers are JSON-encoded directly; empty containers
    # intentionally fall through to "" (preserves original behavior).
    if response_obj and isinstance(response_obj, (dict, list)):
        return json.dumps(response_obj)
    return ""
class LangfuseLLMObsOTELAttributes(BaseLLMObsOTELAttributes):
    """Langfuse-specific OTEL attribute setters for LLM observability spans."""

    @staticmethod
    @override
    def set_messages(span: "Span", kwargs: Dict[str, Any]):
        """Record the request messages (plus functions/tools when present) as the observation input."""
        payload: Dict[str, Any] = {"messages": kwargs.get("messages")}
        opts = kwargs.get("optional_params", {})
        # Only include functions/tools when the caller actually supplied them.
        for key in ("functions", "tools"):
            value = opts.get(key)
            if value is not None:
                payload[key] = value
        safe_set_attribute(span, "langfuse.observation.input", json.dumps(payload))

    @staticmethod
    @override
    def set_response_output_messages(span: "Span", response_obj):
        """Record the type-appropriate serialisation of the response as the observation output."""
        serialised = get_output_content_by_type(response_obj)
        safe_set_attribute(span, "langfuse.observation.output", serialised)

View File

@@ -0,0 +1,358 @@
"""
Call Hook for LiteLLM Proxy which allows Langfuse prompt management.
"""
import os
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
from packaging.version import Version
from typing_extensions import TypeAlias
from litellm.integrations.custom_logger import CustomLogger
from litellm.integrations.prompt_management_base import PromptManagementClient
from litellm.litellm_core_utils.asyncify import run_async_function
from litellm.types.llms.openai import AllMessageValues, ChatCompletionSystemMessage
from litellm.types.prompts.init_prompts import PromptSpec
from litellm.types.utils import StandardCallbackDynamicParams, StandardLoggingPayload
from ...litellm_core_utils.specialty_caches.dynamic_logging_cache import (
DynamicLoggingCache,
)
from ..prompt_management_base import PromptManagementBase
from .langfuse import LangFuseLogger
from .langfuse_handler import LangFuseHandler
if TYPE_CHECKING:
from langfuse import Langfuse
from langfuse.client import ChatPromptClient, TextPromptClient
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
LangfuseClass: TypeAlias = Langfuse
PROMPT_CLIENT = Union[TextPromptClient, ChatPromptClient]
else:
PROMPT_CLIENT = Any
LangfuseClass = Any
LiteLLMLoggingObj = Any
# Module-level cache of dynamically-created Langfuse loggers, shared across all
# LangfusePromptManagement instances to avoid re-creating clients per request.
in_memory_dynamic_logger_cache = DynamicLoggingCache()
@lru_cache(maxsize=10)
def langfuse_client_init(
    langfuse_public_key=None,
    langfuse_secret=None,
    langfuse_secret_key=None,
    langfuse_host=None,
    flush_interval=1,
) -> LangfuseClass:
    """
    Initialize a Langfuse client, cached (lru_cache) so the same credential set
    never re-initializes a client.

    Args:
        langfuse_public_key (str, optional): Public key for Langfuse. Defaults to None.
        langfuse_secret (str, optional): Secret key for Langfuse. Defaults to None.
        langfuse_secret_key (str, optional): Alternate name for the secret key;
            used when `langfuse_secret` is not provided. Defaults to None.
        langfuse_host (str, optional): Host URL for Langfuse. Defaults to None.
        flush_interval (int, optional): Flush interval in seconds. Defaults to 1.

    Returns:
        Langfuse: Initialized Langfuse client instance

    Raises:
        Exception: If the langfuse package is not installed
    """
    try:
        import langfuse
        from langfuse import Langfuse
    except Exception as e:
        raise Exception(
            f"\033[91mLangfuse not installed, try running 'pip install langfuse' to fix this error: {e}\n\033[0m"
        )

    # Fall back to environment variables when credentials are not passed in.
    secret_key = (
        langfuse_secret or langfuse_secret_key or os.getenv("LANGFUSE_SECRET_KEY")
    )
    public_key = langfuse_public_key or os.getenv("LANGFUSE_PUBLIC_KEY")
    langfuse_host = langfuse_host or os.getenv(
        "LANGFUSE_HOST", "https://cloud.langfuse.com"
    )
    if not langfuse_host.startswith(("http://", "https://")):
        # add http:// if unset, assume communicating over private network - e.g. render
        langfuse_host = "http://" + langfuse_host

    langfuse_release = os.getenv("LANGFUSE_RELEASE")
    langfuse_debug = os.getenv("LANGFUSE_DEBUG")
    parameters = {
        "public_key": public_key,
        "secret_key": secret_key,
        "host": langfuse_host,
        "release": langfuse_release,
        "debug": langfuse_debug,
        "flush_interval": LangFuseLogger._get_langfuse_flush_interval(
            flush_interval
        ),  # flush interval in seconds
    }
    # sdk_integration tagging is only supported by langfuse >= 2.6.0.
    if Version(langfuse.version.__version__) >= Version("2.6.0"):
        parameters["sdk_integration"] = "litellm"
    client = Langfuse(**parameters)
    return client
class LangfusePromptManagement(LangFuseLogger, PromptManagementBase, CustomLogger):
    """
    Call hook for LiteLLM Proxy that resolves prompts from Langfuse prompt
    management, and logs success/failure events to Langfuse.
    """

    def __init__(
        self,
        langfuse_public_key=None,
        langfuse_secret=None,
        langfuse_host=None,
        flush_interval=1,
    ):
        # Imported lazily so importing this module does not require langfuse.
        import langfuse
        self.langfuse_sdk_version = langfuse.version.__version__
        # Cached client (langfuse_client_init is lru_cached on its arguments).
        self.Langfuse = langfuse_client_init(
            langfuse_public_key=langfuse_public_key,
            langfuse_secret=langfuse_secret,
            langfuse_host=langfuse_host,
            flush_interval=flush_interval,
        )

    @property
    def integration_name(self):
        """Name under which this prompt-management integration is registered."""
        return "langfuse"

    def _get_prompt_from_id(
        self,
        langfuse_prompt_id: str,
        langfuse_client: LangfuseClass,
        prompt_label: Optional[str] = None,
        prompt_version: Optional[int] = None,
    ) -> PROMPT_CLIENT:
        """Fetch a prompt client from Langfuse by id (optionally pinned to a label/version)."""
        prompt_client = langfuse_client.get_prompt(
            langfuse_prompt_id, label=prompt_label, version=prompt_version
        )
        return prompt_client

    def _compile_prompt(
        self,
        langfuse_prompt_client: PROMPT_CLIENT,
        langfuse_prompt_variables: Optional[dict],
        call_type: Union[Literal["completion"], Literal["text_completion"]],
    ) -> List[AllMessageValues]:
        """
        Compile a Langfuse prompt with the given variables into chat messages.

        A text prompt (compiled to a str) is wrapped in a single system message;
        a chat prompt is returned as the compiled message list.
        NOTE(review): `call_type` is accepted but not used here — confirm
        whether text_completion needs distinct handling.
        """
        compiled_prompt: Optional[Union[str, list]] = None
        if langfuse_prompt_variables is None:
            langfuse_prompt_variables = {}
        compiled_prompt = langfuse_prompt_client.compile(**langfuse_prompt_variables)
        if isinstance(compiled_prompt, str):
            compiled_prompt = [
                ChatCompletionSystemMessage(role="system", content=compiled_prompt)
            ]
        else:
            compiled_prompt = cast(List[AllMessageValues], compiled_prompt)
        return compiled_prompt

    def _get_optional_params_from_langfuse(
        self, langfuse_prompt_client: PROMPT_CLIENT
    ) -> dict:
        """Return the prompt's config entries as optional params, excluding "model"."""
        config = langfuse_prompt_client.config
        optional_params = {}
        for k, v in config.items():
            if k != "model":
                optional_params[k] = v
        return optional_params

    async def async_get_chat_completion_prompt(
        self,
        model: str,
        messages: List[AllMessageValues],
        non_default_params: dict,
        prompt_id: Optional[str],
        prompt_variables: Optional[dict],
        dynamic_callback_params: StandardCallbackDynamicParams,
        litellm_logging_obj: LiteLLMLoggingObj,
        prompt_spec: Optional[PromptSpec] = None,
        tools: Optional[List[Dict]] = None,
        prompt_label: Optional[str] = None,
        prompt_version: Optional[int] = None,
        ignore_prompt_manager_model: Optional[bool] = False,
        ignore_prompt_manager_optional_params: Optional[bool] = False,
    ) -> Tuple[str, List[AllMessageValues], dict,]:
        """
        Async wrapper delegating to the sync get_chat_completion_prompt.

        NOTE(review): `tools` and `litellm_logging_obj` are accepted but not
        forwarded to the sync implementation — confirm the base-class contract
        does not require them.
        """
        return self.get_chat_completion_prompt(
            model,
            messages,
            non_default_params,
            prompt_id,
            prompt_variables,
            dynamic_callback_params,
            prompt_spec=prompt_spec,
            prompt_label=prompt_label,
            prompt_version=prompt_version,
            ignore_prompt_manager_model=ignore_prompt_manager_model,
            ignore_prompt_manager_optional_params=ignore_prompt_manager_optional_params,
        )

    def should_run_prompt_management(
        self,
        prompt_id: Optional[str],
        prompt_spec: Optional[PromptSpec],
        dynamic_callback_params: StandardCallbackDynamicParams,
    ) -> bool:
        """
        Decide whether Langfuse prompt management applies to this request:
        True only when a prompt_id is given and Langfuse can resolve it.
        """
        if prompt_id is None:
            return False
        langfuse_client = langfuse_client_init(
            langfuse_public_key=dynamic_callback_params.get("langfuse_public_key"),
            langfuse_secret=dynamic_callback_params.get("langfuse_secret"),
            langfuse_secret_key=dynamic_callback_params.get("langfuse_secret_key"),
            langfuse_host=dynamic_callback_params.get("langfuse_host"),
        )
        langfuse_prompt_client = self._get_prompt_from_id(
            langfuse_prompt_id=prompt_id,
            langfuse_client=langfuse_client,
        )
        return langfuse_prompt_client is not None

    def _compile_prompt_helper(
        self,
        prompt_id: Optional[str],
        prompt_spec: Optional[PromptSpec],
        prompt_variables: Optional[dict],
        dynamic_callback_params: StandardCallbackDynamicParams,
        prompt_label: Optional[str] = None,
        prompt_version: Optional[int] = None,
    ) -> PromptManagementClient:
        """
        Resolve the prompt from Langfuse and compile it into a
        PromptManagementClient (template, model, optional params).

        Raises:
            ValueError: If prompt_id is None.
        """
        if prompt_id is None:
            raise ValueError("prompt_id is required for Langfuse prompt management")
        langfuse_client = langfuse_client_init(
            langfuse_public_key=dynamic_callback_params.get("langfuse_public_key"),
            langfuse_secret=dynamic_callback_params.get("langfuse_secret"),
            langfuse_secret_key=dynamic_callback_params.get("langfuse_secret_key"),
            langfuse_host=dynamic_callback_params.get("langfuse_host"),
        )
        langfuse_prompt_client = self._get_prompt_from_id(
            langfuse_prompt_id=prompt_id,
            langfuse_client=langfuse_client,
            prompt_label=prompt_label,
            prompt_version=prompt_version,
        )
        ## SET PROMPT
        compiled_prompt = self._compile_prompt(
            langfuse_prompt_client=langfuse_prompt_client,
            langfuse_prompt_variables=prompt_variables,
            call_type="completion",
        )
        # Model may be pinned inside the Langfuse prompt's config.
        template_model = langfuse_prompt_client.config.get("model")
        template_optional_params = self._get_optional_params_from_langfuse(
            langfuse_prompt_client
        )
        return PromptManagementClient(
            prompt_id=prompt_id,
            prompt_template=compiled_prompt,
            prompt_template_model=template_model,
            prompt_template_optional_params=template_optional_params,
            completed_messages=None,
        )

    async def async_compile_prompt_helper(
        self,
        prompt_id: Optional[str],
        prompt_variables: Optional[dict],
        dynamic_callback_params: StandardCallbackDynamicParams,
        prompt_spec: Optional[PromptSpec] = None,
        prompt_label: Optional[str] = None,
        prompt_version: Optional[int] = None,
    ) -> PromptManagementClient:
        """Async wrapper delegating to the sync _compile_prompt_helper."""
        return self._compile_prompt_helper(
            prompt_id=prompt_id,
            prompt_variables=prompt_variables,
            dynamic_callback_params=dynamic_callback_params,
            prompt_spec=prompt_spec,
            prompt_label=prompt_label,
            prompt_version=prompt_version,
        )

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        """Sync success hook: runs the async success logger to completion."""
        return run_async_function(
            self.async_log_success_event, kwargs, response_obj, start_time, end_time
        )

    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        """Sync failure hook: runs the async failure logger to completion."""
        return run_async_function(
            self.async_log_failure_event, kwargs, response_obj, start_time, end_time
        )

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        """
        Log a successful LLM call to Langfuse, resolving a per-request logger
        when dynamic credentials are present. Logging errors are swallowed
        (logged + callback-failure recorded) so they never break the request.
        """
        try:
            standard_callback_dynamic_params = kwargs.get(
                "standard_callback_dynamic_params"
            )
            langfuse_logger_to_use = LangFuseHandler.get_langfuse_logger_for_request(
                globalLangfuseLogger=self,
                standard_callback_dynamic_params=standard_callback_dynamic_params,
                in_memory_dynamic_logger_cache=in_memory_dynamic_logger_cache,
            )
            langfuse_logger_to_use.log_event_on_langfuse(
                kwargs=kwargs,
                response_obj=response_obj,
                start_time=start_time,
                end_time=end_time,
                user_id=kwargs.get("user", None),
            )
        except Exception as e:
            from litellm._logging import verbose_logger
            verbose_logger.exception(
                f"Langfuse Layer Error - Exception occurred while logging success event: {str(e)}"
            )
            self.handle_callback_failure(callback_name="langfuse")

    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        """
        Log a failed LLM call to Langfuse at ERROR level. Skips logging when no
        standard_logging_object is available (nothing to report). Logging
        errors are swallowed so they never break the request.
        """
        try:
            standard_callback_dynamic_params = kwargs.get(
                "standard_callback_dynamic_params"
            )
            langfuse_logger_to_use = LangFuseHandler.get_langfuse_logger_for_request(
                globalLangfuseLogger=self,
                standard_callback_dynamic_params=standard_callback_dynamic_params,
                in_memory_dynamic_logger_cache=in_memory_dynamic_logger_cache,
            )
            standard_logging_object = cast(
                Optional[StandardLoggingPayload],
                kwargs.get("standard_logging_object", None),
            )
            if standard_logging_object is None:
                return
            langfuse_logger_to_use.log_event_on_langfuse(
                start_time=start_time,
                end_time=end_time,
                response_obj=None,
                user_id=kwargs.get("user", None),
                status_message=standard_logging_object["error_str"],
                level="ERROR",
                kwargs=kwargs,
            )
        except Exception as e:
            from litellm._logging import verbose_logger
            verbose_logger.exception(
                f"Langfuse Layer Error - Exception occurred while logging failure event: {str(e)}"
            )
            self.handle_callback_failure(callback_name="langfuse")