chore: initial snapshot for gitea/github upload

This commit is contained in:
Your Name
2026-03-26 16:04:46 +08:00
commit a699a1ac98
3497 changed files with 1586237 additions and 0 deletions

View File

@@ -0,0 +1,27 @@
from typing import Any
import litellm
from litellm.types.utils import ImageResponse
def cost_calculator(
    model: str,
    image_response: Any,
) -> float:
    """
    Recraft image generation cost calculator
    """
    # Look up Recraft-specific pricing for this model first (matches the
    # original evaluation order: pricing lookup happens before type checks).
    model_info = litellm.get_model_info(
        model=model,
        custom_llm_provider=litellm.LlmProviders.RECRAFT.value,
    )
    per_image_cost: float = model_info.get("output_cost_per_image") or 0.0

    if isinstance(image_response, ImageResponse):
        # A missing/empty `data` list means zero billable images.
        image_count = len(image_response.data) if image_response.data else 0
        return per_image_cost * image_count

    raise ValueError(
        f"image_response must be of type ImageResponse got type={type(image_response)}"
    )

View File

@@ -0,0 +1,185 @@
from io import BufferedReader
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast
import httpx
from httpx._types import RequestFiles
from litellm.images.utils import ImageEditRequestUtils
from litellm.llms.base_llm.image_edit.transformation import BaseImageEditConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.images.main import ImageEditOptionalRequestParams
from litellm.types.llms.recraft import RecraftImageEditRequestParams
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import FileTypes, ImageObject, ImageResponse
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any
class RecraftImageEditConfig(BaseImageEditConfig):
    """Transform OpenAI-style image edit calls into Recraft's image-to-image API.

    Recraft API docs: https://www.recraft.ai/docs#image-to-image
    """

    DEFAULT_BASE_URL: str = "https://external.api.recraft.ai"
    IMAGE_EDIT_ENDPOINT: str = "v1/images/imageToImage"
    # Used when the caller does not supply a `strength` value.
    DEFAULT_STRENGTH: float = 0.2

    def get_supported_openai_params(self, model: str) -> List:
        """
        Supported OpenAI parameters that can be mapped to Recraft image edit API.
        Based on Recraft API docs: https://www.recraft.ai/docs#image-to-image
        """
        return [
            "n",  # Maps to n (number of images)
            "response_format",  # Maps to response_format (url or b64_json)
            "style",  # Maps to style parameter
        ]

    def map_openai_params(
        self,
        image_edit_optional_params: ImageEditOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        """
        Map OpenAI image edit parameters to Recraft parameters.

        Unsupported parameters are dropped regardless of ``drop_params``;
        only keys listed by ``get_supported_openai_params`` are forwarded.
        """
        all_params = dict(image_edit_optional_params)
        supported_params = self.get_supported_openai_params(model)
        filtered_params = {k: v for k, v in all_params.items() if k in supported_params}
        return filtered_params

    def get_complete_url(
        self,
        model: str,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Get the complete url for the request.

        Base-url precedence: explicit ``api_base`` argument, then the
        ``RECRAFT_API_BASE`` secret, then ``DEFAULT_BASE_URL``.
        """
        complete_url: str = (
            api_base or get_secret_str("RECRAFT_API_BASE") or self.DEFAULT_BASE_URL
        )
        complete_url = complete_url.rstrip("/")
        complete_url = f"{complete_url}/{self.IMAGE_EDIT_ENDPOINT}"
        return complete_url

    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
    ) -> dict:
        """
        Attach the Recraft bearer token to ``headers``.

        Raises:
            ValueError: if neither ``api_key`` nor the ``RECRAFT_API_KEY``
                secret is set.
        """
        final_api_key: Optional[str] = api_key or get_secret_str("RECRAFT_API_KEY")
        if not final_api_key:
            raise ValueError("RECRAFT_API_KEY is not set")
        headers["Authorization"] = f"Bearer {final_api_key}"
        return headers

    def transform_image_edit_request(
        self,
        model: str,
        prompt: Optional[str],
        image: Optional[FileTypes],
        image_edit_optional_request_params: Dict,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[Dict, RequestFiles]:
        """
        Transform the image edit request to Recraft's multipart form format.

        Returns a ``(data, files)`` tuple: the image is sent as multipart
        ``files`` and every other parameter as form ``data``.
        https://www.recraft.ai/docs#image-to-image
        """
        # Copy first so the caller's dict is not mutated by the pop() below
        # (the original implementation popped from the caller's dict).
        optional_params = dict(image_edit_optional_request_params)
        request_params = {
            "model": model,
            "strength": optional_params.pop("strength", self.DEFAULT_STRENGTH),
            **optional_params,
        }
        if prompt is not None:
            request_params["prompt"] = prompt
        request_body = RecraftImageEditRequestParams(**request_params)
        request_dict = cast(Dict, request_body)

        #########################################################
        # Reuse OpenAI logic: Separate images as `files` and send other parameters as `data`
        #########################################################
        files_list = (
            self._get_image_files_for_request(image=image) if image is not None else []
        )
        data_without_images = {k: v for k, v in request_dict.items() if k != "image"}
        return data_without_images, files_list

    def _get_image_files_for_request(
        self,
        image: Optional[FileTypes],
    ) -> List[Tuple[str, Any]]:
        """
        Build the multipart ``files`` entries for the input image.

        Recraft expects a single image (not an array), so when a list is
        passed only the first element is used.
        """
        files_list: List[Tuple[str, Any]] = []
        if image:
            # OpenAI wraps images in arrays, but for Recraft we need single image
            if isinstance(image, list):
                _image = image[0] if image else None  # Take first image for Recraft
            else:
                _image = image
            if _image is not None:
                image_content_type: str = ImageEditRequestUtils.get_image_content_type(
                    _image
                )
                if isinstance(_image, BufferedReader):
                    # Real file handles carry their own filename.
                    files_list.append(
                        ("image", (_image.name, _image, image_content_type))
                    )
                else:
                    # Raw bytes / streams get a placeholder filename.
                    files_list.append(
                        ("image", ("image.png", _image, image_content_type))
                    )
        return files_list

    def transform_image_edit_response(
        self,
        model: str,
        raw_response: httpx.Response,
        logging_obj: LiteLLMLoggingObj,
    ) -> ImageResponse:
        """
        Convert Recraft's JSON response into a litellm ``ImageResponse``.

        Raises:
            the provider error class when the response body is not valid JSON.
        """
        model_response = ImageResponse()
        try:
            response_data = raw_response.json()
        except Exception as e:
            # Chain the decode error (`from e`) so the root cause is preserved.
            raise self.get_error_class(
                error_message=f"Error transforming image edit response: {e}",
                status_code=raw_response.status_code,
                headers=raw_response.headers,
            ) from e
        if not model_response.data:
            model_response.data = []
        for image_data in response_data["data"]:
            model_response.data.append(
                ImageObject(
                    url=image_data.get("url", None),
                    b64_json=image_data.get("b64_json", None),
                )
            )
        return model_response

View File

@@ -0,0 +1,13 @@
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from .transformation import RecraftImageGenerationConfig
__all__ = [
"RecraftImageGenerationConfig",
]
def get_recraft_image_generation_config(model: str) -> BaseImageGenerationConfig:
    """Return the image-generation config used for Recraft models.

    ``model`` is accepted for interface parity with other providers; all
    Recraft models currently share the same config instance type.
    """
    config = RecraftImageGenerationConfig()
    return config

View File

@@ -0,0 +1,155 @@
from typing import TYPE_CHECKING, Any, List, Optional
import httpx
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import (
AllMessageValues,
OpenAIImageGenerationOptionalParams,
)
from litellm.types.llms.recraft import RecraftImageGenerationRequestParams
from litellm.types.utils import ImageObject, ImageResponse
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
LiteLLMLoggingObj = Any
class RecraftImageGenerationConfig(BaseImageGenerationConfig):
    """Transform OpenAI-style image generation calls into Recraft's API.

    Recraft API docs: https://www.recraft.ai/docs#generate-image
    """

    DEFAULT_BASE_URL: str = "https://external.api.recraft.ai"
    IMAGE_GENERATION_ENDPOINT: str = "v1/images/generations"

    def get_supported_openai_params(
        self, model: str
    ) -> List[OpenAIImageGenerationOptionalParams]:
        """
        OpenAI params Recraft supports.
        https://www.recraft.ai/docs#generate-image
        """
        return ["n", "response_format", "size", "style"]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """
        Copy supported params from ``non_default_params`` into
        ``optional_params`` (existing keys in ``optional_params`` win).

        Raises:
            ValueError: for an unsupported parameter when ``drop_params`` is
                False; otherwise unsupported params are silently dropped.
        """
        supported_params = self.get_supported_openai_params(model)
        for k in non_default_params.keys():
            if k not in optional_params.keys():
                if k in supported_params:
                    optional_params[k] = non_default_params[k]
                elif drop_params:
                    pass  # silently drop the unsupported param
                else:
                    raise ValueError(
                        f"Parameter {k} is not supported for model {model}. Supported parameters are {supported_params}. Set drop_params=True to drop unsupported parameters."
                    )
        return optional_params

    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: dict,
        litellm_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        """
        Get the complete url for the request.

        Base-url precedence: explicit ``api_base`` argument, then the
        ``RECRAFT_API_BASE`` secret, then ``DEFAULT_BASE_URL``.
        """
        complete_url: str = (
            api_base or get_secret_str("RECRAFT_API_BASE") or self.DEFAULT_BASE_URL
        )
        complete_url = complete_url.rstrip("/")
        complete_url = f"{complete_url}/{self.IMAGE_GENERATION_ENDPOINT}"
        return complete_url

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """
        Attach the Recraft bearer token to ``headers``.

        Raises:
            ValueError: if neither ``api_key`` nor the ``RECRAFT_API_KEY``
                secret is set.
        """
        final_api_key: Optional[str] = api_key or get_secret_str("RECRAFT_API_KEY")
        if not final_api_key:
            raise ValueError("RECRAFT_API_KEY is not set")
        headers["Authorization"] = f"Bearer {final_api_key}"
        return headers

    def transform_image_generation_request(
        self,
        model: str,
        prompt: str,
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """
        Transform the image generation request to the Recraft request body.
        https://www.recraft.ai/docs#generate-image
        """
        # Local name fixed from the earlier misspelling ("recratft_...").
        recraft_request_body: RecraftImageGenerationRequestParams = (
            RecraftImageGenerationRequestParams(
                prompt=prompt,
                model=model,
                **optional_params,
            )
        )
        return dict(recraft_request_body)

    def transform_image_generation_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: ImageResponse,
        logging_obj: LiteLLMLoggingObj,
        request_data: dict,
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        json_mode: Optional[bool] = None,
    ) -> ImageResponse:
        """
        Transform the Recraft response into the litellm ``ImageResponse``.
        https://www.recraft.ai/docs#generate-image

        Raises:
            the provider error class when the response body is not valid JSON.
        """
        try:
            response_data = raw_response.json()
        except Exception as e:
            # Chain the decode error (`from e`) so the root cause is preserved.
            raise self.get_error_class(
                error_message=f"Error transforming image generation response: {e}",
                status_code=raw_response.status_code,
                headers=raw_response.headers,
            ) from e
        if not model_response.data:
            model_response.data = []
        for image_data in response_data["data"]:
            model_response.data.append(
                ImageObject(
                    url=image_data.get("url", None),
                    b64_json=image_data.get("b64_json", None),
                )
            )
        return model_response