chore: initial public snapshot for github upload

This commit is contained in:
Your Name
2026-03-26 20:06:14 +08:00
commit 0e5ecd930e
3497 changed files with 1586236 additions and 0 deletions

View File

@@ -0,0 +1,365 @@
import json
import os
import time
from datetime import datetime
from typing import Any, Dict, Optional
import httpx
from litellm._logging import verbose_logger
from litellm.llms.custom_httpx.http_handler import _get_httpx_client
from .common_utils import (
APIKeyExpiredError,
GetAccessTokenError,
GetAPIKeyError,
GetDeviceCodeError,
RefreshAPIKeyError,
)
# Constants
# Public OAuth client ID of GitHub's Copilot app (shipped in editor plugins).
GITHUB_CLIENT_ID = "Iv1.b507a08c87ecfe98"
# OAuth device-flow endpoints: first request a device/user code, then poll
# for the access token while the user authorizes in the browser.
GITHUB_DEVICE_CODE_URL = "https://github.com/login/device/code"
GITHUB_ACCESS_TOKEN_URL = "https://github.com/login/oauth/access_token"
# Exchanges the GitHub access token for a short-lived Copilot API key.
GITHUB_API_KEY_URL = "https://api.github.com/copilot_internal/v2/token"
class Authenticator:
    """Manages GitHub Copilot credentials via the OAuth device-code flow.

    Two tokens are involved:

    * a GitHub OAuth *access token*, obtained once via the device-code flow
      and cached as plain text on disk (``access_token_file``), and
    * a short-lived Copilot *API key*, exchanged from the access token and
      cached as JSON together with its expiry timestamp (``api_key_file``).

    Both cache locations are overridable through environment variables.
    """
    def __init__(self) -> None:
        """Initialize the GitHub Copilot authenticator with configurable token paths."""
        # Token storage paths
        self.token_dir = os.getenv(
            "GITHUB_COPILOT_TOKEN_DIR",
            os.path.expanduser("~/.config/litellm/github_copilot"),
        )
        self.access_token_file = os.path.join(
            self.token_dir,
            os.getenv("GITHUB_COPILOT_ACCESS_TOKEN_FILE", "access-token"),
        )
        self.api_key_file = os.path.join(
            self.token_dir, os.getenv("GITHUB_COPILOT_API_KEY_FILE", "api-key.json")
        )
        self._ensure_token_dir()
    def get_access_token(self) -> str:
        """
        Login to Copilot with retry 3 times.
        Returns:
            str: The GitHub access token.
        Raises:
            GetAccessTokenError: If unable to obtain an access token after retries.
        """
        # Fast path: reuse a previously cached access token from disk.
        try:
            with open(self.access_token_file, "r") as f:
                access_token = f.read().strip()
                if access_token:
                    return access_token
        except IOError:
            verbose_logger.warning(
                "No existing access token found or error reading file"
            )
        # Slow path: run the interactive device-code login, up to 3 attempts.
        for attempt in range(3):
            verbose_logger.debug(f"Access token acquisition attempt {attempt + 1}/3")
            try:
                access_token = self._login()
                # Best-effort cache write; a failed write is logged but the
                # freshly obtained token is still returned to the caller.
                try:
                    with open(self.access_token_file, "w") as f:
                        f.write(access_token)
                except IOError:
                    verbose_logger.error("Error saving access token to file")
                return access_token
            except (GetDeviceCodeError, GetAccessTokenError, RefreshAPIKeyError) as e:
                verbose_logger.warning(f"Failed attempt {attempt + 1}: {str(e)}")
                continue
        raise GetAccessTokenError(
            message="Failed to get access token after 3 attempts",
            status_code=401,
        )
    def get_api_key(self) -> str:
        """
        Get the API key, refreshing if necessary.
        Returns:
            str: The GitHub Copilot API key.
        Raises:
            GetAPIKeyError: If unable to obtain an API key.
        """
        # Fast path: reuse the cached API key if it has not expired yet.
        try:
            with open(self.api_key_file, "r") as f:
                api_key_info = json.load(f)
                if api_key_info.get("expires_at", 0) > datetime.now().timestamp():
                    return api_key_info.get("token")
                else:
                    verbose_logger.warning("API key expired, refreshing")
                    # Raised (and re-caught below) so the refresh path runs.
                    raise APIKeyExpiredError(
                        message="API key expired",
                        status_code=401,
                    )
        except IOError:
            verbose_logger.warning("No API key file found or error opening file")
        except (json.JSONDecodeError, KeyError) as e:
            verbose_logger.warning(f"Error reading API key from file: {str(e)}")
        except APIKeyExpiredError:
            pass  # Already logged in the try block
        # Slow path: exchange the access token for a fresh API key and cache it.
        try:
            api_key_info = self._refresh_api_key()
            with open(self.api_key_file, "w") as f:
                json.dump(api_key_info, f)
            token = api_key_info.get("token")
            if token:
                return token
            else:
                raise GetAPIKeyError(
                    message="API key response missing token",
                    status_code=401,
                )
        except IOError as e:
            verbose_logger.error(f"Error saving API key to file: {str(e)}")
            raise GetAPIKeyError(
                message=f"Failed to save API key: {str(e)}",
                status_code=500,
            )
        except RefreshAPIKeyError as e:
            raise GetAPIKeyError(
                message=f"Failed to refresh API key: {str(e)}",
                status_code=401,
            )
    def get_api_base(self) -> Optional[str]:
        """
        Get the API endpoint from the api-key.json file.
        Returns:
            Optional[str]: The GitHub Copilot API endpoint, or None if not found.
        """
        # Reads only the cached file; never triggers a refresh.
        try:
            with open(self.api_key_file, "r") as f:
                api_key_info = json.load(f)
                endpoints = api_key_info.get("endpoints", {})
                api_endpoint = endpoints.get("api")
                return api_endpoint
        except (IOError, json.JSONDecodeError, KeyError) as e:
            verbose_logger.warning(f"Error reading API endpoint from file: {str(e)}")
            return None
    def _refresh_api_key(self) -> Dict[str, Any]:
        """
        Refresh the API key using the access token.
        Returns:
            Dict[str, Any]: The API key information including token and expiration.
        Raises:
            RefreshAPIKeyError: If unable to refresh the API key.
        """
        access_token = self.get_access_token()
        headers = self._get_github_headers(access_token)
        max_retries = 3
        # Retries back-to-back without a delay between attempts.
        for attempt in range(max_retries):
            try:
                sync_client = _get_httpx_client()
                response = sync_client.get(GITHUB_API_KEY_URL, headers=headers)
                response.raise_for_status()
                response_json = response.json()
                if "token" in response_json:
                    return response_json
                else:
                    verbose_logger.warning(
                        f"API key response missing token: {response_json}"
                    )
            except httpx.HTTPStatusError as e:
                verbose_logger.error(
                    f"HTTP error refreshing API key (attempt {attempt+1}/{max_retries}): {str(e)}"
                )
            except Exception as e:
                verbose_logger.error(f"Unexpected error refreshing API key: {str(e)}")
        raise RefreshAPIKeyError(
            message="Failed to refresh API key after maximum retries",
            status_code=401,
        )
    def _ensure_token_dir(self) -> None:
        """Ensure the token directory exists."""
        if not os.path.exists(self.token_dir):
            os.makedirs(self.token_dir, exist_ok=True)
    def _get_github_headers(self, access_token: Optional[str] = None) -> Dict[str, str]:
        """
        Generate standard GitHub headers for API requests.
        Args:
            access_token: Optional access token to include in the headers.
        Returns:
            Dict[str, str]: Headers for GitHub API requests.
        """
        # Editor/plugin identifiers mimic a VSCode Copilot client.
        headers = {
            "accept": "application/json",
            "editor-version": "vscode/1.85.1",
            "editor-plugin-version": "copilot/1.155.0",
            "user-agent": "GithubCopilot/1.155.0",
            "accept-encoding": "gzip,deflate,br",
        }
        if access_token:
            headers["authorization"] = f"token {access_token}"
            if "content-type" not in headers:
                headers["content-type"] = "application/json"
        return headers
    def _get_device_code(self) -> Dict[str, str]:
        """
        Get a device code for GitHub authentication.
        Returns:
            Dict[str, str]: Device code information.
        Raises:
            GetDeviceCodeError: If unable to get a device code.
        """
        try:
            sync_client = _get_httpx_client()
            resp = sync_client.post(
                GITHUB_DEVICE_CODE_URL,
                headers=self._get_github_headers(),
                json={"client_id": GITHUB_CLIENT_ID, "scope": "read:user"},
            )
            resp.raise_for_status()
            resp_json = resp.json()
            # The caller relies on these three fields to drive the login UX.
            required_fields = ["device_code", "user_code", "verification_uri"]
            if not all(field in resp_json for field in required_fields):
                verbose_logger.error(f"Response missing required fields: {resp_json}")
                raise GetDeviceCodeError(
                    message="Response missing required fields",
                    status_code=400,
                )
            return resp_json
        except httpx.HTTPStatusError as e:
            verbose_logger.error(f"HTTP error getting device code: {str(e)}")
            raise GetDeviceCodeError(
                message=f"Failed to get device code: {str(e)}",
                status_code=400,
            )
        except json.JSONDecodeError as e:
            verbose_logger.error(f"Error decoding JSON response: {str(e)}")
            raise GetDeviceCodeError(
                message=f"Failed to decode device code response: {str(e)}",
                status_code=400,
            )
        except Exception as e:
            verbose_logger.error(f"Unexpected error getting device code: {str(e)}")
            raise GetDeviceCodeError(
                message=f"Failed to get device code: {str(e)}",
                status_code=400,
            )
    def _poll_for_access_token(self, device_code: str) -> str:
        """
        Poll for an access token after user authentication.
        Args:
            device_code: The device code to use for polling.
        Returns:
            str: The access token.
        Raises:
            GetAccessTokenError: If unable to get an access token.
        """
        sync_client = _get_httpx_client()
        max_attempts = 12  # 1 minute (12 * 5 seconds)
        for attempt in range(max_attempts):
            try:
                resp = sync_client.post(
                    GITHUB_ACCESS_TOKEN_URL,
                    headers=self._get_github_headers(),
                    json={
                        "client_id": GITHUB_CLIENT_ID,
                        "device_code": device_code,
                        "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                    },
                )
                resp.raise_for_status()
                resp_json = resp.json()
                if "access_token" in resp_json:
                    verbose_logger.info("Authentication successful!")
                    return resp_json["access_token"]
                elif (
                    "error" in resp_json
                    and resp_json.get("error") == "authorization_pending"
                ):
                    # User has not completed the browser authorization yet;
                    # fall through to the sleep below and poll again.
                    verbose_logger.debug(
                        f"Authorization pending (attempt {attempt+1}/{max_attempts})"
                    )
                else:
                    verbose_logger.warning(f"Unexpected response: {resp_json}")
            except httpx.HTTPStatusError as e:
                verbose_logger.error(f"HTTP error polling for access token: {str(e)}")
                raise GetAccessTokenError(
                    message=f"Failed to get access token: {str(e)}",
                    status_code=400,
                )
            except json.JSONDecodeError as e:
                verbose_logger.error(f"Error decoding JSON response: {str(e)}")
                raise GetAccessTokenError(
                    message=f"Failed to decode access token response: {str(e)}",
                    status_code=400,
                )
            except Exception as e:
                verbose_logger.error(
                    f"Unexpected error polling for access token: {str(e)}"
                )
                raise GetAccessTokenError(
                    message=f"Failed to get access token: {str(e)}",
                    status_code=400,
                )
            time.sleep(5)
        raise GetAccessTokenError(
            message="Timed out waiting for user to authorize the device",
            status_code=400,
        )
    def _login(self) -> str:
        """
        Login to GitHub Copilot using device code flow.
        Returns:
            str: The GitHub access token.
        Raises:
            GetDeviceCodeError: If unable to get a device code.
            GetAccessTokenError: If unable to get an access token.
        """
        device_code_info = self._get_device_code()
        device_code = device_code_info["device_code"]
        user_code = device_code_info["user_code"]
        verification_uri = device_code_info["verification_uri"]
        print(  # noqa: T201
            f"Please visit {verification_uri} and enter code {user_code} to authenticate.",
            # When this is running in docker, it may not be flushed immediately
            # so we force flush to ensure the user sees the message
            flush=True,
        )
        return self._poll_for_access_token(device_code)

View File

@@ -0,0 +1,160 @@
from typing import List, Optional, Tuple
from litellm.exceptions import AuthenticationError
from litellm.llms.openai.openai import OpenAIConfig
from litellm.types.llms.openai import AllMessageValues
from ..authenticator import Authenticator
from ..common_utils import (
GITHUB_COPILOT_API_BASE,
GetAPIKeyError,
get_copilot_default_headers,
)
class GithubCopilotConfig(OpenAIConfig):
    """OpenAI-compatible chat config backed by GitHub Copilot's OAuth auth.

    Resolves the API base and key dynamically through an ``Authenticator``
    and decorates requests with the Copilot-specific headers.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        custom_llm_provider: str = "openai",
    ) -> None:
        super().__init__()
        # Handles the OAuth device flow plus on-disk token caching.
        self.authenticator = Authenticator()

    def _get_openai_compatible_provider_info(
        self,
        model: str,
        api_base: Optional[str],
        api_key: Optional[str],
        custom_llm_provider: str,
    ) -> Tuple[Optional[str], Optional[str], str]:
        """Resolve (api_base, api_key, provider) from the authenticator."""
        # Read the cached endpoint first, before get_api_key() can rewrite
        # the api-key file during a refresh.
        dynamic_api_base = self.authenticator.get_api_base() or GITHUB_COPILOT_API_BASE
        try:
            dynamic_api_key = self.authenticator.get_api_key()
        except GetAPIKeyError as e:
            raise AuthenticationError(
                model=model,
                llm_provider=custom_llm_provider,
                message=str(e),
            )
        return dynamic_api_base, dynamic_api_key, custom_llm_provider

    def _transform_messages(
        self,
        messages,
        model: str,
    ):
        import litellm

        # Opt-out flag: newer Copilot endpoints accept system prompts as-is.
        if litellm.disable_copilot_system_to_assistant:
            # GitHub Copilot API now supports system prompts for all models (Claude, GPT, etc.)
            # No conversion needed - just return messages as-is
            return messages
        # Default behavior: convert system messages to assistant for compatibility
        converted = []
        for original in messages:
            if original.get("role") != "system":
                converted.append(original)
                continue
            as_assistant = original.copy()
            as_assistant["role"] = "assistant"
            converted.append(as_assistant)
        return converted

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """Build request headers: OpenAI base headers plus Copilot extras."""
        validated_headers = super().validate_environment(
            headers, model, messages, optional_params, litellm_params, api_key, api_base
        )
        # Layer in Copilot-specific headers (editor-version, user-agent, ...);
        # already-present headers take precedence over the Copilot defaults.
        try:
            copilot_headers = get_copilot_default_headers(
                self.authenticator.get_api_key()
            )
            validated_headers = {**copilot_headers, **validated_headers}
        except GetAPIKeyError:
            pass  # Will be handled later in the request flow
        # X-Initiator tells Copilot whether a human or an agent started this turn.
        validated_headers["X-Initiator"] = self._determine_initiator(messages)
        # Vision requests need an explicit opt-in header.
        if self._has_vision_content(messages):
            validated_headers["Copilot-Vision-Request"] = "true"
        return validated_headers

    def get_supported_openai_params(self, model: str) -> list:
        """
        Get supported OpenAI parameters for GitHub Copilot.
        For Claude models that support extended thinking (Claude 4 family and Claude 3-7), includes thinking and reasoning_effort parameters.
        For other models, returns standard OpenAI parameters (which may include reasoning_effort for o-series models).
        """
        from litellm.utils import supports_reasoning

        params = super().get_supported_openai_params(model)
        lowered = model.lower()
        # Claude reasoning models additionally accept thinking/reasoning_effort,
        # which the OpenAI base config does not add for them.
        if "claude" in lowered and supports_reasoning(model=lowered):
            for extra in ("thinking", "reasoning_effort"):
                if extra not in params:
                    params.append(extra)
        return params

    def _determine_initiator(self, messages: List[AllMessageValues]) -> str:
        """
        Determine if request is user or agent initiated based on message roles.
        Returns 'agent' if any message has role 'tool' or 'assistant', otherwise 'user'.
        """
        agent_roles = ("tool", "assistant")
        if any(m.get("role") in agent_roles for m in messages):
            return "agent"
        return "user"

    def _has_vision_content(self, messages: List[AllMessageValues]) -> bool:
        """
        Check if any message contains vision content (images).

        A message counts as vision content when its content is a list and any
        dict item either carries an ``image_url`` field (OpenAI format) or has
        ``type == "image_url"``.
        """
        for message in messages:
            parts = message.get("content")
            if not isinstance(parts, list):
                continue
            for part in parts:
                if not isinstance(part, dict):
                    continue
                if "image_url" in part or part.get("type") == "image_url":
                    return True
        return False

View File

@@ -0,0 +1,76 @@
"""
Constants for Copilot integration
"""
from typing import Optional, Union
from uuid import uuid4
import httpx
from litellm.llms.base_llm.chat.transformation import BaseLLMException
# Constants
COPILOT_VERSION = "0.26.7"  # Copilot Chat plugin version advertised to the backend
EDITOR_PLUGIN_VERSION = f"copilot-chat/{COPILOT_VERSION}"  # editor-plugin-version header
USER_AGENT = f"GitHubCopilotChat/{COPILOT_VERSION}"  # user-agent header
API_VERSION = "2025-04-01"  # x-github-api-version header value
GITHUB_COPILOT_API_BASE = "https://api.githubcopilot.com"  # default API base (individual accounts)
class GithubCopilotError(BaseLLMException):
    """Base exception for all GitHub Copilot integration errors.

    A thin pass-through to ``BaseLLMException``; subclasses only add a
    distinct type so callers can catch specific failure modes.
    """

    def __init__(
        self,
        status_code,
        message,
        request: Optional[httpx.Request] = None,
        response: Optional[httpx.Response] = None,
        headers: Optional[Union[httpx.Headers, dict]] = None,
        body: Optional[dict] = None,
    ):
        # Forward every argument to the base class unchanged.
        super().__init__(
            message=message,
            status_code=status_code,
            body=body,
            headers=headers,
            request=request,
            response=response,
        )
class GetDeviceCodeError(GithubCopilotError):
    """Raised when requesting an OAuth device code fails."""
    pass
class GetAccessTokenError(GithubCopilotError):
    """Raised when polling for the OAuth access token fails or times out."""
    pass
class APIKeyExpiredError(GithubCopilotError):
    """Raised internally when the cached Copilot API key has expired."""
    pass
class RefreshAPIKeyError(GithubCopilotError):
    """Raised when exchanging the access token for a fresh API key fails."""
    pass
class GetAPIKeyError(GithubCopilotError):
    """Raised when a Copilot API key cannot be obtained at all."""
    pass
def get_copilot_default_headers(api_key: str) -> dict:
    """
    Get default headers for GitHub Copilot Responses API.
    Based on copilot-api's header configuration.
    """
    # Static identity headers mimicking a VSCode Copilot Chat client.
    headers = {
        "Authorization": f"Bearer {api_key}",
        "content-type": "application/json",
        "copilot-integration-id": "vscode-chat",
        "editor-version": "vscode/1.95.0",  # Fixed version for stability
        "editor-plugin-version": EDITOR_PLUGIN_VERSION,
        "user-agent": USER_AGENT,
        "openai-intent": "conversation-panel",
        "x-github-api-version": API_VERSION,
    }
    # Per-call request id for tracing, then the fetch-library marker.
    headers["x-request-id"] = str(uuid4())
    headers["x-vscode-user-agent-library-version"] = "electron-fetch"
    return headers

View File

@@ -0,0 +1,189 @@
"""
GitHub Copilot Embedding API Configuration.
This module provides the configuration for GitHub Copilot's Embedding API.
Implementation based on analysis of the copilot-api project by caozhiyuan:
https://github.com/caozhiyuan/copilot-api
"""
from typing import TYPE_CHECKING, Any, Optional
import httpx
from litellm._logging import verbose_logger
from litellm.exceptions import AuthenticationError
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
from litellm.types.llms.openai import AllEmbeddingInputValues
from litellm.types.utils import EmbeddingResponse
from litellm.utils import convert_to_model_response_object
from ..authenticator import Authenticator
from ..common_utils import (
GetAPIKeyError,
GITHUB_COPILOT_API_BASE,
get_copilot_default_headers,
)
# Import the logging type only for static type checkers; at runtime the
# import is skipped (avoiding a heavy/circular import) and Any is used.
if TYPE_CHECKING:
    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
    LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
    LiteLLMLoggingObj = Any
class GithubCopilotEmbeddingConfig(BaseEmbeddingConfig):
    """
    Configuration for GitHub Copilot's Embedding API.
    Reference: https://api.githubcopilot.com/embeddings
    """

    def __init__(self) -> None:
        super().__init__()
        # Handles the OAuth device flow plus on-disk token caching.
        self.authenticator = Authenticator()

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: list,
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """
        Validate environment and set up headers for GitHub Copilot API.
        """
        # Obtain the Copilot API key via the OAuth device flow; surface any
        # failure as an AuthenticationError for the caller.
        try:
            api_key = self.authenticator.get_api_key()
        except GetAPIKeyError as e:
            raise AuthenticationError(
                model=model,
                llm_provider="github_copilot",
                message=str(e),
            )
        if not api_key:
            raise AuthenticationError(
                model=model,
                llm_provider="github_copilot",
                message="GitHub Copilot API key is required. Please authenticate via OAuth Device Flow.",
            )
        # Copilot defaults first, so caller-supplied headers win on conflict.
        merged_headers = {**get_copilot_default_headers(api_key), **headers}
        verbose_logger.debug(
            f"GitHub Copilot Embedding API: Successfully configured headers for model {model}"
        )
        return merged_headers

    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: dict,
        litellm_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        """
        Get the complete URL for GitHub Copilot Embedding API endpoint.
        """
        # Preference order: authenticator's cached endpoint, then the caller's
        # api_base, then the library default.
        base = self.authenticator.get_api_base() or api_base or GITHUB_COPILOT_API_BASE
        return f"{base.rstrip('/')}/embeddings"

    def transform_embedding_request(
        self,
        model: str,
        input: AllEmbeddingInputValues,
        optional_params: dict,
        headers: dict,
    ) -> dict:
        """
        Transform embedding request to GitHub Copilot format.
        """
        # The API expects a list of inputs even for a single string.
        normalized_input = [input] if isinstance(input, str) else input
        # Drop the provider routing prefix before sending the model name.
        model_name = model
        if model_name.startswith("github_copilot/"):
            model_name = model_name.replace("github_copilot/", "", 1)
        body = {"model": model_name, "input": normalized_input}
        # Caller-supplied optional params may override model/input.
        body.update(optional_params)
        return body

    def transform_embedding_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: EmbeddingResponse,
        logging_obj: LiteLLMLoggingObj,
        api_key: Optional[str],
        request_data: dict,
        optional_params: dict,
        litellm_params: dict,
    ) -> EmbeddingResponse:
        """
        Transform embedding response from GitHub Copilot format.
        """
        logging_obj.post_call(original_response=raw_response.text)
        # Copilot returns a standard OpenAI-compatible embedding payload,
        # so the generic converter handles it directly.
        return convert_to_model_response_object(
            response_object=raw_response.json(),
            model_response_object=model_response,
            response_type="embedding",
        )

    def get_supported_openai_params(self, model: str) -> list:
        """OpenAI parameters the Copilot embeddings endpoint accepts."""
        return [
            "timeout",
            "dimensions",
            "encoding_format",
            "user",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """Copy supported params into optional_params; silently skip the rest."""
        supported = self.get_supported_openai_params(model)
        for name, value in non_default_params.items():
            if name in supported:
                optional_params[name] = value
        return optional_params

    def get_error_class(
        self, error_message: str, status_code: int, headers: Any
    ) -> Any:
        """Delegate error mapping to the OpenAI config (API is compatible)."""
        from litellm.llms.openai.openai import OpenAIConfig

        return OpenAIConfig().get_error_class(
            error_message=error_message, status_code=status_code, headers=headers
        )

View File

@@ -0,0 +1,337 @@
"""
GitHub Copilot Responses API Configuration.
This module provides the configuration for GitHub Copilot's Responses API,
which is required for models like gpt-5.1-codex that only support the /responses endpoint.
Implementation based on analysis of the copilot-api project by caozhiyuan:
https://github.com/caozhiyuan/copilot-api
"""
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from litellm._logging import verbose_logger
from litellm.constants import DEFAULT_MAX_RECURSE_DEPTH
from litellm.exceptions import AuthenticationError
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.types.llms.openai import (
ResponseInputParam,
ResponsesAPIOptionalRequestParams,
)
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import LlmProviders
from ..authenticator import Authenticator
from ..common_utils import (
GITHUB_COPILOT_API_BASE,
GetAPIKeyError,
get_copilot_default_headers,
)
# Import the logging type only for static type checkers; at runtime the
# import is skipped (avoiding a heavy/circular import) and Any is used.
if TYPE_CHECKING:
    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
    LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
    LiteLLMLoggingObj = Any
class GithubCopilotResponsesAPIConfig(OpenAIResponsesAPIConfig):
    """
    Configuration for GitHub Copilot's Responses API.
    Inherits from OpenAIResponsesAPIConfig since GitHub Copilot's Responses API
    is compatible with OpenAI's Responses API specification.
    Key differences from OpenAI:
    - Uses OAuth Device Flow authentication (handled by Authenticator)
    - Uses api.githubcopilot.com as the API base
    - Requires specific headers for VSCode/Copilot integration
    - Supports vision requests with special header
    - Requires X-Initiator header based on input analysis
    Reference: https://api.githubcopilot.com/
    """
    def __init__(self) -> None:
        super().__init__()
        # Handles the OAuth device flow plus on-disk token caching.
        self.authenticator = Authenticator()
    @property
    def custom_llm_provider(self) -> LlmProviders:
        """Return the GitHub Copilot provider identifier."""
        return LlmProviders.GITHUB_COPILOT
    def get_supported_openai_params(self, model: str) -> list:
        """
        Get supported parameters for GitHub Copilot Responses API.
        GitHub Copilot supports all standard OpenAI Responses API parameters.
        """
        return super().get_supported_openai_params(model)
    def map_openai_params(
        self,
        response_api_optional_params: ResponsesAPIOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        """
        Map parameters for GitHub Copilot Responses API.
        GitHub Copilot uses the same parameter format as OpenAI,
        so no transformation is needed.
        """
        return dict(response_api_optional_params)
    def validate_environment(
        self,
        headers: dict,
        model: str,
        litellm_params: Optional[GenericLiteLLMParams],
    ) -> dict:
        """
        Validate environment and set up headers for GitHub Copilot API.
        Uses the Authenticator to obtain GitHub Copilot API key via OAuth Device Flow,
        then configures all required headers for the Responses API.
        Headers include:
        - Authorization with API key
        - Standard GitHub Copilot headers (editor-version, user-agent, etc.)
        - X-Initiator based on input analysis
        - copilot-vision-request if vision content detected
        - User-provided extra_headers (merged with priority)
        Raises:
            AuthenticationError: If no API key could be obtained.
        """
        try:
            # Get GitHub Copilot API key via OAuth
            api_key = self.authenticator.get_api_key()
            if not api_key:
                raise AuthenticationError(
                    model=model,
                    llm_provider="github_copilot",
                    message="GitHub Copilot API key is required. Please authenticate via OAuth Device Flow.",
                )
            # Get default headers (from copilot-api configuration)
            default_headers = get_copilot_default_headers(api_key)
            # Merge with existing headers (user's extra_headers take priority)
            merged_headers = {**default_headers, **headers}
            # Analyze input to determine additional headers
            input_param = self._get_input_from_params(litellm_params)
            # Add X-Initiator header based on input analysis
            if input_param is not None:
                initiator = self._get_initiator(input_param)
                merged_headers["X-Initiator"] = initiator
                verbose_logger.debug(
                    f"GitHub Copilot Responses API: Set X-Initiator={initiator}"
                )
                # Add vision header if input contains images
                if self._has_vision_input(input_param):
                    merged_headers["copilot-vision-request"] = "true"
                    verbose_logger.debug(
                        "GitHub Copilot Responses API: Enabled vision request"
                    )
            verbose_logger.debug(
                f"GitHub Copilot Responses API: Successfully configured headers for model {model}"
            )
            return merged_headers
        except GetAPIKeyError as e:
            raise AuthenticationError(
                model=model,
                llm_provider="github_copilot",
                message=str(e),
            )
    def get_complete_url(
        self,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Get the complete URL for GitHub Copilot Responses API endpoint.
        Returns: https://api.githubcopilot.com/responses
        Note: Currently only supports individual accounts.
        Business/enterprise accounts (api.business.githubcopilot.com) can be
        added in the future by detecting account type.
        """
        # Use provided api_base or fall back to authenticator's base or default
        api_base = (
            api_base or self.authenticator.get_api_base() or GITHUB_COPILOT_API_BASE
        )
        # Remove trailing slashes
        api_base = api_base.rstrip("/")
        # Return the responses endpoint
        return f"{api_base}/responses"
    def _handle_reasoning_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle reasoning items for GitHub Copilot, preserving encrypted_content.
        GitHub Copilot uses encrypted_content in reasoning items to maintain
        conversation state across turns. The parent class strips this field
        when converting to OpenAI's ResponseReasoningItem model, which causes
        "encrypted content could not be verified" errors on multi-turn requests.
        This override preserves encrypted_content while still filtering out
        status=None which OpenAI's API rejects.
        """
        if item.get("type") == "reasoning":
            # Preserve encrypted_content before parent processing
            encrypted_content = item.get("encrypted_content")
            # Filter out None values for known problematic fields,
            # but preserve encrypted_content even if it exists
            filtered_item: Dict[str, Any] = {}
            for k, v in item.items():
                # Always include encrypted_content if present (even if None)
                # NOTE: despite the comment above, a None encrypted_content is
                # dropped here; only a non-None value is copied through.
                if k == "encrypted_content":
                    if encrypted_content is not None:
                        filtered_item[k] = v
                    continue
                # Filter out status=None which OpenAI API rejects
                if k == "status" and v is None:
                    continue
                # Include all other non-None values
                if v is not None:
                    filtered_item[k] = v
            verbose_logger.debug(
                f"GitHub Copilot reasoning item processed, encrypted_content preserved: {encrypted_content is not None}"
            )
            return filtered_item
        # Non-reasoning items pass through untouched.
        return item
    # ==================== Helper Methods ====================
    def _get_input_from_params(
        self, litellm_params: Optional[GenericLiteLLMParams]
    ) -> Optional[Union[str, ResponseInputParam]]:
        """
        Extract input parameter from litellm_params.
        The input parameter contains the conversation history and is needed
        for vision detection and initiator determination.
        """
        if litellm_params is None:
            return None
        # Try to get input from litellm_params
        # This might be in different locations depending on how LiteLLM structures it
        if hasattr(litellm_params, "input"):
            return litellm_params.input
        # If not found, return None and let the API handle it
        return None
    def _get_initiator(self, input_param: Union[str, ResponseInputParam]) -> str:
        """
        Determine X-Initiator header value based on input analysis.
        Based on copilot-api's hasAgentInitiator logic:
        - Returns "agent" if input contains assistant role or items without role
        - Returns "user" otherwise
        Args:
            input_param: The input parameter (string or list of input items)
        Returns:
            "agent" or "user"
        """
        # If input is a string, it's user-initiated
        if isinstance(input_param, str):
            return "user"
        # If input is a list, analyze items
        if isinstance(input_param, list):
            for item in input_param:
                # Non-dict items (e.g. pydantic models) are skipped entirely.
                if not isinstance(item, dict):
                    continue
                # Check if item has no role (agent-initiated)
                if "role" not in item or not item.get("role"):
                    return "agent"
                # Check if role is assistant (agent-initiated)
                role = item.get("role")
                if isinstance(role, str) and role.lower() == "assistant":
                    return "agent"
        # Default to user-initiated
        return "user"
    def _has_vision_input(self, input_param: Union[str, ResponseInputParam]) -> bool:
        """
        Check if input contains vision content (images).
        Based on copilot-api's hasVisionInput and containsVisionContent logic.
        Recursively searches for input_image type in the input structure.
        Args:
            input_param: The input parameter to analyze
        Returns:
            True if input contains image content, False otherwise
        """
        return self._contains_vision_content(input_param)
    def _contains_vision_content(
        self, value: Any, depth: int = 0, max_depth: int = DEFAULT_MAX_RECURSE_DEPTH
    ) -> bool:
        """
        Recursively check if a value contains vision content.
        Looks for items with type="input_image" in the structure.
        Recursion is depth-limited to guard against pathological nesting.
        """
        if depth > max_depth:
            verbose_logger.warning(
                f"[GitHub Copilot] Max recursion depth {max_depth} reached while checking for vision content"
            )
            return False
        if value is None:
            return False
        # Check arrays
        if isinstance(value, list):
            return any(
                self._contains_vision_content(
                    item, depth=depth + 1, max_depth=max_depth
                )
                for item in value
            )
        # Only check dict/object types
        if not isinstance(value, dict):
            return False
        # Check if this item is an input_image
        item_type = value.get("type")
        if isinstance(item_type, str) and item_type.lower() == "input_image":
            return True
        # Check content field recursively
        if "content" in value and isinstance(value["content"], list):
            return any(
                self._contains_vision_content(
                    item, depth=depth + 1, max_depth=max_depth
                )
                for item in value["content"]
            )
        return False
    def supports_native_websocket(self) -> bool:
        """GitHub Copilot does not support native WebSocket for Responses API"""
        return False