import json
import logging
from typing import Dict, List, Optional, Tuple

from app.models import ModelSchema
from provider_dependency.chat_completion import *
from .utils import *

logger = logging.getLogger(__name__)


async def _build_siliconcloud_message(message: ChatCompletionMessage):
    # Plain user/system messages and text-only assistant messages map directly.
    if message.role in [
        ChatCompletionRole.user,
        ChatCompletionRole.system,
    ] or is_assistant_text_message(message):
        return {"role": message.role.name, "content": message.content}

    # Function results go back to the API under the OpenAI-compatible "tool" role.
    if message.role == ChatCompletionRole.function:
        message: ChatCompletionFunctionMessage
        return {"role": "tool", "content": message.content, "tool_call_id": message.id}

    # Assistant messages that request function calls become "tool_calls" entries;
    # arguments are serialized to a JSON string if they arrive as a dict.
    if is_assistant_function_calls_message(message):
        message: ChatCompletionAssistantMessage
        function_calls = []

        for f in message.function_calls:
            arguments = f.arguments
            if isinstance(arguments, dict):
                arguments = json.dumps(arguments)
            function_calls.append(
                {
                    "id": f.id,
                    "type": "function",
                    "function": {"name": f.name, "arguments": arguments},
                }
            )

        return {
            "role": ChatCompletionRole.assistant.name,
            "tool_calls": function_calls,
            "content": None,
        }
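
# For illustration only (not part of this module's contract): an assistant
# message that calls a hypothetical get_weather function with {"city": "Tokyo"}
# would be converted to roughly
#   {"role": "assistant", "content": None,
#    "tool_calls": [{"id": "<call id>", "type": "function",
#                    "function": {"name": "get_weather",
#                                 "arguments": '{"city": "Tokyo"}'}}]}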


async def _build_siliconcloud_chat_completion_payload(
    messages: List[ChatCompletionMessage],
    stream: bool,
    provider_model_id: str,
    configs: ChatCompletionModelConfiguration,
    function_call: Optional[str],
    functions: Optional[List[ChatCompletionFunction]],
):
    # Convert ChatCompletionMessages to the OpenAI-compatible wire format.
    formatted_messages = [await _build_siliconcloud_message(msg) for msg in messages]
    logger.debug("formatted_messages: %s", formatted_messages)
    payload = {
        "model": provider_model_id,
        "messages": formatted_messages,
        "stream": stream,
    }
    # Copy every configured (non-None) option into the payload.
    config_dict = configs.model_dump()
    for key, value in config_dict.items():
        if value is not None:
            payload[key] = value

    # response_format must be an object on the wire; this overwrites the raw
    # string copied by the loop above.
    if configs.response_format:
        payload["response_format"] = {"type": configs.response_format}

    if configs.response_format == "json_object":
        # JSON mode also needs an explicit instruction in the system prompt.
        if payload["messages"][0]["role"] == "system":
            payload["messages"][0][
                "content"
            ] = f"{payload['messages'][0]['content']} You are designed to output JSON."
        else:
            payload["messages"].insert(0, {"role": "system", "content": "You are designed to output JSON."})

    if function_call:
        if function_call in ["none", "auto"]:
            payload["tool_choice"] = function_call
        else:
            # Forcing a specific function uses the OpenAI-compatible object form,
            # not a bare {"name": ...} dict.
            payload["tool_choice"] = {"type": "function", "function": {"name": function_call}}
    if functions:
        payload["tools"] = [{"type": "function", "function": f.model_dump()} for f in functions]
    return payload
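
# For illustration only: with stream=False, no tools, and temperature set in
# configs, the payload produced above looks roughly like
#   {"model": "<provider model id>", "stream": False, "temperature": 0.7,
#    "messages": [{"role": "user", "content": "Hi"}]}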


class SiliconcloudChatCompletionModel(BaseChatCompletionModel):
    def __init__(self):
        super().__init__()

    # ------------------- prepare request data -------------------

    async def prepare_request(
        self,
        stream: bool,
        provider_model_id: str,
        messages: List[ChatCompletionMessage],
        credentials: ProviderCredentials,
        configs: ChatCompletionModelConfiguration,
        function_call: Optional[str] = None,
        functions: Optional[List[ChatCompletionFunction]] = None,
        model_schema: Optional[ModelSchema] = None,
    ) -> Tuple[str, Dict, Dict]:
        api_url = "https://api.siliconflow.cn/v1/chat/completions"
        headers = build_siliconcloud_header(credentials)
        payload = await _build_siliconcloud_chat_completion_payload(
            messages, stream, provider_model_id, configs, function_call, functions
        )
        return api_url, headers, payload
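
    # For illustration: prepare_request returns the request triple, e.g.
    #   ("https://api.siliconflow.cn/v1/chat/completions",
    #    {...auth headers from build_siliconcloud_header...},
    #    {"model": ..., "messages": [...], "stream": ...})
    # The caller is expected to perform the actual HTTP request.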

    # ------------------- handle non-stream chat completion response -------------------

    def extract_core_data(self, response_data: Dict, **kwargs) -> Optional[Dict]:
        if not response_data.get("choices"):
            return None
        return response_data["choices"][0]

    def extract_usage_data(self, response_data: Dict, **kwargs) -> Tuple[Optional[int], Optional[int]]:
        # `or {}` guards against a present-but-null "usage" field, which would
        # otherwise raise AttributeError on .get below.
        usage = (response_data.get("usage") or {}) if response_data else {}
        return usage.get("prompt_tokens", None), usage.get("completion_tokens", None)

    def extract_text_content(self, data: Dict, **kwargs) -> Optional[str]:
        message_data = data.get("message") if data else None
        if message_data and message_data.get("content"):
            return message_data.get("content")
        return None

    def extract_function_calls(self, data: Dict, **kwargs) -> Optional[List[ChatCompletionFunctionCall]]:
        message_data = data.get("message") if data else None
        # Check that a message exists before probing it for tool_calls.
        if message_data and message_data.get("tool_calls"):
            function_calls = []
            tool_calls = message_data.get("tool_calls")
            for call in tool_calls:
                func_call = build_function_call(
                    name=call["function"]["name"],
                    arguments_str=call["function"]["arguments"],
                )
                function_calls.append(func_call)
            return function_calls

        return None

    def extract_finish_reason(self, data: Dict, **kwargs) -> Optional[ChatCompletionFinishReason]:
        if not data:
            return ChatCompletionFinishReason.unknown
        finish_reason = data.get("finish_reason", "stop")
        # Return early for provider-specific strings: reassigning an enum member
        # and then looking it up in __members__ (whose keys are strings) would
        # always fall through to unknown.
        if finish_reason == "tool_calls":
            return ChatCompletionFinishReason.function_calls
        if finish_reason == "eos":
            return ChatCompletionFinishReason.stop
        if finish_reason == "eos_token":
            return ChatCompletionFinishReason.length
        return ChatCompletionFinishReason.__members__.get(finish_reason, ChatCompletionFinishReason.unknown)
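
    # For illustration: "tool_calls" -> function_calls, "eos" -> stop,
    # "eos_token" -> length; any other string goes through the enum lookup,
    # so "stop" -> stop and an unrecognized value -> unknown.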

    # ------------------- handle stream chat completion response -------------------

    def stream_check_error(self, sse_data: Dict, **kwargs):
        if sse_data.get("error"):
            raise_provider_api_error(sse_data["error"])

    def stream_extract_chunk_data(self, sse_data: Dict, **kwargs) -> Optional[Dict]:
        if not sse_data.get("choices"):
            return None
        return sse_data["choices"][0]

    def stream_extract_usage_data(self, sse_data: Dict, input_tokens, output_tokens, **kwargs) -> Tuple[int, int]:
        usage = sse_data.get("usage") if sse_data else None
        if usage is not None:
            input_tokens = max(input_tokens or 0, usage.get("prompt_tokens", 0))
            output_tokens = max(output_tokens or 0, usage.get("completion_tokens", 0))
        return input_tokens, output_tokens

    def stream_extract_chunk(
        self, index: int, chunk_data: Dict, text_content: str, **kwargs
    ) -> Tuple[int, Optional[ChatCompletionChunk]]:
        content = chunk_data.get("delta", {}).get("content") if chunk_data else None
        if content:
            return index + 1, ChatCompletionChunk(
                created_timestamp=get_current_timestamp_int(),
                index=index,
                delta=content,
            )
        return index, None

    def stream_extract_finish_reason(self, chunk_data: Dict, **kwargs) -> Optional[ChatCompletionFinishReason]:
        reason = chunk_data.get("finish_reason", "unknown")

        if reason == "tool_calls":
            return ChatCompletionFinishReason.function_calls
        if reason == "eos":
            return ChatCompletionFinishReason.stop

        return ChatCompletionFinishReason.__members__.get(reason, ChatCompletionFinishReason.unknown)

    def stream_handle_function_calls(
        self, chunk_data: Dict, function_calls_content: ChatCompletionFunctionCallsContent, **kwargs
    ) -> Optional[ChatCompletionFunctionCallsContent]:
        delta = chunk_data.get("delta", {})
        if delta and delta.get("tool_calls"):
            tool_call = delta["tool_calls"][0]
            tool_call_index = tool_call["index"]
            tool_call_function = tool_call["function"]

            if tool_call_index == function_calls_content.index:
                # Append to the current call's argument string; `or ""` guards
                # against a null arguments fragment in a delta.
                function_calls_content.arguments_strs[function_calls_content.index] += (
                    tool_call_function["arguments"] or ""
                )

            elif tool_call_index > function_calls_content.index:
                # A new tool call has started: open a fresh slot for it.
                function_calls_content.arguments_strs.append(tool_call_function["arguments"] or "")
                function_calls_content.names.append(tool_call_function["name"])
                function_calls_content.index = tool_call_index
            return function_calls_content

        return None
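
# A minimal usage sketch, for illustration only. ChatCompletionUserMessage and
# the SILICONCLOUD_API_KEY credential field are assumed names from the
# surrounding framework, which also performs the actual HTTP call:
#
#   model = SiliconcloudChatCompletionModel()
#   url, headers, payload = await model.prepare_request(
#       stream=False,
#       provider_model_id="deepseek-ai/DeepSeek-V2.5",  # example model id
#       messages=[ChatCompletionUserMessage(content="Hello")],
#       credentials=ProviderCredentials(SILICONCLOUD_API_KEY="sk-..."),
#       configs=ChatCompletionModelConfiguration(),
#   )
#   # The framework POSTs `payload` to `url` with `headers`, then routes the
#   # response through extract_core_data / extract_text_content, etc.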