# /autogpt_platform/backend/backend/blocks/llm.py
import logging
from typing import Any, Optional

from pydantic import BaseModel, SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
from backend.integrations.providers import ProviderName
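
# ---------------------------------------------------------------------------
# NOTE: The definitions below stand in for symbols that the full module
# provides elsewhere (model enum, response types, credential aliases, test
# fixtures). They are a minimal, assumed sketch so this excerpt is
# self-contained; the repository's real definitions are more extensive.
# ---------------------------------------------------------------------------
from enum import Enum

logger = logging.getLogger(__name__)


class LlmModel(str, Enum):
    """Assumed subset of the supported model identifiers."""

    GPT4O_MINI = "gpt-4o-mini"


class TokenUsage(BaseModel):
    """Token accounting shared by all provider backends."""

    input_tokens: int
    output_tokens: int
    total_tokens: int


class LLMResponse(BaseModel):
    """Provider-agnostic response returned by llm_call()."""

    content: str
    model: str
    usage: TokenUsage
    tool_calls: Optional[list[Any]] = None


# Assumed alias: the platform wraps API-key credentials for AI providers.
AICredentials = APIKeyCredentials


def AICredentialsField():
    """Assumed helper returning a credentials field for AI providers."""
    return CredentialsField(description="API key for the AI provider")


# Hypothetical test fixtures used by the block's self-test configuration.
TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="openai",
    api_key=SecretStr("mock-openai-api-key"),
    title="Mock OpenAI credentials",
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": "api_key",
    "title": TEST_CREDENTIALS.title,
}
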
class AIStructuredResponseGeneratorBlock(Block):
    """
    AI structured response generator block.

    Purpose: generate structured responses using a large language model.
    Features: multiple LLM providers, JSON-formatted output, and tool calls.
    """

    class Input(BlockSchema):
        """Input schema."""

        credentials: AICredentials = AICredentialsField()  # AI provider credentials
        model: LlmModel = SchemaField(
            description="The language model to use",
            default=LlmModel.GPT4O_MINI,
            advanced=False,
        )
        prompt: str = SchemaField(
            description="The prompt to send to the AI model",
            placeholder="Describe the task you want the AI to perform...",
            advanced=False,
        )
        system_prompt: str = SchemaField(
            description="System prompt defining the AI's role and behavior",
            default="You are a helpful AI assistant. Provide accurate, useful answers.",
            advanced=True,
        )
        max_tokens: int = SchemaField(
            description="Maximum number of tokens to generate",
            default=1024,
            ge=1,
            le=4096,
            advanced=True,
        )
        temperature: float = SchemaField(
            description="Temperature controlling output randomness (0-2)",
            default=0.7,
            ge=0.0,
            le=2.0,
            advanced=True,
        )
        json_format: bool = SchemaField(
            description="Whether to force JSON-formatted output",
            default=False,
            advanced=True,
        )
        tools: Optional[list[dict]] = SchemaField(
            description="Available tool definitions (JSON format)",
            default=None,
            advanced=True,
        )

    class Output(BlockSchema):
        """Output schema."""

        response: str = SchemaField(
            description="The response generated by the AI model"
        )
        usage: dict[str, Any] = SchemaField(
            description="Token usage statistics"
        )
        model_used: str = SchemaField(
            description="Name of the model actually used"
        )
        tool_calls: Optional[list[dict]] = SchemaField(
            description="Tool calls issued by the AI",
            default=None,
        )

    def __init__(self):
        super().__init__(
            id="4b3d4c5e-6f7a-8b9c-0d1e-2f3a4b5c6d7e",
            description="Generate structured responses with a large language model; "
            "supports multiple AI providers and advanced features",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=AIStructuredResponseGeneratorBlock.Input,
            output_schema=AIStructuredResponseGeneratorBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "model": LlmModel.GPT4O_MINI,
                "prompt": "Explain what artificial intelligence is",
                "system_prompt": "You are an AI expert",
                "max_tokens": 500,
                "temperature": 0.7,
            },
            test_credentials=TEST_CREDENTIALS,
        )

    async def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        """
        Generate the AI response.

        Flow:
        1. Build the message list.
        2. Call the LLM API.
        3. Process the response and any tool calls.
        4. Yield the structured result.
        """
        try:
            # Build the message list
            messages = [
                {"role": "system", "content": input_data.system_prompt},
                {"role": "user", "content": input_data.prompt},
            ]
            # Call the LLM API
            response = await llm_call(
                credentials=credentials,
                llm_model=input_data.model,
                prompt=messages,
                json_format=input_data.json_format,
                max_tokens=input_data.max_tokens,
                tools=input_data.tools,
                temperature=input_data.temperature,
            )
            # Yield the response fields
            yield "response", response.content
            yield "usage", response.usage.model_dump()
            yield "model_used", response.model
            # Yield tool calls, if any
            if response.tool_calls:
                yield "tool_calls", [
                    tool_call.model_dump() for tool_call in response.tool_calls
                ]
        except Exception as e:
            logger.exception("AI response generation failed")
            yield "error", f"AI response generation failed: {e}"


async def llm_call(
    credentials: APIKeyCredentials,
    llm_model: LlmModel,
    prompt: list[dict],
    json_format: bool,
    max_tokens: int | None,
    tools: list[dict] | None = None,
    temperature: float = 0.7,
    **kwargs,
) -> LLMResponse:
    """
    Unified LLM call interface.

    Supports multiple LLM providers: OpenAI, Anthropic, and Groq are routed
    below (the full module also covers Ollama and others).

    Args:
        credentials: API credentials.
        llm_model: Model to use.
        prompt: Message list.
        json_format: Whether to force JSON output.
        max_tokens: Maximum number of tokens.
        tools: Tool definitions.
        temperature: Sampling temperature.

    Returns:
        An LLMResponse containing the response content, usage statistics, etc.
    """
    provider = credentials.provider.lower()
    if provider == ProviderName.OPENAI:
        return await _call_openai(
            api_key=credentials.api_key.get_secret_value(),
            model=llm_model.value,
            messages=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            json_format=json_format,
            tools=tools,
        )
    elif provider == ProviderName.ANTHROPIC:
        return await _call_anthropic(
            api_key=credentials.api_key.get_secret_value(),
            model=llm_model.value,
            messages=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            tools=tools,
        )
    elif provider == ProviderName.GROQ:
        return await _call_groq(
            api_key=credentials.api_key.get_secret_value(),
            model=llm_model.value,
            messages=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            tools=tools,
        )
    else:
        raise ValueError(f"Unsupported LLM provider: {provider}")


async def _call_openai(
    api_key: str,
    model: str,
    messages: list[dict],
    max_tokens: int | None,
    temperature: float,
    json_format: bool,
    tools: list[dict] | None,
) -> LLMResponse:
    """OpenAI API call implementation."""
    import openai

    client = openai.AsyncOpenAI(api_key=api_key)
    # Build the request parameters
    kwargs: dict[str, Any] = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
    }
    if max_tokens:
        kwargs["max_tokens"] = max_tokens
    if json_format:
        kwargs["response_format"] = {"type": "json_object"}
    if tools:
        kwargs["tools"] = tools
        kwargs["tool_choice"] = "auto"
    # Issue the API call
    response = await client.chat.completions.create(**kwargs)
    # Normalize the response
    message = response.choices[0].message
    return LLMResponse(
        content=message.content or "",
        model=response.model,
        usage=TokenUsage(
            input_tokens=response.usage.prompt_tokens,
            output_tokens=response.usage.completion_tokens,
            total_tokens=response.usage.total_tokens,
        ),
        tool_calls=message.tool_calls if hasattr(message, "tool_calls") else None,
    )
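

# ---------------------------------------------------------------------------
# The Anthropic and Groq backends are dispatched to above but elided from
# this excerpt. The sketches below are plausible minimal implementations,
# assuming the official `anthropic` and `groq` SDKs; the module's real
# versions handle more (tool-call translation, retries, streaming).
# ---------------------------------------------------------------------------
async def _call_anthropic(
    api_key: str,
    model: str,
    messages: list[dict],
    max_tokens: int | None,
    temperature: float,
    tools: list[dict] | None,
) -> LLMResponse:
    """Anthropic API call implementation (minimal sketch)."""
    import anthropic

    client = anthropic.AsyncAnthropic(api_key=api_key)
    # Anthropic takes the system prompt as a separate parameter, not a message.
    system = "\n".join(m["content"] for m in messages if m["role"] == "system")
    chat = [m for m in messages if m["role"] != "system"]
    request: dict[str, Any] = {
        "model": model,
        "messages": chat,
        "max_tokens": max_tokens or 1024,  # required by the Messages API
        "temperature": temperature,
    }
    if system:
        request["system"] = system
    response = await client.messages.create(**request)
    # Concatenate the text blocks of the response
    content = "".join(b.text for b in response.content if b.type == "text")
    return LLMResponse(
        content=content,
        model=response.model,
        usage=TokenUsage(
            input_tokens=response.usage.input_tokens,
            output_tokens=response.usage.output_tokens,
            total_tokens=response.usage.input_tokens + response.usage.output_tokens,
        ),
        tool_calls=None,  # tool-call translation elided in this sketch
    )


async def _call_groq(
    api_key: str,
    model: str,
    messages: list[dict],
    max_tokens: int | None,
    temperature: float,
    tools: list[dict] | None,
) -> LLMResponse:
    """Groq API call implementation (minimal sketch; OpenAI-compatible API)."""
    import groq

    client = groq.AsyncGroq(api_key=api_key)
    kwargs: dict[str, Any] = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
    }
    if max_tokens:
        kwargs["max_tokens"] = max_tokens
    if tools:
        kwargs["tools"] = tools
        kwargs["tool_choice"] = "auto"
    response = await client.chat.completions.create(**kwargs)
    message = response.choices[0].message
    return LLMResponse(
        content=message.content or "",
        model=response.model,
        usage=TokenUsage(
            input_tokens=response.usage.prompt_tokens,
            output_tokens=response.usage.completion_tokens,
            total_tokens=response.usage.total_tokens,
        ),
        tool_calls=message.tool_calls,
    )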
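

# Minimal local harness (assumed usage): the platform normally runs blocks
# through its graph executor, but the block can be exercised directly like
# this for a quick smoke test. Requires a real API key in place of the mock
# TEST_CREDENTIALS defined above.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        block = AIStructuredResponseGeneratorBlock()
        input_data = AIStructuredResponseGeneratorBlock.Input(
            credentials=TEST_CREDENTIALS,
            prompt="Explain what artificial intelligence is",
        )
        # run() is an async generator yielding (output_name, value) pairs
        async for name, value in block.run(input_data, credentials=TEST_CREDENTIALS):
            print(f"{name}: {value}")

    asyncio.run(_demo())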