
Commit 7dd8ae3

Update chatgpt support
1 parent d281883 commit 7dd8ae3

4 files changed (+63, -56 lines)


oneAI-backend/src/pricing.rs (8 additions, 8 deletions)

@@ -110,16 +110,16 @@ impl Model {
             // ==== OpenAI ====
             Model::Gpt4_1 => "gpt-4.1",
             Model::Gpt4_1Mini => "gpt-4.1-mini",
-            Model::Gpt4_1Nano => "gpt-4.1-nano",
-            Model::GptO3 => "gpt-o3",
-            Model::GptO4Mini => "gpt-o4-mini",
-            Model::GptO3Pro => "gpt-o3-pro",
+            Model::Gpt4_1Nano => "4.1-nano",
+            Model::GptO3 => "o3",
+            Model::GptO4Mini => "o4-mini",
+            Model::GptO3Pro => "o3-pro",
             Model::Gpt4o => "gpt-4o",
             Model::Gpt4oMini => "gpt-4o-mini",
-            Model::GptO1 => "gpt-o1",
-            Model::GptO3DeepResearch => "gpt-o3-DeepResearch",
-            Model::GptO3Mini => "gpt-o3-mini",
-            Model::GptO1Mini => "gpt-o1-mini",
+            Model::GptO1 => "o1",
+            Model::GptO3DeepResearch => "o3-DeepResearch",
+            Model::GptO3Mini => "o3-mini",
+            Model::GptO1Mini => "o1-mini",

             // ==== Anthropic ====
             Model::ClaudeOpus4 => "Opus-4",

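Context for the renames: OpenAI serves its o-series reasoning models under bare IDs such as "o3", "o4-mini", and "o1-mini", with no "gpt-" prefix, so the old strings would not have matched any real model ID. A minimal sketch of the mapping pattern, with the enum trimmed to three variants (the full enum lives in pricing.rs):

// Trimmed sketch of the Model -> wire-level ID mapping; not the full enum.
#[derive(Debug, Clone, Copy)]
enum Model {
    Gpt4_1,
    GptO3,
    GptO4Mini,
}

impl Model {
    fn name(&self) -> &'static str {
        match self {
            Model::Gpt4_1 => "gpt-4.1",    // GPT-series keeps the "gpt-" prefix
            Model::GptO3 => "o3",          // o-series models use bare IDs
            Model::GptO4Mini => "o4-mini",
        }
    }
}

fn main() {
    assert_eq!(Model::GptO3.name(), "o3");
}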
oneAI-backend/src/requests/parseapi.rs (1 addition, 1 deletion)

@@ -109,7 +109,7 @@ impl APIInput {
             "model": self.model.name(),
             "messages": self.messages,
             "temperature": self.temperature,
-            "max_tokens": maxtoken,
+            "max_completion_tokens": maxtoken,
             "top_p": self.top_p,
             "stop": self.stop_sequences,
             "stream": self.stream,

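The rename matters because OpenAI's Chat Completions API deprecated max_tokens in favor of max_completion_tokens, and the o-series reasoning models reject the old key outright. A small sketch of the resulting request body, with placeholder values (the model ID, message, and budget are stand-ins):

use serde_json::json;

fn main() {
    let maxtoken = 1024u32;
    // Placeholder request body mirroring the shape built in parseapi.rs.
    let body = json!({
        "model": "o3",
        "messages": [{ "role": "user", "content": "Hello" }],
        "temperature": 1.0,
        "max_completion_tokens": maxtoken, // replaces the deprecated "max_tokens"
        "stream": false,
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}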
oneAI-backend/src/requests/requests.rs (1 addition, 0 deletions)

@@ -84,6 +84,7 @@ impl APIInput {
         let total: u32;
         let unified_response: LlmUnifiedResponse = match self.model.provider() {
             AIProvider::OpenAI => {
+                println!("output: {:#?}", output);
                 let openai: OpenAIResponse = from_str(&output?)?;
                 total = openai.usage.total_tokens;
                 openai.into()

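The added println! dumps the raw provider body before from_str tries to parse it, so when the payload shape drifts from the struct definitions, serde's "missing field" error can be read side by side with the actual JSON. A self-contained sketch of the same pattern (the struct and payload here are trimmed stand-ins):

use serde::Deserialize;
use serde_json::from_str;

// Stand-in for the real OpenAIResponse; only two fields kept.
#[derive(Debug, Deserialize)]
struct OpenAIResponse {
    id: String,
    model: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Stand-in for the raw HTTP body the backend receives.
    let output: Result<String, std::io::Error> =
        Ok(r#"{ "id": "chatcmpl-123", "model": "o3" }"#.to_string());

    // Same debug pattern as the commit: log the payload before parsing it.
    println!("output: {:#?}", output);

    let openai: OpenAIResponse = from_str(&output?)?;
    println!("parsed {} from model {}", openai.id, openai.model);
    Ok(())
}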
oneAI-backend/src/requests/responseparser/openai.rs (53 additions, 47 deletions)

@@ -5,36 +5,43 @@ use serde::{Deserialize, Serialize};
 pub struct OpenAIResponse {
     pub id: String,
     pub object: String,
+    #[serde(rename = "created")]
     pub created_at: u64,
-    pub status: String,
-    pub error: Option<serde_json::Value>,
-    pub incomplete_details: Option<serde_json::Value>,
-    pub instructions: Option<serde_json::Value>,
-    pub max_output_tokens: Option<serde_json::Value>,
     pub model: String,
-    pub output: Vec<OpenAIOutput>,
-    pub parallel_tool_calls: bool,
-    pub previous_response_id: Option<String>,
-    pub reasoning: OpenAIReasoning,
-    pub store: bool,
-    pub temperature: f64,
-    pub text: OpenAITextField,
-    pub tool_choice: String,
-    pub tools: Vec<serde_json::Value>,
-    pub top_p: f64,
-    pub truncation: String,
+    pub choices: Vec<OpenAIOutput>,
     pub usage: OpenAIUsage,
-    pub user: Option<serde_json::Value>,
-    pub metadata: OpenAIMetadata,
+    // Optional fields for completeness
+    pub service_tier: Option<String>,
+    pub system_fingerprint: Option<String>,
 }
-
 #[derive(Debug, Serialize, Deserialize)]
 pub struct OpenAIOutput {
-    pub r#type: String,
-    pub id: String,
-    pub status: String,
+    pub index: u32,
+    pub finish_reason: Option<String>,
+    pub message: OpenAIMessage,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct OpenAIUsage {
+    pub prompt_tokens: u32,
+    pub completion_tokens: u32,
+    pub total_tokens: u32,
+    #[serde(default)]
+    pub prompt_tokens_details: OpenAIInputTokensDetails,
+    #[serde(default)]
+    pub completion_tokens_details: OpenAIOutputTokensDetails,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct OpenAIMessage {
     pub role: String,
-    pub content: Vec<OpenAIContent>,
+    pub content: String,
+    pub refusal: Option<serde_json::Value>,
+    pub function_call: Option<serde_json::Value>,
+    pub tool_calls: Option<Vec<serde_json::Value>>,
+    pub parsed: Option<serde_json::Value>,
+    #[serde(default)]
+    pub annotations: Vec<serde_json::Value>,
 }

@@ -44,64 +51,63 @@ pub struct OpenAIContent {
     pub annotations: Vec<serde_json::Value>,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Default)]
 pub struct OpenAIReasoning {
     pub effort: Option<serde_json::Value>,
     pub summary: Option<serde_json::Value>,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Default)]
 pub struct OpenAITextField {
     pub format: OpenAIFormat,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Default)]
 pub struct OpenAIFormat {
+    #[serde(rename = "type")]
     pub r#type: String,
 }

-#[derive(Debug, Serialize, Deserialize)]
-pub struct OpenAIUsage {
-    pub input_tokens: u32,
-    pub input_tokens_details: OpenAIInputTokensDetails,
-    pub output_tokens: u32,
-    pub output_tokens_details: OpenAIOutputTokensDetails,
-    pub total_tokens: u32,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Default)]
 pub struct OpenAIInputTokensDetails {
     pub cached_tokens: u32,
+    pub audio_tokens: Option<u32>,
 }

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Default)]
 pub struct OpenAIOutputTokensDetails {
     pub reasoning_tokens: u32,
+    pub audio_tokens: Option<u32>,
 }

 #[derive(Debug, Serialize, Deserialize)]
 pub struct OpenAIMetadata {} // Empty object

 impl From<OpenAIResponse> for LlmUnifiedResponse {
     fn from(res: OpenAIResponse) -> Self {
-        let content = res
-            .output
-            .iter()
-            .flat_map(|out| out.content.iter().map(|c| c.text.clone()))
-            .collect::<Vec<_>>()
-            .join("\n");
+        let first = res.choices.get(0);
+
+        let content = first
+            .map(|c| c.message.content.clone())
+            .unwrap_or_default();
+
+        let role = first
+            .map(|c| c.message.role.clone());
+
+        let finish_reason = first
+            .and_then(|c| c.finish_reason.clone());

         LlmUnifiedResponse {
             provider: "OpenAI".into(),
             model: res.model,
-            role: res.output.get(0).map(|o| o.role.clone()),
+            role,
             content,
             usage: Some(LlmUsage {
-                input_tokens: Some(res.usage.input_tokens),
-                output_tokens: Some(res.usage.output_tokens),
+                input_tokens: Some(res.usage.prompt_tokens),
+                output_tokens: Some(res.usage.completion_tokens),
                 total_tokens: Some(res.usage.total_tokens),
             }),
-            finish_reason: Some(res.status), // e.g. "completed"
+            finish_reason,
         }
     }
 }

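Taken together, these struct changes swap the Responses API shape (output, status, usage.input_tokens) for the Chat Completions shape (choices, finish_reason, usage.prompt_tokens), which is what /v1/chat/completions actually returns. A self-contained check against a representative, abridged payload; the structs are trimmed copies of the ones above, and the JSON values are illustrative:

use serde::Deserialize;
use serde_json::from_str;

// Trimmed copies of the new structs, enough to exercise the shape change.
#[derive(Debug, Deserialize)]
struct OpenAIResponse {
    model: String,
    choices: Vec<OpenAIOutput>,
    usage: OpenAIUsage,
}

#[derive(Debug, Deserialize)]
struct OpenAIOutput {
    finish_reason: Option<String>,
    message: OpenAIMessage,
}

#[derive(Debug, Deserialize)]
struct OpenAIMessage {
    role: String,
    content: String,
}

#[derive(Debug, Deserialize)]
struct OpenAIUsage {
    prompt_tokens: u32,
    completion_tokens: u32,
    total_tokens: u32,
}

fn main() {
    // Abridged Chat Completions payload: note `choices`, `finish_reason`,
    // and the prompt/completion token names, none of which existed in the
    // old Responses-API structs (`output`, `status`, `input_tokens`).
    let raw = r#"{
        "model": "o3",
        "choices": [{
            "index": 0,
            "finish_reason": "stop",
            "message": { "role": "assistant", "content": "Hello!" }
        }],
        "usage": { "prompt_tokens": 9, "completion_tokens": 3, "total_tokens": 12 }
    }"#;

    let res: OpenAIResponse = from_str(raw).expect("payload matches new schema");
    let first = res.choices.get(0);
    assert_eq!(first.map(|c| c.message.role.as_str()), Some("assistant"));
    assert_eq!(first.and_then(|c| c.finish_reason.clone()), Some("stop".to_string()));
    assert_eq!(res.usage.prompt_tokens + res.usage.completion_tokens, res.usage.total_tokens);
    println!("model={} content={:?}", res.model, first.map(|c| &c.message.content));
}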