@@ -98,21 +98,18 @@ impl OpenAiProvider {
9898 user_prompt : & str ,
9999 request : ProviderRequest ,
100100 ) -> CoreResult < String > {
101- let base = self . responses_base_payload ( system_prompt, user_prompt, request. temperature ) ;
101+ let base = self . responses_base_payload ( system_prompt, user_prompt, Some ( request. temperature ) ) ;
102102
103103 match self
104- . complete_responses_with_param ( & base, "max_output_tokens" , request. max_output_tokens )
104+ . complete_responses_with_fallbacks ( & base, request. max_output_tokens )
105105 . await
106106 {
107107 Ok ( message) => Ok ( message) ,
108108 Err ( err) => {
109- if is_unsupported_param ( & err, "max_output_tokens" ) {
109+ if is_unsupported_param ( & err, "temperature" ) {
110+ let base = self . responses_base_payload ( system_prompt, user_prompt, None ) ;
110111 return self
111- . complete_responses_with_param (
112- & base,
113- "max_completion_tokens" ,
114- request. max_output_tokens ,
115- )
112+ . complete_responses_with_fallbacks ( & base, request. max_output_tokens )
116113 . await ;
117114 }
118115 Err ( err)
@@ -126,33 +123,51 @@ impl OpenAiProvider {
126123 user_prompt : & str ,
127124 request : ProviderRequest ,
128125 ) -> CoreResult < String > {
129- let body = serde_json:: json!( {
130- "model" : self . model,
131- "messages" : [
132- { "role" : "system" , "content" : system_prompt } ,
133- { "role" : "user" , "content" : user_prompt }
134- ] ,
135- "max_tokens" : request. max_output_tokens,
136- "temperature" : request. temperature
137- } ) ;
126+ let body = self . chat_payload (
127+ system_prompt,
128+ user_prompt,
129+ request. max_output_tokens ,
130+ Some ( request. temperature ) ,
131+ ) ;
138132
139- let request = self
133+ let http_request = self
140134 . client
141135 . post ( self . chat_url ( ) )
142136 . bearer_auth ( & self . api_key )
143137 . json ( & body) ;
144138
145- let json = self . send_with_retries ( request) . await ?;
139+ let json = match self . send_with_retries ( http_request) . await {
140+ Ok ( json) => json,
141+ Err ( err) => {
142+ if is_unsupported_param ( & err, "temperature" ) {
143+ let body = self . chat_payload (
144+ system_prompt,
145+ user_prompt,
146+ request. max_output_tokens ,
147+ None ,
148+ ) ;
149+ let http_request = self
150+ . client
151+ . post ( self . chat_url ( ) )
152+ . bearer_auth ( & self . api_key )
153+ . json ( & body) ;
154+ let json = self . send_with_retries ( http_request) . await ?;
155+ return parse_chat_output ( & json) ;
156+ }
157+ return Err ( err) ;
158+ }
159+ } ;
160+
146161 parse_chat_output ( & json)
147162 }
148163
149164 fn responses_base_payload (
150165 & self ,
151166 system_prompt : & str ,
152167 user_prompt : & str ,
153- temperature : f32 ,
168+ temperature : Option < f32 > ,
154169 ) -> Value {
155- serde_json:: json!( {
170+ let mut payload = serde_json:: json!( {
156171 "model" : self . model,
157172 "input" : [
158173 {
@@ -163,9 +178,62 @@ impl OpenAiProvider {
163178 "role" : "user" ,
164179 "content" : [ { "type" : "input_text" , "text" : user_prompt } ]
165180 }
181+ ]
182+ } ) ;
183+
184+ if let Some ( obj) = payload. as_object_mut ( ) {
185+ if let Some ( value) = temperature {
186+ obj. insert ( "temperature" . to_string ( ) , serde_json:: json!( value) ) ;
187+ }
188+ }
189+
190+ payload
191+ }
192+
193+ fn chat_payload (
194+ & self ,
195+ system_prompt : & str ,
196+ user_prompt : & str ,
197+ max_tokens : u32 ,
198+ temperature : Option < f32 > ,
199+ ) -> Value {
200+ let mut payload = serde_json:: json!( {
201+ "model" : self . model,
202+ "messages" : [
203+ { "role" : "system" , "content" : system_prompt } ,
204+ { "role" : "user" , "content" : user_prompt }
166205 ] ,
167- "temperature" : temperature
168- } )
206+ "max_tokens" : max_tokens
207+ } ) ;
208+
209+ if let Some ( obj) = payload. as_object_mut ( ) {
210+ if let Some ( value) = temperature {
211+ obj. insert ( "temperature" . to_string ( ) , serde_json:: json!( value) ) ;
212+ }
213+ }
214+
215+ payload
216+ }
217+
218+ async fn complete_responses_with_fallbacks (
219+ & self ,
220+ base : & Value ,
221+ max_tokens : u32 ,
222+ ) -> CoreResult < String > {
223+ match self
224+ . complete_responses_with_param ( base, "max_output_tokens" , max_tokens)
225+ . await
226+ {
227+ Ok ( message) => Ok ( message) ,
228+ Err ( err) => {
229+ if is_unsupported_param ( & err, "max_output_tokens" ) {
230+ return self
231+ . complete_responses_with_param ( base, "max_completion_tokens" , max_tokens)
232+ . await ;
233+ }
234+ Err ( err)
235+ }
236+ }
169237 }
170238
171239 async fn complete_responses_with_param (
@@ -274,7 +342,7 @@ mod tests {
274342 )
275343 . expect ( "provider" ) ;
276344
277- let payload = provider. responses_base_payload ( "system" , "user" , 0.2 ) ;
345+ let payload = provider. responses_base_payload ( "system" , "user" , Some ( 0.2 ) ) ;
278346 let input = payload
279347 . get ( "input" )
280348 . and_then ( |value| value. as_array ( ) )
@@ -306,6 +374,46 @@ mod tests {
306374 Some ( "user" )
307375 ) ;
308376 }
377+
#[test]
fn responses_payload_omits_temperature_when_none() {
    // A Responses-mode payload built without a temperature must not
    // serialize the field at all (some models reject it outright).
    let provider = OpenAiProvider::new(
        "gpt-5-nano-2025-08-07".to_string(),
        "https://api.openai.com/v1".to_string(),
        OpenAiMode::Responses,
        5,
        Some("test-key".to_string()),
    )
    .expect("provider");

    let body = provider.responses_base_payload("system", "user", None);
    assert_eq!(body.get("temperature"), None);
}
392+
#[test]
fn chat_payload_omits_temperature_when_none() {
    // A Chat-mode payload built without a temperature must not serialize
    // the field at all (some models reject it outright).
    let provider = OpenAiProvider::new(
        "gpt-5-nano-2025-08-07".to_string(),
        "https://api.openai.com/v1".to_string(),
        OpenAiMode::Chat,
        5,
        Some("test-key".to_string()),
    )
    .expect("provider");

    let body = provider.chat_payload("system", "user", 100, None);
    assert_eq!(body.get("temperature"), None);
}
407+
#[test]
fn unsupported_param_matches_openai_message() {
    // Mirrors the verbatim 400 body OpenAI returns when a model does not
    // accept the `temperature` parameter.
    let message = "openai error 400 Bad Request: {\"error\": {\"message\": \"Unsupported parameter: 'temperature' is not supported with this model.\", \"type\": \"invalid_request_error\", \"param\": \"temperature\", \"code\": null}}";
    let err = CoreError::Provider(message.to_string());

    assert!(is_unsupported_param(&err, "temperature"));
}
309417}
310418
311419fn should_retry ( status : StatusCode ) -> bool {
@@ -315,6 +423,8 @@ fn should_retry(status: StatusCode) -> bool {
315423}
316424
317425fn is_unsupported_param ( err : & CoreError , param : & str ) -> bool {
318- let message = err. to_string ( ) ;
319- message. contains ( "unsupported_parameter" ) && message. contains ( param)
426+ let message = err. to_string ( ) . to_lowercase ( ) ;
427+ let param = param. to_lowercase ( ) ;
428+ ( message. contains ( "unsupported_parameter" ) || message. contains ( "unsupported parameter" ) )
429+ && message. contains ( & param)
320430}
0 commit comments