Skip to content

Commit 5775174

Browse files
authored
Never store requests (#3212)
When item IDs are sent to the Responses API, it loads the corresponding items from the database and ignores the provided values, which adds extra latency. Removing the option to store requests also lets us simplify the code. ## Breaking change The `disable_response_storage` configuration option is removed.
1 parent ba631e7 commit 5775174

File tree

17 files changed

+12
-118
lines changed

17 files changed

+12
-118
lines changed

codex-rs/core/src/client.rs

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -157,14 +157,6 @@ impl ModelClient {
157157

158158
let auth_manager = self.auth_manager.clone();
159159

160-
let auth_mode = auth_manager
161-
.as_ref()
162-
.and_then(|m| m.auth())
163-
.as_ref()
164-
.map(|a| a.mode);
165-
166-
let store = prompt.store && auth_mode != Some(AuthMode::ChatGPT);
167-
168160
let full_instructions = prompt.get_full_instructions(&self.config.model_family);
169161
let tools_json = create_tools_json_for_responses_api(&prompt.tools)?;
170162
let reasoning = create_reasoning_param_for_request(
@@ -173,9 +165,7 @@ impl ModelClient {
173165
self.summary,
174166
);
175167

176-
// Request encrypted COT if we are not storing responses,
177-
// otherwise reasoning items will be referenced by ID
178-
let include: Vec<String> = if !store && reasoning.is_some() {
168+
let include: Vec<String> = if reasoning.is_some() {
179169
vec!["reasoning.encrypted_content".to_string()]
180170
} else {
181171
vec![]
@@ -204,7 +194,7 @@ impl ModelClient {
204194
tool_choice: "auto",
205195
parallel_tool_calls: false,
206196
reasoning,
207-
store,
197+
store: false,
208198
stream: true,
209199
include,
210200
prompt_cache_key: Some(self.session_id.to_string()),

codex-rs/core/src/client_common.rs

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,6 @@ pub struct Prompt {
2525
/// Conversation context input items.
2626
pub input: Vec<ResponseItem>,
2727

28-
/// Whether to store response on server side (disable_response_storage = !store).
29-
pub store: bool,
30-
3128
/// Tools available to the model, including additional tools sourced from
3229
/// external MCP servers.
3330
pub(crate) tools: Vec<OpenAiTool>,
@@ -128,7 +125,6 @@ pub(crate) struct ResponsesApiRequest<'a> {
128125
pub(crate) tool_choice: &'static str,
129126
pub(crate) parallel_tool_calls: bool,
130127
pub(crate) reasoning: Option<Reasoning>,
131-
/// true when using the Responses API.
132128
pub(crate) store: bool,
133129
pub(crate) stream: bool,
134130
pub(crate) include: Vec<String>,
@@ -199,7 +195,7 @@ mod tests {
199195
tool_choice: "auto",
200196
parallel_tool_calls: false,
201197
reasoning: None,
202-
store: true,
198+
store: false,
203199
stream: true,
204200
include: vec![],
205201
prompt_cache_key: None,
@@ -229,7 +225,7 @@ mod tests {
229225
tool_choice: "auto",
230226
parallel_tool_calls: false,
231227
reasoning: None,
232-
store: true,
228+
store: false,
233229
stream: true,
234230
include: vec![],
235231
prompt_cache_key: None,

codex-rs/core/src/codex.rs

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,6 @@ impl Codex {
184184
base_instructions: config.base_instructions.clone(),
185185
approval_policy: config.approval_policy,
186186
sandbox_policy: config.sandbox_policy.clone(),
187-
disable_response_storage: config.disable_response_storage,
188187
notify: config.notify.clone(),
189188
cwd: config.cwd.clone(),
190189
};
@@ -301,7 +300,6 @@ pub(crate) struct TurnContext {
301300
pub(crate) approval_policy: AskForApproval,
302301
pub(crate) sandbox_policy: SandboxPolicy,
303302
pub(crate) shell_environment_policy: ShellEnvironmentPolicy,
304-
pub(crate) disable_response_storage: bool,
305303
pub(crate) tools_config: ToolsConfig,
306304
}
307305

@@ -334,8 +332,6 @@ struct ConfigureSession {
334332
approval_policy: AskForApproval,
335333
/// How to sandbox commands executed in the system
336334
sandbox_policy: SandboxPolicy,
337-
/// Disable server-side response storage (send full context each request)
338-
disable_response_storage: bool,
339335

340336
/// Optional external notifier command tokens. Present only when the
341337
/// client wants the agent to spawn a program after each completed
@@ -370,7 +366,6 @@ impl Session {
370366
base_instructions,
371367
approval_policy,
372368
sandbox_policy,
373-
disable_response_storage,
374369
notify,
375370
cwd,
376371
} = configure_session;
@@ -462,7 +457,6 @@ impl Session {
462457
sandbox_policy,
463458
shell_environment_policy: config.shell_environment_policy.clone(),
464459
cwd,
465-
disable_response_storage,
466460
};
467461
let sess = Arc::new(Session {
468462
session_id,
@@ -1117,7 +1111,6 @@ async fn submission_loop(
11171111
sandbox_policy: new_sandbox_policy.clone(),
11181112
shell_environment_policy: prev.shell_environment_policy.clone(),
11191113
cwd: new_cwd.clone(),
1120-
disable_response_storage: prev.disable_response_storage,
11211114
};
11221115

11231116
// Install the new persistent context for subsequent tasks/turns.
@@ -1199,7 +1192,6 @@ async fn submission_loop(
11991192
sandbox_policy,
12001193
shell_environment_policy: turn_context.shell_environment_policy.clone(),
12011194
cwd,
1202-
disable_response_storage: turn_context.disable_response_storage,
12031195
};
12041196
// TODO: record the new environment context in the conversation history
12051197
// no current task, spawn a new one with the per‑turn context
@@ -1604,7 +1596,6 @@ async fn run_turn(
16041596

16051597
let prompt = Prompt {
16061598
input,
1607-
store: !turn_context.disable_response_storage,
16081599
tools,
16091600
base_instructions_override: turn_context.base_instructions.clone(),
16101601
};
@@ -1858,7 +1849,6 @@ async fn run_compact_task(
18581849

18591850
let prompt = Prompt {
18601851
input: turn_input,
1861-
store: !turn_context.disable_response_storage,
18621852
tools: Vec::new(),
18631853
base_instructions_override: Some(compact_instructions.clone()),
18641854
};

codex-rs/core/src/config.rs

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -78,11 +78,6 @@ pub struct Config {
7878
/// Defaults to `false`.
7979
pub show_raw_agent_reasoning: bool,
8080

81-
/// Disable server-side response storage (sends the full conversation
82-
/// context with every request). Currently necessary for OpenAI customers
83-
/// who have opted into Zero Data Retention (ZDR).
84-
pub disable_response_storage: bool,
85-
8681
/// User-provided instructions from AGENTS.md.
8782
pub user_instructions: Option<String>,
8883

@@ -417,11 +412,6 @@ pub struct ConfigToml {
417412
/// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`.
418413
pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
419414

420-
/// Disable server-side response storage (sends the full conversation
421-
/// context with every request). Currently necessary for OpenAI customers
422-
/// who have opted into Zero Data Retention (ZDR).
423-
pub disable_response_storage: Option<bool>,
424-
425415
/// Optional external command to spawn for end-user notifications.
426416
#[serde(default)]
427417
pub notify: Option<Vec<String>>,
@@ -640,7 +630,6 @@ pub struct ConfigOverrides {
640630
pub include_plan_tool: Option<bool>,
641631
pub include_apply_patch_tool: Option<bool>,
642632
pub include_view_image_tool: Option<bool>,
643-
pub disable_response_storage: Option<bool>,
644633
pub show_raw_agent_reasoning: Option<bool>,
645634
pub tools_web_search_request: Option<bool>,
646635
}
@@ -668,7 +657,6 @@ impl Config {
668657
include_plan_tool,
669658
include_apply_patch_tool,
670659
include_view_image_tool,
671-
disable_response_storage,
672660
show_raw_agent_reasoning,
673661
tools_web_search_request: override_tools_web_search_request,
674662
} = overrides;
@@ -802,11 +790,6 @@ impl Config {
802790
.unwrap_or_else(AskForApproval::default),
803791
sandbox_policy,
804792
shell_environment_policy,
805-
disable_response_storage: config_profile
806-
.disable_response_storage
807-
.or(cfg.disable_response_storage)
808-
.or(disable_response_storage)
809-
.unwrap_or(false),
810793
notify: cfg.notify,
811794
user_instructions,
812795
base_instructions,
@@ -1071,7 +1054,6 @@ exclude_slash_tmp = true
10711054
let toml = r#"
10721055
model = "o3"
10731056
approval_policy = "untrusted"
1074-
disable_response_storage = false
10751057
10761058
# Can be used to determine which profile to use if not specified by
10771059
# `ConfigOverrides`.
@@ -1101,7 +1083,6 @@ model_provider = "openai-chat-completions"
11011083
model = "o3"
11021084
model_provider = "openai"
11031085
approval_policy = "on-failure"
1104-
disable_response_storage = true
11051086
11061087
[profiles.gpt5]
11071088
model = "gpt-5"
@@ -1199,7 +1180,6 @@ model_verbosity = "high"
11991180
approval_policy: AskForApproval::Never,
12001181
sandbox_policy: SandboxPolicy::new_read_only_policy(),
12011182
shell_environment_policy: ShellEnvironmentPolicy::default(),
1202-
disable_response_storage: false,
12031183
user_instructions: None,
12041184
notify: None,
12051185
cwd: fixture.cwd(),
@@ -1257,7 +1237,6 @@ model_verbosity = "high"
12571237
approval_policy: AskForApproval::UnlessTrusted,
12581238
sandbox_policy: SandboxPolicy::new_read_only_policy(),
12591239
shell_environment_policy: ShellEnvironmentPolicy::default(),
1260-
disable_response_storage: false,
12611240
user_instructions: None,
12621241
notify: None,
12631242
cwd: fixture.cwd(),
@@ -1330,7 +1309,6 @@ model_verbosity = "high"
13301309
approval_policy: AskForApproval::OnFailure,
13311310
sandbox_policy: SandboxPolicy::new_read_only_policy(),
13321311
shell_environment_policy: ShellEnvironmentPolicy::default(),
1333-
disable_response_storage: true,
13341312
user_instructions: None,
13351313
notify: None,
13361314
cwd: fixture.cwd(),
@@ -1389,7 +1367,6 @@ model_verbosity = "high"
13891367
approval_policy: AskForApproval::OnFailure,
13901368
sandbox_policy: SandboxPolicy::new_read_only_policy(),
13911369
shell_environment_policy: ShellEnvironmentPolicy::default(),
1392-
disable_response_storage: false,
13931370
user_instructions: None,
13941371
notify: None,
13951372
cwd: fixture.cwd(),

codex-rs/core/src/config_profile.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@ pub struct ConfigProfile {
1515
/// [`ModelProviderInfo`] to use.
1616
pub model_provider: Option<String>,
1717
pub approval_policy: Option<AskForApproval>,
18-
pub disable_response_storage: Option<bool>,
1918
pub model_reasoning_effort: Option<ReasoningEffort>,
2019
pub model_reasoning_summary: Option<ReasoningSummary>,
2120
pub model_verbosity: Option<Verbosity>,
@@ -29,7 +28,6 @@ impl From<ConfigProfile> for codex_protocol::mcp_protocol::Profile {
2928
model: config_profile.model,
3029
model_provider: config_profile.model_provider,
3130
approval_policy: config_profile.approval_policy,
32-
disable_response_storage: config_profile.disable_response_storage,
3331
model_reasoning_effort: config_profile.model_reasoning_effort,
3432
model_reasoning_summary: config_profile.model_reasoning_summary,
3533
model_verbosity: config_profile.model_verbosity,

codex-rs/core/tests/suite/client.rs

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -224,19 +224,16 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
224224
let expected_input = serde_json::json!([
225225
{
226226
"type": "message",
227-
"id": null,
228227
"role": "user",
229228
"content": [{ "type": "input_text", "text": "resumed user message" }]
230229
},
231230
{
232231
"type": "message",
233-
"id": null,
234232
"role": "assistant",
235233
"content": [{ "type": "output_text", "text": "resumed assistant message" }]
236234
},
237235
{
238236
"type": "message",
239-
"id": null,
240237
"role": "user",
241238
"content": [{ "type": "input_text", "text": "hello" }]
242239
}
@@ -496,7 +493,6 @@ async fn chatgpt_auth_sends_correct_request() {
496493
"Bearer Access Token"
497494
);
498495
assert_eq!(request_chatgpt_account_id.to_str().unwrap(), "account_id");
499-
assert!(!request_body["store"].as_bool().unwrap());
500496
assert!(request_body["stream"].as_bool().unwrap());
501497
assert_eq!(
502498
request_body["include"][0].as_str().unwrap(),
@@ -578,14 +574,6 @@ async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
578574
.unwrap();
579575

580576
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
581-
582-
// verify request body flags
583-
let request = &server.received_requests().await.unwrap()[0];
584-
let request_body = request.body_json::<serde_json::Value>().unwrap();
585-
assert!(
586-
!request_body["store"].as_bool().unwrap(),
587-
"store should be false for ChatGPT auth"
588-
);
589577
}
590578

591579
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -662,14 +650,6 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
662650
.unwrap();
663651

664652
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
665-
666-
// verify request body flags
667-
let request = &server.received_requests().await.unwrap()[0];
668-
let request_body = request.body_json::<serde_json::Value>().unwrap();
669-
assert!(
670-
request_body["store"].as_bool().unwrap(),
671-
"store should be true for API key auth"
672-
);
673653
}
674654

675655
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -990,31 +970,26 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
990970
let r3_tail_expected = serde_json::json!([
991971
{
992972
"type": "message",
993-
"id": null,
994973
"role": "user",
995974
"content": [{"type":"input_text","text":"U1"}]
996975
},
997976
{
998977
"type": "message",
999-
"id": null,
1000978
"role": "assistant",
1001979
"content": [{"type":"output_text","text":"Hey there!\n"}]
1002980
},
1003981
{
1004982
"type": "message",
1005-
"id": null,
1006983
"role": "user",
1007984
"content": [{"type":"input_text","text":"U2"}]
1008985
},
1009986
{
1010987
"type": "message",
1011-
"id": null,
1012988
"role": "assistant",
1013989
"content": [{"type":"output_text","text":"Hey there!\n"}]
1014990
},
1015991
{
1016992
"type": "message",
1017-
"id": null,
1018993
"role": "user",
1019994
"content": [{"type":"input_text","text":"U3"}]
1020995
}

0 commit comments

Comments (0)