From 34b28477d93e9f3119f1a3cec62d24ae03c1d05a Mon Sep 17 00:00:00 2001 From: ASHWIN Date: Thu, 9 Oct 2025 12:44:59 +0530 Subject: [PATCH] Updated base_coder.py Title: fix: Unable to cancel LLM request in the new update (#3992) This change fixes a regression where Ctrl+C failed to cancel an LLM request during the initial "Waiting on response..." phase. The cancellation only worked once the response started streaming, leading to a period where the application felt unresponsive. To fix this, I've wrapped the initial, non-streaming litellm.completion call in its own try...except KeyboardInterrupt block. This ensures that user-initiated cancellations are handled immediately and consistently, whether the application is waiting for the first token or streaming the full response. --- aider/coders/base_coder.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b824e928693..71721f637e5 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1376,9 +1376,12 @@ def warm_cache_worker(): stream=False, **kwargs, ) + except KeyboardInterrupt: + self.io.tool_output("\nRequest cancelled by user.") + return except Exception as err: self.io.tool_warning(f"Cache warming error: {str(err)}") - continue + return cache_hit_tokens = getattr( completion.usage, "prompt_cache_hit_tokens", 0