@@ -732,24 +732,22 @@ impl CoreRuntime {
                 context.prefix = Tokens([prefix.0, suffix.0].concat());
                 context.suffix = Tokens(vec![]);
 
-                let output = receiver.recv_async().await?;
-
-                // cache the prompt if being asked
-                if let CachedPrompt::Future(sender) = context.prompt_cached.clone() {
-                    assert_eq!(context.prefix.len(), context.prompt_tokens.len());
+                receiver.recv_async().await?
+            }
+        };
 
-                    let backed = self.back(batch).await?;
-                    let output = output.clone();
-                    sender.send_replace(Some(CachedItem::new(backed, output)));
-                    context.prompt_cached = CachedPrompt::Done;
+        // cache the prompt if being asked
+        if let CachedPrompt::Future(sender) = context.prompt_cached.clone() {
+            assert_eq!(context.prefix.len(), context.prompt_tokens.len());
 
-                    let len = context.prefix.len();
-                    log::info!("[cache][insert][slot: {batch}][len: {len}]");
-                }
+            let backed = self.back(batch).await?;
+            let output = output.clone();
+            sender.send_replace(Some(CachedItem::new(backed, output)));
+            context.prompt_cached = CachedPrompt::Done;
 
-                output
-            }
-        };
+            let len = context.prefix.len();
+            log::info!("[cache][insert][slot: {batch}][len: {len}]");
+        }
 
         let (token, output) = {
             let output = output.clone();