@@ -148,7 +148,12 @@ func (scd *snowflakeChunkDownloader) schedule() {
 	select {
 	case nextIdx := <-scd.ChunksChan:
 		logger.WithContext(scd.ctx).Infof("schedule chunk: %v", nextIdx+1)
-		go scd.FuncDownload(scd.ctx, scd, nextIdx)
+		go GoroutineWrapper(
+			scd.ctx,
+			func() {
+				scd.FuncDownload(scd.ctx, scd, nextIdx)
+			},
+		)
 	default:
 		// no more download
 		logger.WithContext(scd.ctx).Info("no more download")
@@ -162,7 +167,12 @@ func (scd *snowflakeChunkDownloader) checkErrorRetry() (err error) {
 			errc.Error != context.Canceled &&
 			errc.Error != context.DeadlineExceeded {
 			// add the index to the chunks channel so that the download will be retried.
-			go scd.FuncDownload(scd.ctx, scd, errc.Index)
+			go GoroutineWrapper(
+				scd.ctx,
+				func() {
+					scd.FuncDownload(scd.ctx, scd, errc.Index)
+				},
+			)
 			scd.ChunksErrorCounter++
 			logger.WithContext(scd.ctx).Warningf("chunk idx: %v, err: %v. retrying (%v/%v)...",
 				errc.Index, errc.Error, scd.ChunksErrorCounter, maxChunkDownloaderErrorCounter)
@@ -508,55 +518,58 @@ func (scd *streamChunkDownloader) nextResultSet() error {
 }

 func (scd *streamChunkDownloader) start() error {
-	go func() {
-		readErr := io.EOF
-
-		logger.WithContext(scd.ctx).Infof(
-			"start downloading. downloader id: %v, %v/%v rows, %v chunks",
-			scd.id, len(scd.RowSet.RowType), scd.Total, len(scd.ChunkMetas))
-		t := time.Now()
-
-		defer func() {
-			if readErr == io.EOF {
-				logger.WithContext(scd.ctx).Infof("downloading done. downloader id: %v", scd.id)
-			} else {
-				logger.WithContext(scd.ctx).Debugf("downloading error. downloader id: %v", scd.id)
-			}
-			scd.readErr = readErr
-			close(scd.rowStream)
-
-			if r := recover(); r != nil {
-				if err, ok := r.(error); ok {
-					readErr = err
+	go GoroutineWrapper(
+		scd.ctx,
+		func() {
+			readErr := io.EOF
+
+			logger.WithContext(scd.ctx).Infof(
+				"start downloading. downloader id: %v, %v/%v rows, %v chunks",
+				scd.id, len(scd.RowSet.RowType), scd.Total, len(scd.ChunkMetas))
+			t := time.Now()
+
+			defer func() {
+				if readErr == io.EOF {
+					logger.WithContext(scd.ctx).Infof("downloading done. downloader id: %v", scd.id)
 				} else {
-					readErr = fmt.Errorf("%v", r)
+					logger.WithContext(scd.ctx).Debugf("downloading error. downloader id: %v", scd.id)
+				}
+				scd.readErr = readErr
+				close(scd.rowStream)
+
+				if r := recover(); r != nil {
+					if err, ok := r.(error); ok {
+						readErr = err
+					} else {
+						readErr = fmt.Errorf("%v", r)
+					}
 				}
+			}()
+
+			logger.WithContext(scd.ctx).Infof("sending initial set of rows in %vms", time.Since(t).Microseconds())
+			t = time.Now()
+			for _, row := range scd.RowSet.JSON {
+				scd.rowStream <- row
 			}
-		}()
-
-		logger.WithContext(scd.ctx).Infof("sending initial set of rows in %vms", time.Since(t).Microseconds())
-		t = time.Now()
-		for _, row := range scd.RowSet.JSON {
-			scd.rowStream <- row
-		}
-		scd.RowSet.JSON = nil
-
-		// Download and parse one chunk at a time. The fetcher will send each
-		// parsed row to the row stream. When an error occurs, the fetcher will
-		// stop writing to the row stream so we can stop processing immediately
-		for i, chunk := range scd.ChunkMetas {
-			logger.WithContext(scd.ctx).Infof("starting chunk fetch %d (%d rows)", i, chunk.RowCount)
-			if err := scd.fetcher.fetch(chunk.URL, scd.rowStream); err != nil {
-				logger.WithContext(scd.ctx).Debugf(
-					"failed chunk fetch %d: %#v, downloader id: %v, %v/%v rows, %v chunks",
-					i, err, scd.id, len(scd.RowSet.RowType), scd.Total, len(scd.ChunkMetas))
-				readErr = fmt.Errorf("chunk fetch: %w", err)
-				break
+			scd.RowSet.JSON = nil
+
+			// Download and parse one chunk at a time. The fetcher will send each
+			// parsed row to the row stream. When an error occurs, the fetcher will
+			// stop writing to the row stream so we can stop processing immediately
+			for i, chunk := range scd.ChunkMetas {
+				logger.WithContext(scd.ctx).Infof("starting chunk fetch %d (%d rows)", i, chunk.RowCount)
+				if err := scd.fetcher.fetch(chunk.URL, scd.rowStream); err != nil {
+					logger.WithContext(scd.ctx).Debugf(
+						"failed chunk fetch %d: %#v, downloader id: %v, %v/%v rows, %v chunks",
+						i, err, scd.id, len(scd.RowSet.RowType), scd.Total, len(scd.ChunkMetas))
+					readErr = fmt.Errorf("chunk fetch: %w", err)
+					break
+				}
+				logger.WithContext(scd.ctx).Infof("fetched chunk %d (%d rows) in %vms", i, chunk.RowCount, time.Since(t).Microseconds())
+				t = time.Now()
 			}
-			logger.WithContext(scd.ctx).Infof("fetched chunk %d (%d rows) in %vms", i, chunk.RowCount, time.Since(t).Microseconds())
-			t = time.Now()
-		}
-	}()
+		},
+	)
 	return nil
 }

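The diff does not include the definition of `GoroutineWrapper` itself; what the call sites require is a package-level hook with signature `func(ctx context.Context, f func())` whose default behavior is simply to run the closure, so that wrapping every `go` statement is behavior-preserving. A minimal sketch of such a hook (the name matches the call sites above, but the body and comments are assumptions, not the committed code):

```go
package gosnowflake

import "context"

// GoroutineWrapper is invoked (via `go GoroutineWrapper(...)`) everywhere the
// driver previously spawned a bare goroutine. The default just runs the
// closure, preserving the old behavior; applications can replace it to add
// panic recovery, tracing, or other instrumentation around every goroutine
// the driver starts. (Sketch inferred from the call sites in this diff.)
var GoroutineWrapper = func(_ context.Context, f func()) {
	f()
}
```

A hypothetical override could, for example, recover and log panics from driver goroutines instead of letting them crash the process:

```go
gosnowflake.GoroutineWrapper = func(ctx context.Context, f func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("snowflake goroutine panicked: %v", r) // assumes "log" is imported
		}
	}()
	f()
}
```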