@@ -262,10 +262,25 @@ index_shape_dim(A, dim, ::Colon) = (trailingsize(A, dim),)
 # ambiguities for AbstractArray subtypes. See the note in abstractarray.jl

 # Note that it's most efficient to call checkbounds first, and then to_index
-@inline function _getindex(l::LinearIndexing, A::AbstractArray, I::Union{Real, AbstractArray, Colon}...)
+@inline function _getindex{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, I::Vararg{Union{Real, AbstractArray, Colon},N})
     @boundscheck checkbounds(A, I...)
     _unsafe_getindex(l, A, I...)
 end
+# Explicitly allow linear indexing with one non-scalar index
+@inline function _getindex(l::LinearIndexing, A::AbstractArray, i::Union{Real, AbstractArray, Colon})
+    @boundscheck checkbounds(A, i)
+    _unsafe_getindex(l, _maybe_linearize(l, A), i)
+end
+# But we can speed up LinearSlow arrays by reshaping them to vectors:
+_maybe_linearize(::LinearFast, A::AbstractArray) = A
+_maybe_linearize(::LinearSlow, A::AbstractVector) = A
+_maybe_linearize(::LinearSlow, A::AbstractArray) = reshape(A, length(A))
+
+@inline function _getindex{N}(l::LinearIndexing, A::AbstractArray, I::Vararg{Union{Real, AbstractArray, Colon},N}) # TODO: DEPRECATE FOR #14770
+    @boundscheck checkbounds(A, I...)
+    _unsafe_getindex(l, reshape(A, Val{N}), I...)
+end
+
 @generated function _unsafe_getindex(::LinearIndexing, A::AbstractArray, I::Union{Real, AbstractArray, Colon}...)
     N = length(I)
     quote
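
The heart of this hunk is the new `Vararg{Union{Real, AbstractArray, Colon},N}` signature: when the number of indices equals `ndims(A)`, the exact-N method wins; a single non-scalar index goes to the dedicated linear-indexing method; any other count falls through to the catch-all slated for deprecation (#14770). A rough, self-contained illustration of that dispatch, using hypothetical names and the same 0.5-era syntax as the diff (not part of the commit):

    # Hypothetical stand-ins that only report which method a call selects.
    which_index{T,N}(A::AbstractArray{T,N}, I::Vararg{Union{Real,AbstractArray,Colon},N}) = :exact_N
    which_index(A::AbstractArray, i::Union{Real,AbstractArray,Colon}) = :linear
    which_index(A::AbstractArray, I::Union{Real,AbstractArray,Colon}...) = :partial

    B = rand(2, 3)
    which_index(B, 1, 2)     # => :exact_N  (one index per dimension)
    which_index(B, 5)        # => :linear   (a single linear index)
    which_index(B, 1, 2, 1)  # => :partial  (trailing index; deprecation path)
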
@@ -279,8 +294,6 @@
 end

 # logical indexing optimization - don't use find (within to_index)
-# This is inherently a linear operation in the source, but we could potentially
-# use fast dividing integers to speed it up.
 function _unsafe_getindex(::LinearIndexing, src::AbstractArray, I::AbstractArray{Bool})
     shape = index_shape(src, I)
     dest = similar(src, shape)
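
The `AbstractArray{Bool}` method kept here skips `find` inside `to_index` and walks the mask directly. A minimal sketch of that gather pattern with a hypothetical name (the real method builds `dest` from `index_shape` and a generated loop):

    # Collect the elements selected by a Bool mask in one pass, without
    # materializing find(mask) first.
    function gather_masked(src::AbstractArray, mask::AbstractArray{Bool})
        size(mask) == size(src) || throw(DimensionMismatch("mask must match src"))
        dest = similar(src, sum(mask))   # one slot per true entry
        k = 0
        for i in eachindex(src, mask)
            if mask[i]
                k += 1
                @inbounds dest[k] = src[i]
            end
        end
        dest
    end

    gather_masked([10, 20, 30, 40], [true, false, true, true])  # => [10, 30, 40]
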
@@ -305,7 +318,7 @@
         $(Expr(:meta, :inline))
         D = eachindex(dest)
         Ds = start(D)
-        idxlens = index_lengths(src, I...) # TODO: unsplat?
+        idxlens = index_lengths(src, I...)
         @nloops $N i d->(1:idxlens[d]) d->(@inbounds j_d = getindex(I[d], i_d)) begin
             d, Ds = next(D, Ds)
             @inbounds dest[d] = @ncall $N getindex src j
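
For concreteness, the `@nloops`/`@ncall` body above expands, roughly, to a nest of loops that walks the destination with its own iterator while pulling a source coordinate from each index vector. A hand-written sketch for `N == 2` with two vector indices (a hypothetical helper, not the actual macro expansion):

    function gather2!(dest::AbstractArray, src::AbstractArray, I1, I2)
        D  = eachindex(dest)
        Ds = start(D)
        for i_2 in 1:length(I2)
            @inbounds j_2 = I2[i_2]
            for i_1 in 1:length(I1)
                @inbounds j_1 = I1[i_1]
                d, Ds = next(D, Ds)
                @inbounds dest[d] = src[j_1, j_2]
            end
        end
        dest
    end
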
@@ -322,10 +335,21 @@ end
 # before redispatching to the _unsafe_batchsetindex!
 _iterable(v::AbstractArray) = v
 _iterable(v) = repeated(v)
-@inline function _setindex!(l::LinearIndexing, A::AbstractArray, x, J::Union{Real,AbstractArray,Colon}...)
+@inline function _setindex!{T,N}(l::LinearIndexing, A::AbstractArray{T,N}, x, J::Vararg{Union{Real,AbstractArray,Colon},N})
     @boundscheck checkbounds(A, J...)
     _unsafe_setindex!(l, A, x, J...)
 end
+@inline function _setindex!(l::LinearIndexing, A::AbstractArray, x, j::Union{Real,AbstractArray,Colon})
+    @boundscheck checkbounds(A, j)
+    _unsafe_setindex!(l, _maybe_linearize(l, A), x, j)
+    A
+end
+@inline function _setindex!{N}(l::LinearIndexing, A::AbstractArray, x, J::Vararg{Union{Real, AbstractArray, Colon},N}) # TODO: DEPRECATE FOR #14770
+    @boundscheck checkbounds(A, J...)
+    _unsafe_setindex!(l, reshape(A, Val{N}), x, J...)
+    A
+end
+
 @inline function _unsafe_setindex!(::LinearIndexing, A::AbstractArray, x, J::Union{Real,AbstractArray,Colon}...)
     _unsafe_batchsetindex!(A, _iterable(x), to_indexes(J...)...)
 end
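
The `_iterable` pair in the context lines is what lets a single batch loop serve both `A[J] = scalar` and `A[J] = array`: a scalar is wrapped in `repeated` so it zips against the destination indices indefinitely. A self-contained sketch with hypothetical names, written against the same 0.5-era iteration protocol (`start`/`next`) the file already uses:

    my_iterable(v::AbstractArray) = v      # arrays are consumed elementwise
    my_iterable(v) = Base.repeated(v)      # scalars repeat for every index

    function batch_set!(A::AbstractArray, x, inds)
        it = my_iterable(x)
        s = start(it)
        for i in inds
            v, s = next(it, s)
            A[i] = v
        end
        A
    end

    batch_set!(zeros(5), 1.0, 2:4)        # scalar broadcast into A[2:4]
    batch_set!(zeros(5), [7, 8, 9], 2:4)  # elementwise copy into A[2:4]
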
@@ -445,95 +469,6 @@ for (f, fmod, op) = ((:cummin, :_cummin!, :min), (:cummax, :_cummax!, :max))
     @eval ($f)(A::AbstractArray) = ($f)(A, 1)
 end

-## SubArray index merging
-# A view created like V = A[2:3:8, 5:2:17] can later be indexed as V[2:7],
-# creating a new 1d view.
-# In such cases we have to collapse the 2d space spanned by the ranges.
-#
-# API:
-#    merge_indexes(V, indexes::NTuple, index)
-# indexes encodes the view's trailing indexes into the parent array,
-# and index encodes the subset of these elements that we'll select.
-#
-# It returns a CartesianIndex or array of CartesianIndexes.
-
-# Checking 'in' a range is fast -- so check all possibilities and keep the good ones
-@generated function merge_indexes{N}(V, indexes::NTuple{N}, index::Union{Colon, Range})
-    # There may be a vector of cartesian indices in the passed indexes... which
-    # makes the number of indices more than N. Since we pre-allocate the array
-    # of CartesianIndexes, we need to figure out how big to make it
-    M = 0
-    for T in indexes.parameters
-        T <: CartesianIndex ? (M += length(T)) : (M += 1)
-    end
-    index_length_expr = index <: Colon ? Symbol("Istride_", N+1) : :(length(index))
-    quote
-        Cartesian.@nexprs $N d->(I_d = indexes[d])
-        dimlengths = Cartesian.@ncall $N index_lengths_dim V.parent length(V.indexes)-N+1 I
-        Istride_1 = 1   # strides of the indexes to merge
-        Cartesian.@nexprs $N d->(Istride_{d+1} = Istride_d*dimlengths[d])
-        idx_len = $(index_length_expr)
-        if idx_len < 0.1*$(Symbol("Istride_", N+1)) # this has not been carefully tuned
-            return merge_indexes_div(V, indexes, index, dimlengths)
-        end
-        Cartesian.@nexprs $N d->(counter_d = 1) # counter_0 is the linear index
-        k = 0
-        merged = Array(CartesianIndex{$M}, idx_len)
-        Cartesian.@nloops $N i d->(1:dimlengths[d]) d->(counter_{d-1} = counter_d + (i_d-1)*Istride_d; @inbounds idx_d = I_d[i_d]) begin
-            if counter_0 in index # this branch is elided for ::Colon
-                @inbounds merged[k+=1] = Cartesian.@ncall $N CartesianIndex{$M} idx
-            end
-        end
-        merged
-    end
-end
-
-# mapping getindex across the parent and subindices rapidly gets too big to
-# automatically inline, but it is crucial that it does so to avoid allocations
-# Unlike SubArray's reindex, merge_indexes doesn't drop any indices.
-@inline inlinemap(f, t::Tuple, s::Tuple) = (f(t[1], s[1]), inlinemap(f, tail(t), tail(s))...)
-inlinemap(f, t::Tuple{}, s::Tuple{}) = ()
-inlinemap(f, t::Tuple{}, s::Tuple) = ()
-inlinemap(f, t::Tuple, s::Tuple{}) = ()
-
-# Otherwise, we fall back to the slow div/rem method, using ind2sub.
-@inline merge_indexes{N}(V, indexes::NTuple{N}, index) =
-    merge_indexes_div(V, indexes, index, index_lengths_dim(V.parent, length(V.indexes)-N+1, indexes...))
-
-@inline merge_indexes_div{N}(V, indexes::NTuple{N}, index::Real, dimlengths) =
-    CartesianIndex(inlinemap(getindex, indexes, ind2sub(dimlengths, index)))
-merge_indexes_div{N}(V, indexes::NTuple{N}, index::AbstractArray, dimlengths) =
-    reshape([CartesianIndex(inlinemap(getindex, indexes, ind2sub(dimlengths, i))) for i in index], size(index))
-merge_indexes_div{N}(V, indexes::NTuple{N}, index::Colon, dimlengths) =
-    [CartesianIndex(inlinemap(getindex, indexes, ind2sub(dimlengths, i))) for i in 1:prod(dimlengths)]
-
-# Merging indices is particularly difficult in the case where we partially linearly
-# index through a multidimensional array. It's easiest if we can simply reduce the
-# partial indices to a single linear index into the parent index array.
-function merge_indexes{N}(V, indexes::NTuple{N}, index::Tuple{Colon, Vararg{Colon}})
-    shape = index_shape(indexes[1], index...)
-    reshape(merge_indexes(V, indexes, :), (shape[1:end-1]..., shape[end]*prod(index_lengths_dim(V.parent, length(V.indexes)-length(indexes)+2, tail(indexes)...))))
-end
-@inline merge_indexes{N}(V, indexes::NTuple{N}, index::Tuple{Real, Vararg{Real}}) = merge_indexes(V, indexes, sub2ind(size(indexes[1]), index...))
-# In general, it's a little trickier, but we can use the product iterator
-# if we replace colons with ranges. This can be optimized further.
-function merge_indexes{N}(V, indexes::NTuple{N}, index::Tuple)
-    I = replace_colons(V, indexes, index)
-    shp = index_shape(indexes[1], I...) # index_shape does no bounds checking
-    dimlengths = index_lengths_dim(V.parent, length(V.indexes)-N+1, indexes...)
-    sz = size(indexes[1])
-    reshape([CartesianIndex(inlinemap(getindex, indexes, ind2sub(dimlengths, sub2ind(sz, i...)))) for i in product(I...)], shp)
-end
-@inline replace_colons(V, indexes, I) = replace_colons_dim(V, indexes, 1, I)
-@inline replace_colons_dim(V, indexes, dim, I::Tuple{}) = ()
-@inline replace_colons_dim(V, indexes, dim, I::Tuple{Colon}) =
-    (1:trailingsize(indexes[1], dim)*prod(index_lengths_dim(V.parent, length(V.indexes)-length(indexes)+2, tail(indexes)...)),)
-@inline replace_colons_dim(V, indexes, dim, I::Tuple{Colon, Vararg{Any}}) =
-    (1:size(indexes[1], dim), replace_colons_dim(V, indexes, dim+1, tail(I))...)
-@inline replace_colons_dim(V, indexes, dim, I::Tuple{Any, Vararg{Any}}) =
-    (I[1], replace_colons_dim(V, indexes, dim+1, tail(I))...)
-
-
 cumsum(A::AbstractArray, axis::Integer=1) = cumsum!(similar(A, Base._cumsum_type(A)), A, axis)
 cumsum!(B, A::AbstractArray) = cumsum!(B, A, 1)
 cumprod(A::AbstractArray, axis::Integer=1) = cumprod!(similar(A), A, axis)
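
For reference, the div/rem fallback removed above reduces to mapping a linear position within the product of the view's index vectors back to parent coordinates via `ind2sub`. A stripped-down sketch with a hypothetical name (the real code also handles `CartesianIndex` entries and the parent's trailing sizes):

    function merge_one(indexes::Tuple, i::Integer)
        dimlengths = map(length, indexes)
        CartesianIndex(map(getindex, indexes, ind2sub(dimlengths, i)))
    end

    # The 5th element of V = A[2:3:8, 5:2:17], as an index into A:
    merge_one((2:3:8, 5:2:17), 5)   # => CartesianIndex(5, 7)
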