Commit a70e6c3

rename @parallel to @distributed. part of #19578 (#25528)
1 parent e216430 commit a70e6c3

File tree

docs/src/index.md
src/Distributed.jl
src/macros.jl
test/distributed_exec.jl

4 files changed: +22 −20 lines

docs/src/index.md

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ Distributed.@fetch
 Distributed.@fetchfrom
 Distributed.@async
 Distributed.@sync
-Distributed.@parallel
+Distributed.@distributed
 Distributed.@everywhere
 Distributed.clear!(::Any, ::Any; ::Any)
 Distributed.remoteref_id

src/Distributed.jl

Lines changed: 3 additions & 1 deletion
@@ -26,7 +26,7 @@ export
     @fetch,
     @fetchfrom,
     @everywhere,
-    @parallel,
+    @distributed,

     AbstractWorkerPool,
     addprocs,
@@ -78,6 +78,8 @@ include("pmap.jl")
 include("managers.jl") # LocalManager and SSHManager
 include("precompile.jl")

+@eval @deprecate $(Symbol("@parallel")) $(Symbol("@distributed"))
+
 function _require_callback(mod::Symbol)
     if Base.toplevel_load[] && myid() == 1 && nprocs() > 1
         # broadcast top-level import/using from node 1 (only)
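
The added @eval @deprecate line turns the old macro name into an alias that warns and forwards to the new one. A minimal sketch of what user code sees after this change (the exact warning text below is illustrative, not quoted from the release):

    using Distributed
    addprocs(2)

    # Old spelling: still works, but emits a deprecation warning along
    # the lines of "@parallel is deprecated, use @distributed instead".
    s_old = @parallel (+) for i in 1:100
        i
    end

    # New spelling: identical semantics.
    s_new = @distributed (+) for i in 1:100
        i
    end

    @assert s_old == s_new == 5050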

src/macros.jl

Lines changed: 9 additions & 9 deletions
@@ -279,38 +279,38 @@ function make_pfor_body(var, body)
 end

 """
-    @parallel
+    @distributed

-A parallel for loop of the form :
+A distributed memory, parallel for loop of the form :

-    @parallel [reducer] for var = range
+    @distributed [reducer] for var = range
         body
     end

 The specified range is partitioned and locally executed across all workers. In case an
-optional reducer function is specified, `@parallel` performs local reductions on each worker
+optional reducer function is specified, `@distributed` performs local reductions on each worker
 with a final reduction on the calling process.

-Note that without a reducer function, `@parallel` executes asynchronously, i.e. it spawns
+Note that without a reducer function, `@distributed` executes asynchronously, i.e. it spawns
 independent tasks on all available workers and returns immediately without waiting for
 completion. To wait for completion, prefix the call with [`@sync`](@ref), like :

-    @sync @parallel for var = range
+    @sync @distributed for var = range
         body
     end
 """
-macro parallel(args...)
+macro distributed(args...)
     na = length(args)
     if na==1
         loop = args[1]
     elseif na==2
         reducer = args[1]
         loop = args[2]
     else
-        throw(ArgumentError("wrong number of arguments to @parallel"))
+        throw(ArgumentError("wrong number of arguments to @distributed"))
     end
     if !isa(loop,Expr) || loop.head !== :for
-        error("malformed @parallel loop")
+        error("malformed @distributed loop")
     end
     var = loop.args[1].args[1]
     r = loop.args[1].args[2]
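
To illustrate the two modes the docstring above describes, a short sketch (worker count and loop bodies are illustrative):

    using Distributed
    addprocs(4)

    # With a reducer: each worker reduces its own chunk of the range
    # locally, then the partial results are combined on the caller.
    total = @distributed (+) for i in 1:1000
        i^2
    end

    # Without a reducer the loop returns immediately; prefix it with
    # @sync to block until every worker has finished.
    @sync @distributed for i in 1:10
        println("iteration $i ran on worker $(myid())")
    end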

test/distributed_exec.jl

Lines changed: 9 additions & 9 deletions
@@ -11,7 +11,7 @@ include(joinpath(Sys.BINDIR, "..", "share", "julia", "test", "testenv.jl"))
 # Test a few "remote" invocations when no workers are present
 @test remote(myid)() == 1
 @test pmap(identity, 1:100) == [1:100...]
-@test 100 == @parallel (+) for i in 1:100
+@test 100 == @distributed (+) for i in 1:100
     1
 end
@@ -272,14 +272,14 @@ end
 test_regular_io_ser(Future())
 test_regular_io_ser(RemoteChannel())

-# Test @parallel load balancing - all processors should get either M or M+1
+# Test @distributed load balancing - all processors should get either M or M+1
 # iterations out of the loop range for some M.
-ids = @parallel((a,b)->[a;b], for i=1:7; myid(); end)
+ids = @distributed((a,b)->[a;b], for i=1:7; myid(); end)
 workloads = Int[sum(ids .== i) for i in 2:nprocs()]
 @test maximum(workloads) - minimum(workloads) <= 1

-# @parallel reduction should work even with very short ranges
-@test @parallel(+, for i=1:2; i; end) == 3
+# @distributed reduction should work even with very short ranges
+@test @distributed(+, for i=1:2; i; end) == 3

 @test_throws ArgumentError sleep(-1)
 @test_throws ArgumentError timedwait(()->false, 0.1, pollint=-0.5)
@@ -698,7 +698,7 @@ end

 # issue #8207
 let A = Any[]
-    @parallel (+) for i in (push!(A,1); 1:2)
+    @distributed (+) for i in (push!(A,1); 1:2)
         i
     end
     @test length(A) == 1
@@ -817,13 +817,13 @@ end

 # issue #16451
 rng=RandomDevice()
-retval = @parallel (+) for _ in 1:10
+retval = @distributed (+) for _ in 1:10
     rand(rng)
 end
 @test retval > 0.0 && retval < 10.0

 rand(rng)
-retval = @parallel (+) for _ in 1:10
+retval = @distributed (+) for _ in 1:10
     rand(rng)
 end
 @test retval > 0.0 && retval < 10.0
@@ -1267,7 +1267,7 @@ foreach(wait, refs)
 #6760
 if true
     a = 2
-    x = @parallel (vcat) for k=1:2
+    x = @distributed (vcat) for k=1:2
         sin(a)
     end
 end
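
As a worked instance of the load-balancing property the test above asserts: splitting n iterations over p workers gives each worker either M = n ÷ p or M + 1 iterations. A minimal self-contained sketch (not the library's internal range splitter):

    # Split n iterations as evenly as possible over p workers:
    # the first rem(n, p) workers get one extra iteration.
    function even_split(n, p)
        base, extra = divrem(n, p)
        return [base + (i <= extra ? 1 : 0) for i in 1:p]
    end

    workloads = even_split(7, 3)   # [3, 2, 2]
    @assert maximum(workloads) - minimum(workloads) <= 1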
