@@ -92,9 +92,9 @@ let var"'" = Diffractor.PrimeDerivativeBack
    @test @inferred(sin'(1.0)) == cos(1.0)
    @test @inferred(sin''(1.0)) == -sin(1.0)
    @test sin'''(1.0) == -cos(1.0)
-    @test sin''''(1.0) == sin(1.0) broken = VERSION >= v"1.8"
-    @test sin'''''(1.0) == cos(1.0) broken = VERSION >= v"1.8"
-    @test sin''''''(1.0) == -sin(1.0) broken = VERSION >= v"1.8"
+    @test sin''''(1.0) == sin(1.0)
+    @test sin'''''(1.0) == cos(1.0) # broken = VERSION >= v"1.8"
+    @test sin''''''(1.0) == -sin(1.0) # broken = VERSION >= v"1.8"

    f_getfield(x) = getfield((x,), 1)
    @test f_getfield'(1) == 1
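For context, a minimal sketch (not part of the diff) of the prime-derivative binding named in this hunk's header: rebinding var"'" to Diffractor.PrimeDerivativeBack makes postfix ' apply reverse-mode differentiation, and primes compose, which is what the assertions above exercise.

    using Diffractor

    let var"'" = Diffractor.PrimeDerivativeBack
        # Each prime wraps the function in another derivative operator,
        # so sin'''' is the fourth derivative, which is sin again.
        sin'(1.0)     # ≈ cos(1.0)
        sin''''(1.0)  # ≈ sin(1.0)
    end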
@@ -219,6 +219,68 @@ z45, delta45 = frule_via_ad(DiffractorRuleConfig(), (0,1), x -> log(exp(x)), 2)
@test z45 ≈ 2.0
@test delta45 ≈ 1.0

+# PR #82 - getindex on non-numeric arrays
+@test gradient(ls -> ls[1](1.), [Base.Fix1(*, 1.)])[1][1] isa Tangent{<:Base.Fix1}
+
+@testset "broadcast" begin
+    @test gradient(x -> sum(x ./ x), [1,2,3]) == ([0,0,0],)  # derivatives_given_output
+    @test gradient(x -> sum(sqrt.(atan.(x, transpose(x)))), [1,2,3])[1] ≈ [0.2338, -0.0177, -0.0661] atol=1e-3
+    @test gradient(x -> sum(exp.(log.(x))), [1,2,3]) == ([1,1,1],)
+
+    @test gradient(x -> sum((exp∘log).(x)), [1,2,3]) == ([1,1,1],)  # frule_via_ad
+    exp_log(x) = exp(log(x))
+    @test gradient(x -> sum(exp_log.(x)), [1,2,3]) == ([1,1,1],)
+    @test gradient((x,y) -> sum(x ./ y), [1 2; 3 4], [1,2]) == ([1 1; 0.5 0.5], [-3, -1.75])
+    @test gradient((x,y) -> sum(x ./ y), [1 2; 3 4], 5) == ([0.2 0.2; 0.2 0.2], -0.4)
+    @test gradient(x -> sum((y -> y/x).([1,2,3])), 4) == (-0.375,)  # closure
+
+    @test gradient(x -> sum(sum, (x,) ./ x), [1,2,3])[1] ≈ [-4.1666, 0.3333, 1.1666] atol=1e-3  # array of arrays
+    @test gradient(x -> sum(sum, Ref(x) ./ x), [1,2,3])[1] ≈ [-4.1666, 0.3333, 1.1666] atol=1e-3
+    @test gradient(x -> sum(sum, (x,) ./ x), [1,2,3])[1] ≈ [-4.1666, 0.3333, 1.1666] atol=1e-3
+    @test gradient(x -> sum(sum, (x,) .* transpose(x)), [1,2,3])[1] ≈ [12, 12, 12]  # must not take the * fast path
+
+    @test gradient(x -> sum(x ./ 4), [1,2,3]) == ([0.25, 0.25, 0.25],)
+    @test gradient(x -> sum([1,2,3] ./ x), 4) == (-0.375,)  # x/y rule
+    @test gradient(x -> sum(x.^2), [1,2,3]) == ([2.0, 4.0, 6.0],)  # x.^2 rule
+    @test gradient(x -> sum([1,2,3] ./ x.^2), 4) == (-0.1875,)  # scalar^2 rule
+
+    @test gradient(x -> sum((1,2,3) .- x), (1,2,3)) == (Tangent{Tuple{Int,Int,Int}}(-1.0, -1.0, -1.0),)
+    @test gradient(x -> sum(transpose([1,2,3]) .- x), (1,2,3)) == (Tangent{Tuple{Int,Int,Int}}(-3.0, -3.0, -3.0),)
+    @test gradient(x -> sum([1 2 3] .+ x .^ 2), (1,2,3)) == (Tangent{Tuple{Int,Int,Int}}(6.0, 12.0, 18.0),)
+
+    @test gradient(x -> sum(x .> 2), [1,2,3]) |> only |> iszero  # Bool output
+    @test gradient(x -> sum(1 .+ iseven.(x)), [1,2,3]) |> only |> iszero
+    @test gradient((x,y) -> sum(x .== y), [1,2,3], [1 2 3]) == (NoTangent(), NoTangent())
+    @test gradient(x -> sum(x .+ [1,2,3]), true) |> only |> iszero  # Bool input
+    @test gradient(x -> sum(x ./ [1,2,3]), [true false]) |> only |> iszero
+    @test gradient(x -> sum(x .* transpose([1,2,3])), (true, false)) |> only |> iszero
+
+    tup_adj = gradient((x,y) -> sum(2 .* x .+ log.(y)), (1,2), transpose([3,4,5]))
+    @test tup_adj[1] == Tangent{Tuple{Int64, Int64}}(6.0, 6.0)
+    @test tup_adj[2] ≈ [0.6666666666666666 0.5 0.4]
+    @test tup_adj[2] isa Transpose
+    @test gradient(x -> sum(atan.(x, (1,2,3))), Diagonal([4,5,6]))[1] isa Diagonal
+
+    @test gradient(x -> sum((y -> (x*y)).([1,2,3])), 4.0) == (6.0,)  # closure
+end
+
+@testset "broadcast, 2nd order" begin
+    @test gradient(x -> gradient(y -> sum(y .* y), x)[1] |> sum, [1,2,3.0])[1] == [2,2,2]  # calls "split broadcasting generic" with f = unthunk
+    @test gradient(x -> gradient(y -> sum(y .* x), x)[1].^3 |> sum, [1,2,3.0])[1] == [3,12,27]
+    @test_broken gradient(x -> gradient(y -> sum(y .* 2 .* y'), x)[1] |> sum, [1,2,3.0])[1] == [12, 12, 12]  # Control flow support not fully implemented yet for higher-order
+
+    @test_broken gradient(x -> sum(gradient(x -> sum(x .^ 2 .+ x'), x)[1]), [1,2,3.0])[1] == [6,6,6]  # BoundsError: attempt to access 18-element Vector{Core.Compiler.BasicBlock} at index [0]
+    @test_broken gradient(x -> sum(gradient(x -> sum((x .+ 1) .* x .- x), x)[1]), [1,2,3.0])[1] == [2,2,2]
+    @test_broken gradient(x -> sum(gradient(x -> sum(x .* x ./ 2), x)[1]), [1,2,3.0])[1] == [1,1,1]
+
+    @test_broken gradient(x -> sum(gradient(x -> sum(exp.(x)), x)[1]), [1,2,3])[1] ≈ exp.(1:3)  # MethodError: no method matching copy(::Nothing)
+    @test_broken gradient(x -> sum(gradient(x -> sum(atan.(x, x')), x)[1]), [1,2,3.0])[1] ≈ [0,0,0]
+    @test_broken gradient(x -> sum(gradient(x -> sum(transpose(x) .* x), x)[1]), [1,2,3]) == ([6,6,6],)  # accum(a::Transpose{Float64, Vector{Float64}}, b::ChainRulesCore.Tangent{Transpose{Int64, Vector{Int64}}, NamedTuple{(:parent,), Tuple{ChainRulesCore.NoTangent}}})
+    @test_broken gradient(x -> sum(gradient(x -> sum(transpose(x) ./ x.^2), x)[1]), [1,2,3])[1] ≈ [27.675925925925927, -0.824074074074074, -2.1018518518518516]
+
+    @test_broken gradient(z -> gradient(x -> sum((y -> (x^2*y)).([1,2,3])), z)[1], 5.0) == (12.0,)
+end
+

# Higher order control flow not yet supported (https://github.com/JuliaDiff/Diffractor.jl/issues/24)
# include("pinn.jl")