Lines Matching full:backward
131 out.sum().backward()
228 result.sum().backward(go, create_graph=True)
246 def backward(ctx, grad_output): member in TestAutograd.test_function.MyFunction
275 def backward(ctx, grad_output): member in TestAutograd.test_once_differentiable.MyFunction
301 def backward(ctx, grad): member in TestAutograd.test_function_returns_input.MyFunction
306 MyFunction.apply(v).backward()
311 MyFunction.apply(v.clone()).backward()
321 def backward(ctx, grad): member in TestAutograd.test_function_returns_undefined_tensor.MyFunction
324 # Test that undefined tensors returned from custom backward function
328 MyFunction.apply(x).backward()
331 MyFunction.apply(x**2).backward()
334 MyFunction.apply(x).sum().backward()
348 def backward(ctx, grad): member in TestAutograd.test_materialize_grads.MyFunction
353 torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
363 def backward(ctx, grad): member in TestAutograd.test_dont_materialize_grads.MyFunction
368 torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
382 def backward(ctx, g0, g1): member in TestAutograd.test_set_materialize_non_diff_grads.Func
388 out.backward()
396 def backward(self, grad_output): member in TestAutograd.test_legacy_function_deprecation_exception.MyFunction
413 def backward(ctx, input): member in TestAutograd.SimulateBackwardError
414 raise Exception("Simulate error on backward pass") # noqa: TRY002
422 with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
423 t3.sum().backward()
441 def backward(ctx, *grads): member in TestAutograd.test_custom_function_non_tensor_inputs_outputs.MyFunction
471 # Validate running backward.
472 torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
496 def backward(ctx, *args): member in TestAutograd.test_custom_function_no_tensors.MyFunction
519 def backward(ctx, grad_output): member in TestAutograd.test_invalid_gradients.MyFunction
524 MyFunction.apply(input).sum().backward()
546 y.backward()
575 x.sin().sum().backward()
598 x.sin().sum().backward()
614 def backward(ctx, gO): member in TestAutograd.test_will_engine_execute_node.MyFunction
644 # .backward(inputs=) is OK
646 torch.autograd.backward(out, inputs=(a, b), retain_graph=True)
649 # .backward() is OK
652 torch.autograd.backward(out, retain_graph=True)
675 with self.assertRaisesRegex(RuntimeError, "during the backward pass"):
693 def backward(ctx, gO): member in TestAutograd.test_custom_function_vmap_defaults.MySquare
712 def backward(ctx, gO): member in TestAutograd.test_custom_function_setup_context_simple.MySquare
737 def backward(ctx, gO, _): member in TestAutograd.test_custom_function_setup_context_multi_output.MySquare
758 def backward(ctx, gO): member in TestAutograd.test_custom_function_setup_context_multi_input.MyReshape
769 def backward(ctx, gO): member in TestAutograd.test_custom_function_setup_context_multi_input.MyReshapeRef
808 y.backward(grad_output, retain_graph=True)
811 y.backward(grad_output, create_graph=create_graph)
832 params.backward(backward_grad_tensor, create_graph=create_graph)
875 c.sum().backward()
885 z.backward(torch.ones(2, 2), create_graph=True)
894 grad_sum.backward(torch.ones(2, 2))
904 z.backward(torch.ones(2, 2), create_graph=True)
1044 torch.autograd.backward(out.sum(), inputs=(edge_x, y))
1045 torch.autograd.backward(out.sum(), inputs=(x, y))
1046 torch.autograd.backward(out.sum(), inputs=(x, edge_y))
1047 torch.autograd.backward(out.sum(), inputs=(edge_x, edge_y))
1086 def backward(ctx, grad_output): member in TestAutograd.test_grad_fn_input_metadata.Test
1105 # Compute fn backward in two steps
1171 x.backward(grad_output)
1201 # but avoid segfault during backward of other nonleaf tensors
1213 x_list[0].backward()
1220 x_list[i].backward()
1270 sum.backward()
1284 tensor1.sum().backward()
1285 tensor2.sum().backward()
1298 tensor.sum().backward()
1327 loss.backward()
1330 loss_copy.backward()
1345 loss.backward()
1348 loss_copy.backward()
1368 tensor.sum().backward()
1403 tensor.sum().backward()
1421 x.sum().backward()
1449 b.sum().backward(retain_graph=True)
1459 b.sum().backward(inputs=(b,))
1475 b.clone().backward()
1483 b.clone().backward()
1508 b.backward()
1525 out.sum().backward(inputs=[b])
1573 c.backward(retain_graph=True)
1579 c.backward(inputs=(a, b), retain_graph=True)
1600 # Compute rest of backward pass
1601 torch.autograd.backward(intermediates, d_intermediates)
1609 x.backward()
1670 def backward(ctx, x): member in TestAutograd.test_grad_unreachable_discovery.MyFunc
1687 torch.autograd.backward(x, inputs=(y,)) # allow_unused is implicitly True!
1748 z.backward(torch.ones(5, 5), retain_graph=True)
1752 z.backward(torch.ones(5, 5), retain_graph=True)
1756 z.backward(torch.ones(5, 5), retain_graph=True)
1766 z.backward(torch.ones(5, 5), retain_graph=True)
1772 z.backward(torch.ones(5, 5))
1784 def backward(ctx, gO): member in TestAutograd._get_mul2.Mul2
1823 b.sum().backward()
1837 b.sum().backward()
1845 (var + mean).sum().backward()
1858 (var + mean).sum().backward()
1872 def backward(ctx, g1, _a, g2): member in TestAutograd.test_grad_fn_prehooks_multiple_outputs.DoubleMul2
1888 (c + d).sum().backward()
1911 b.sum().backward()
1915 # Remove hooks during backward
1935 b.sum().backward()
1972 loss.backward()
1998 z.sum().backward()
2016 def backward(ctx, grad_x, grad_y): member in TestAutograd.test_hook_none.NoneGradientFunction
2030 sum(rx, ry).sum().backward()
2043 out.backward(retain_graph=True)
2045 out.backward(retain_graph=True)
2053 out.backward()
2062 a.sum().backward()
2070 a.sum().backward()
2080 x.sum().backward()
2098 def backward(ctx, g1, g2): member in TestAutograd.test_retains_grad_inplace_multiple_outputs.DoubleMul
2111 (var + mean).sum().backward()
2130 (view + view2).sum().backward()
2132 # The old grad_fn, slice, wouldn't be part of the graph during backward
2159 b.sum().backward()
2177 b.sum().backward()
2188 def backward(ctx, g1, g2): member in TestAutograd.test_tensor_hooks_inplace_multiple_outputs.DoubleMul
2217 (out1 + out2 * 3).sum().backward()
2243 (view + view2).sum().backward()
2259 z.sum().backward()
2268 v.backward(grad_output)
2272 a.backward(grad_output)
2337 # input needs to require grad so we can call a backward pass
2353 s = r.sum().backward()
2372 r.sum().backward()
2391 torch.autograd.backward([z, c], [grad_z, grad_c])
2410 torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
2430 torch.autograd.backward(fn(), gradient, inputs=[x, y])
2435 torch.autograd.backward(fn(), gradient, inputs=[x])
2440 torch.autograd.backward(fn(), gradient, inputs=[y])
2445 torch.autograd.backward(fn(), gradient, inputs=y)
2453 lambda: torch.autograd.backward(fn(), gradient, inputs=[]),
2464 out.backward(
2477 # backward doesn't have an allow_unused flag, so the behavior of backward
2480 out.backward(
2492 torch.autograd.backward([y, z], [go_y, go_z])
2506 def backward(ctx, *grad): member in TestAutograd.test_save_output_nr.MultiOutputFn
2519 def backward(ctx, grad_b): member in TestAutograd.test_save_output_nr.TestFn
2523 TestFn.apply(b).sum().backward()
2590 def backward(ctx, grad_output): member in TestAutograd.test_free_deep_graph_pyfunction.MyOp
2609 # get an error in the backward that would complain that we've
2622 loss.backward()
2639 self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
2642 self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
2921 def backward(self, dy): member in TestAutograd.test_no_grad_python_function.MyOp
2938 indexed_var.sum().backward()
3014 y[idx].sum().backward()
3025 y[idx].sum().backward()
3036 y[idx].sum().backward()
3051 y[idx].sum().backward()
3059 # trigger a version check on `tensor` during the backward pass, which
3066 tensor.backward(torch.zeros_like(tensor))
3082 def backward(ctx, grad_output): member in TestAutograd.test_saved_variables_deprecated.MyFunction
3090 MyFunction.apply(x, y).sum().backward()
3111 # Make sure backward isn't called on these
3118 b.backward(torch.ones(5, 5))
3160 a.sum().backward()
3168 a.sum().backward()
3201 torch.autograd.backward([x, x], [grad_output, grad_output])
3210 torch.autograd.backward([b], [None])
3216 c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
3220 lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)),
3227 c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
3228 c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
3233 c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
3234 c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
3240 c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
3241 c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
3248 c.backward(torch.ones_like(c), create_graph=True)
3252 "Using backward() with create_graph=True" in str(w.message)
3262 "Using backward() with create_graph=True" in str(w.message)
3294 # Add doesn't need its inputs to do backward, so it shouldn't raise
3295 q.backward(torch.ones(5, 5), retain_graph=True)
3297 self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
3304 w.backward(torch.ones(5, 5), retain_graph=True)
3305 # r doesn't use the modified value in backward, so it should succeed
3306 r.backward(torch.ones(5, 5), retain_graph=True)
3308 self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
3319 r.backward(torch.ones(5, 5), retain_graph=True)
3321 w.backward(torch.ones(5, 5), retain_graph=True)
3323 self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
3331 y.backward(torch.ones(5, 5))
3335 self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
3346 def backward(ctx, grad_output): member in TestAutograd.test_mark_non_differentiable.MyFunction
3353 y.sum().backward()
3366 def backward(ctx, grad_a, grad_b): member in TestAutograd.test_mark_non_differentiable_mixed.MyFunction
3375 b.sum().backward()
3390 def backward(ctx, grad_output): member in TestAutograd.test_mark_non_differentiable_none.MyFunction
3395 (r * x).sum().backward()
3405 def backward(ctx, grad1, grad2): member in TestAutograd.test_return_duplicate.DoubleDuplicate
3426 def backward(ctx, grad1, grad2): member in TestAutograd.test_return_duplicate_inplace.DoubleInplace
3449 y.backward(torch.ones(*size))
3462 y.backward(torch.ones(*size))
3472 y.backward(torch.randn(2, 3, 4))
3553 o.sum().backward()
3562 outputs[0].backward(grad_output)
3576 out.backward(grad)
3580 out.backward(grad)
3612 def backward(self, grad_output): member in TestAutograd.test_gc_in_destructor.CollectOnDelete
3619 CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
3628 def backward(ctx, grad_x): member in TestAutograd.test_naughty_autograd_function_attribute_access.Id
3676 def backward(ctx, g): member in TestAutograd.test_naughty_anomaly_access.MyFunction
3681 y.backward()
3697 def backward(ctx, grad_x): member in TestAutograd.test_naughty_autograd_function_stashing_ctx.Id
3703 loss.backward(retain_graph=True)
3726 def backward(ctx, grad_output): member in TestAutograd.test_custom_autograd_repeated_grad_grad.Mult
3754 def backward(ctx, grad_output): member in TestAutograd.test_custom_autograd_no_early_free.Double
3758 # this is equivalent, but uses the output of .forward() in .backward()
3761 def backward(ctx, grad_output): member in TestAutograd.test_custom_autograd_no_early_free.Double2
3797 z.sum().backward()
3806 (y + a).sum().backward(retain_graph=True)
3809 (y + a).sum().backward() # this won't backprop to x
3835 y.backward()
3839 fvar.double().sum().backward()
3843 dvar.float().sum().backward()
3866 y.cpu().sum().backward()
3915 o.backward()
3945 def backward(ctx, grad_a, grad_b): member in TestAutograd.test_return_leaf.Identity
3960 (q + p + x).sum().backward()
3973 def backward(ctx, grad_a, grad_b): member in TestAutograd.test_return_leaf_inplace.Inplace
3983 q.sum().backward()
3995 x.sum().backward()
4015 RuntimeError, "modified by an inplace operation", lambda: z.backward()
4044 def backward(self, grad_output): member in TestAutograd.test_no_grad_input.MyFunction
4055 # This test checks the backward engine for a very subtle bug that appeared
4078 # This op will just return grad_output two times in backward
4092 out.backward(grad_output)
4106 def backward(ctx, grad_output): member in TestAutograd.test_save_none_for_backward.MyFn
4114 y.sum().backward()
4124 def backward(ctx, grad_output): member in TestAutograd.test_too_many_grads.MyFn
4129 y.sum().backward()
4156 def backward(ctx, grad_output, ignored): member in TestAutograd.test_dep_nograd.F1
4165 def backward(ctx, grad_output): member in TestAutograd.test_dep_nograd.F2
4174 c.backward(torch.ones(c.size()))
4233 def backward(ctx, grad_output): member in TestAutograd.test_simple_reentrant.Reenter
4235 ctx.output_var.sum().backward()
4241 out.sum().backward()
4261 def backward(ctx, grad): member in TestAutograd.test_reentrant_child_error.ReentrantFunc
4262 # Reentrant backward in child will throw an error.
4263 reentrant_root.backward()
4268 d.sum().backward()
4284 torch.autograd.backward(r1, grad)
4285 torch.autograd.backward(r2, grad)
4338 (D.sum() + U.sum()).backward()
4369 out.backward(torch.randn(out.size()))
4375 input.pow(exponent).sum().backward()
4391 t.backward(retain_graph=True)
4393 t.backward(retain_graph=True)
4395 t.backward(retain_graph=True)
4436 t.backward()
4453 out.backward()
4465 out.backward()
4496 out.backward()
4537 # Errors when not called in a backward
4539 RuntimeError, "should only be called during the backward pass"
4548 "expects the current backward to be executed with multithreading disabled",
4550 t.backward()
4626 b.backward()
4669 s.backward()
4672 # expecting the corresponding backward nodes to have the same numbers
4682 if e.name == fwd_name or (bwd_name in e.name and "Backward" in e.name):
4699 f"autograd::engine::evaluate_function: {bwd_name}Backward{idx}",
4701 self.assertEqual(ops[2].name, f"{bwd_name}Backward{idx}")
5020 # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
5058 gradient_penalty.backward()
5075 s.backward()
5078 # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
5112 def backward(ctx, gO): member in TestAutograd.test_anomaly_detect_nan.MyFunc
5123 out.backward() # Should not fail
5133 out.backward()
5144 out.backward()
5184 def backward(ctx, gO): member in TestAutograd.test_nested_anomaly_detect_nan.MyFunc
5198 def backward(ctx, gO): member in TestAutograd.test_nested_anomaly_detect_nan.MyFunc2
5215 gsum.backward() # should not fail
5227 gsum.backward()
5240 gsum.backward()
5302 # backward call in grad mode will work
5358 def backward(ctx, gO): member in TestAutograd.test_nested_anomaly_printstack_cleanup.get_ref.MyFunc
5368 def backward(ctx, gO): member in TestAutograd.test_nested_anomaly_printstack_cleanup.get_ref.MyFunc2
5381 ginp.backward()
5404 def backward(ctx, gO): member in TestAutograd.test_anomaly_mode_no_check_nan.MyFunc
5415 out.backward(retain_graph=True)
5422 out.backward(retain_graph=True)
5424 out.backward()
5436 def backward(ctx, grad): member in TestAutograd.test_no_grad_copy.MyFunc
5447 def backward(ctx, grad): member in TestAutograd.test_no_grad_copy.NonContGradFunc
5453 NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
5458 MyFunc.apply(a, b)[1][0].backward()
5477 def backward(ctx, grad): member in TestAutograd.test_no_grad_copy_sparse.MyFunc
5489 def backward(ctx, grad): member in TestAutograd.test_no_grad_copy_sparse.NonContGradFunc
5509 loss.backward(retain_graph=True)
5520 loss.backward(retain_graph=True)
5526 loss.backward(retain_graph=True)
5538 loss.backward(retain_graph=True)
5602 def backward(ctx, grad_out): member in TestAutograd.test_gradcheck_nondeterministic.NonDetFunc
5617 with self.assertRaisesRegex(RuntimeError, "Backward is not reentrant"):
5624 with self.assertRaisesRegex(RuntimeError, "Backward is not reentrant"):
5883 # when backward not multiplied by grad_output (non-sparse case)
5891 RuntimeError, "backward not multiplied by grad_output"
5900 # when backward not multiplied by grad_output (sparse case)
5908 RuntimeError, "backward not multiplied by grad_output"
5937 def backward(ctx, x): member in TestAutograd.test_gradcheck_backward_mul_by_grad_output.check.Test
5960 # when a runtime error is encountered while running backward
5977 "Expected backward function to handle undefined output grads",
6198 def backward(ctx, grad_out): member in TestAutograd.test_gradcheck_get_analytical_jacobian.NonDetFunc
6493 # If both fail, backward AD failure "hides" forward AD failure
6615 def backward(ctx, grad):
6624 MyFunction.apply(v).backward()
6649 def backward(ctx, x): member in TestAutograd.test_deep_reentrant.DeepReentrant
6653 DeepReentrant.apply(ctx.x).sum().backward()
6660 DeepReentrant.apply(v).sum().backward()
6665 DeepReentrant.apply(v2).sum().backward()
6676 def backward(ctx, x): member in TestAutograd.test_reentrant_priority.MyFunction
6689 def backward(ctx, x): member in TestAutograd.test_reentrant_priority.Reentrant
6694 Reentrant.apply(ctx.x).backward()
6700 v.backward()
6701 # The tasks for the Reentrant and MyFunction backward() will be added
6702 # to the queue in the autograd engine at the same time. The backward
6704 # backward tasks to the queue. We want to ensure all the reentrant tasks
6705 # are prioritized over the MyFunction backward task regardless of their
6735 mean_combined.backward()
6766 loss.backward() # triggers recomputation to check it runs in bfloat
6833 loss.backward()
6838 loss.backward()
6843 loss.backward()
6861 def backward(ctx, grad_out): member in TestAutograd.test_checkpointing_without_reentrant_custom_function_works.MyFunc
6865 # A recomputation is only triggered if your backward has a new
6883 out.sum().backward()
6915 out.backward()
6990 out.backward()
6999 out.backward()
7004 out.backward()
7010 out.backward()
7020 out.backward()
7030 out.backward()
7039 out.backward()
7054 # Recomputed variables only persist within a particular backward call.
7055 # If _saved_result is accessed outside of a backward, it will trigger
7063 # Backward clears the saved variable
7064 d.sum().backward()
7121 mean_combined.backward()
7123 mean_combined_no_checkpoint.backward()
7140 c.backward()
7172 out.sum().backward()
7184 def backward(ctx, x): member in TestAutograd.test_checkpointing_without_reentrant_saved_object_identity.Test
7192 Test.apply(a, b).backward()
7196 checkpoint(Test.apply, a, b, use_reentrant=False).backward()
7202 without reentrant autograd, for both backward() and autograd.grad().
7207 b.backward()
7212 c.backward()
7247 b.backward()
7254 c.backward()
7277 out.sum().backward()
7311 out_no_checkpoint.backward()
7312 out_checkpoint.backward()
7337 def backward(ctx, grad): member in TestAutograd.test_callback_adds_callback.MyFunc
7343 b.sum().backward()
7361 loss.backward()
7381 def backward(ctx, input): member in TestAutograd._test_reentrant_with_callbacks.MyFunc
7395 def backward(ctx, input): member in TestAutograd._test_reentrant_with_callbacks.MyReentrantFunc
7399 # Reentrant backward call.
7403 tmp_out.backward()
7409 torch.autograd.backward([t3])
7444 out.backward()
7454 loss.backward()
7466 out.backward()
7475 loss.backward()
7489 # TODO: I don't think we have a backward saving a list of tensors
7503 out2.sum().backward()
7539 out.sum().backward()
7657 out.sum().backward()
7689 def backward(ctx, g): member in TestAutograd.test_custom_function_saved_tensors.getFn.MyFn
7715 y.sum().backward()
7764 def backward(ctx, x): member in TestAutograd.test_autograd_node_isinstance.Func
7772 out.sum().backward()
7855 def backward(ctx, grad): member in TestAutograd.test_inplace_not_requires_grad.MyFn
7907 # This indicator is used to track how many times the backward function was called
7922 def backward(ctx, ga): member in TestAutograd._do_test_autograd_simple_views_python.IdOneOutput
7936 def backward(ctx, ga, gab): member in TestAutograd._do_test_autograd_simple_views_python.IdTwoOutput
7956 def backward(ctx, grad): member in TestAutograd._do_test_autograd_simple_views_python.ViewOfTemp
8017 # Was the custom backward called properly
8019 ga_nz[0] = True # For the case where the backward is called
8025 fn(a, b).abs().backward()
8049 def backward(ctx, x): member in TestAutograd.test_autograd_inplace_views_creation_meta.Func
8157 def backward(ctx, x): member in TestAutograd.test_autograd_print_tensor.Func
8194 b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
8202 b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
8219 # This indicator is used to track how many times the backward function was called
8232 def backward(ctx, grad): member in TestAutograd.test_autograd_multiple_views_python.ComplexView
8244 out.sum().backward()
8273 def backward(ctx, grad): member in TestAutograd.test_autograd_python_custom_function_inplace.MyAdder
8282 c.sum().backward()
8289 c.sum().backward()
8295 c.sum().backward()
8308 def backward(ctx, grad): member in TestAutograd.test_autograd_python_custom_function_inplace.MyAdderBad
8329 def backward(ctx, ga, gab): member in TestAutograd.test_autograd_python_custom_function_inplace.MyBadAdder
8336 (c * d).sum().backward()
8343 (c * d).sum().backward()
8362 def backward(ctx, ga, gab): member in TestAutograd.test_autograd_python_custom_function_inplace.MyOutPlaceAdder
8386 def backward(ctx, grad_output): member in TestAutograd.test_custom_function_mark_dirty_not_differentiable.get_custom_fn.InplaceMul
8431 z_complex.sum().abs().backward()
8444 def backward(ctx, gx): member in TestAutograd.test_custom_function_return_view_in_nograd.Alias
8486 def backward(ctx): member in TestAutograd.test_custom_function_preserve_torch_function_when_return_as_is.Fn
8500 def backward(ctx, go): member in TestAutograd.test_grad_mode_restored_reentrant.MyFunction
8513 MyFunction.apply(inp).sum().backward()
8515 MyFunction.apply(inp).sum().backward(create_graph=True)
8521 c.backward()
8527 c.backward()
8533 def backward(ctx, foo): member in TestAutograd.test_custom_function_error.BadFw
8547 def backward(ctx, foo): member in TestAutograd.test_custom_function_error.BadBw2
8563 with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
8564 BadBw.apply(inp).sum().backward()
8567 RuntimeError, "Implementing both 'backward' and 'vjp'"
8569 BadBw2.apply(inp).sum().backward()
8806 out.backward()
8810 # When saved for backward, but not saved for forward
8831 out.backward()
8920 def backward(ctx, grad): member in TestAutograd.test_custom_function_local_inplace.MyFn
9041 def backward(ctx, gO): member in TestAutograd.test_custom_function_cycle.MyFn
9056 out.sum().backward()
9067 # The backward clears the saved_variables but not the __dict__
9075 # If BackwardHook saves grad_output, it can create a cycle when we perform backward
9106 tmp.exp().sum().backward(create_graph=True)
9109 "Using backward() with create_graph=True" in str(w[0].message)
9112 # Remove the backward + create_graph=True cycle
9145 def backward(ctx, grad): member in TestAutograd.test_hook_closure_cycle.Function
9181 out.backward(retain_graph=True)
9187 out.backward(retain_graph=True)
9234 torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
9258 c.sum().backward()
9294 y.sum().backward()
9296 y.sum().backward()
9306 y.sum().backward()
9308 y.sum().backward()
9311 # double backward
9320 g.sum().backward()
9322 g.sum().backward()
9378 b.backward()
9384 b.backward()
9396 y.sum().backward()
9424 y.sum().backward()
9432 y.sum().backward()
9440 y.sum().backward()
9448 y.sum().backward()
9460 y.sum().backward()
9477 y.sum().backward()
9478 z.sum().backward()
9537 out.backward()
9550 def backward(ctx, grad_out): member in TestAutograd.test_saved_tensor_hooks_custom_function_intermediates.Func
9558 out.backward()
9584 out_test.sum().backward()
9596 def backward(ctx, grad_output): member in TestAutograd.test_saved_tensors_hook_version_counter_not_shared.Test
9615 b.backward()
9621 b.sum().backward()
9629 c.sum().backward()
9640 d.sum().backward()
9682 y.sum().backward()
9691 g.sum().backward()
9699 g.sum().backward()
9708 g.sum().backward()
9718 g.sum().backward()
9747 y.sum().backward()
9812 out.sum().backward()
9831 def backward(ctx, gO): member in TestAutograd.test_multi_grad_all_hooks.Foo
9848 static torch::autograd::variable_list backward(
9885 out.sum().backward(inputs=(t2, t3), retain_graph=True)
9889 out.sum().backward(inputs=(t1, t4), retain_graph=True)
9892 out.sum().backward(inputs=(t1, t3), retain_graph=True)
9902 def backward(ctx, gO): member in TestAutograd.test_multi_grad_all_hooks.Func
9907 out.sum().backward(inputs=(t2, t3), retain_graph=True)
9911 out.sum().backward(inputs=(t1, t3), retain_graph=True)
9961 (out[0] + out[1]).sum().backward()
9973 (out[0] + out[1]).sum().backward()
10003 # last in the backward pass
10011 # Inplace modify so that the backward for
10026 get_out().sum().backward()
10242 def backward(ctx, gO): member in TestAutograd.test_setup_context_when_forward_has_default_args.PowFunction
10259 def backward(cls, ctx, gO): member in TestAutograd.test_setup_context_when_forward_has_default_args.PowFunctionWithClassmethod
10577 # - For backward AD (regular ops)
10654 # convolution backward will return an undefined tensor in that position.
10815 # backward mode check
10820 # Check that backward gradients properly propagate through packing/unpacking
11001 # Default detach is both forward and backward non-differentiable
11125 y.backward()
11131 …# tested here instead of adding a SampleInput as the backward for this case is non-differentiable …
11147 # test that double backward raises an error for the case where there are 2 zeros in src
11161 RuntimeError, "Double backward is unsupported for"
11175 m.sum().backward()
11208 other.detach().requires_grad_()._values().backward(
11229 def backward(ctx, grad_x): member in TestAutogradDeviceType.test_sparse_backward.FixedGradientFunction
11249 ).sum().abs().backward()
11257 ).sum().abs().backward()
11261 (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().abs().backward()
11271 converted.sum().backward()
11374 a.sum().backward()
11384 x[i].sum().backward()
11407 def backward(ctx, grad): member in TestAutogradDeviceType._test_reentrant_parent_error_on_cpu.ReentrantFunc
11408 # Reentrant backward in child will take much longer.
11409 reentrant_root.backward()
11418 torch.autograd.backward([t5.sum(), t7.sum()])
11497 # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
11530 out.sum().backward()
11604 y.sum().backward()
11618 def backward(ctx, grad_output): member in TestAutogradDeviceType.test_backward_device.Identity
11623 Identity.apply(v).backward()
11630 output.backward()
11676 def backward(ctx, grad_output): member in TestAutogradDeviceType.test_simple_reentrant_cross_device.ReentrantFunc
11680 (new_param**2).sum().backward()
11683 (new_param**2).sum().backward()
11689 out.sum().backward()
11696 out.sum().backward()
11703 out.sum().backward()
11733 out.sum().backward()
11741 x.sum().backward()
11751 v2.sum().backward()
11762 x.sum().backward()
11777 c.sum().backward()
11848 res.sum().backward()
11885 def backward(ctx, grad): member in TestAutogradDeviceType.test_inplace_on_view_python.PyAdd
11906 x.sum().backward()
11919 s1.sum().backward()
11949 def backward(ctx, grad): member in TestAutogradDeviceType.test_inplace_on_view_undefined_grad_output.InplaceFunc
11953 out.backward()
11989 a.sum().backward()
11991 b.sum().backward()
11996 (a * b).sum().backward()
12007 c.sum().backward()
12011 (c * d).sum().backward()
12022 out_c_inter.abs().backward()
12037 out_inter.abs().backward()
12057 # Test that warnings during backward are always propagated as Python warnings (gh-50209)
12062 with self.assertWarnsRegex(UserWarning, "Warn from backward"):
12063 b.backward()
12071 b.backward()
12124 out.sum().backward()
12147 out.sum().backward()
12171 out.sum().backward()
12184 out.backward()
12208 out.backward()
12247 out.abs().backward()
12254 out.abs().backward()
12265 out.backward()
12274 msg = "Trying to backward outside of the 'allow_mutation_on_saved_tensors' context"
12276 out.backward()
12285 out.backward()
12372 out.backward(torch.ones_like(out))
12580 # add is safe since it doesn't save any variable for backward
12586 out.backward(torch.ones_like(out))
12590 err_msg = "Inference tensors cannot be saved for backward"
12611 err_msg = "Inference tensors cannot be saved for backward"
12731 # simple multithreaded backward that creates threads at the beginning of training
12736 y.sum().backward()
12742 # simple multithreaded backward with only shared inputs (i.e. This is common
12746 y.sum().backward()
12750 # Since we are calling backward from multiple threads
12751 # and all threads share the same input, when we do backward
12768 # Multihooks should behave independently per execution of backward
12769 # Test that the hook fired the number of times we ran backward
12795 out.backward(inputs=(t2, t3), retain_graph=True)
12816 def backward(ctx, gO): member in TestMultithreadAutograd.test_multi_grad_all_hooks.Func
12828 out.backward(inputs=(t2, t3), retain_graph=True)
12840 # Multihooks should behave independently per execution of backward
12841 # Test that the hook fired the number of times we ran backward
12868 out.backward(inputs=(t2, t3), retain_graph=True)
12874 # Raise an error in one thread's backward
12888 def backward(ctx, gO): member in TestMultithreadAutograd.test_multi_grad_any_hooks.Func
12900 out.backward(inputs=(t2, t3), retain_graph=True)
12908 # the custom backward
12946 # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
12951 # Case 1: multiple backward with python threads, retain_graph=False
12958 y.sum().backward()
12973 # multiple backward with python threads, no error with retain_graph=True
12976 y.sum().backward(retain_graph=True)
12988 # multiple backward with jit threads (fork/join primitive)
13041 def backward(ctx, *grad): member in TestMultithreadAutograd.test_preserve_backtrace.Foo
13046 Foo.apply(t).sum().backward()
13102 def backward(ctx, gO): member in TestMultithreadAutograd.test_custom_function_propagates_errors_from_device_thread.MyFunc
13110 out.backward()
13313 # And backward is performed on the original graph
13314 out.backward()
13326 # do backward again, but skip over the part of the graph where
13328 x.backward(retain_graph=True)
13334 out.backward(retain_graph=True)
13351 out.backward()
13357 # when backward/recomputation is performed.
13363 out.backward()
13395 out.backward()
13405 out.backward()
13422 out.backward()
13430 out.backward()
13440 out.backward()
13441 # Why do one forward and backward?
13446 out.backward()
13451 f().backward()
13456 out.backward()
13545 out[1].sum().backward()
13618 out.sum().backward()
13670 out.sum().backward()
13713 # Case 1: If graph goes away without backward, make sure there's no reference cycle
13724 # Case 2: After backward, even if retain_graph=True, the storage should go away
13731 out.sum().backward(retain_graph=True)
13753 out.sum().backward()
13764 out.sum().backward()
13806 def backward(ctx, grad, _unused): function
13811 "mylib::sin_with_extra", backward, setup_context=setup_context
13848 out.sum().backward(retain_graph=True)
13850 with self.assertRaisesRegex(RuntimeError, "Trying to backward an extra time"):
13851 out.sum().backward(retain_graph=True)
13861 out.backward(grad)
13887 nt_out.backward(nt_grad)
13899 out.backward(grad)
13921 nt_out.backward(nt_grad)
13956 t_view_copy.backward(grad)
13957 t_view.backward(grad.clone())
13959 # forward and backward give the same shape + result
13962 # backward results are per-dispatch-key in derivatives.yaml
13982 foo(inp).backward()
13989 foo(nt).backward(
14007 def backward(ctx, gO): member in TestAutogradMultipleDispatch.test_backward_single_threaded.TestFn
14015 TestFn.apply(inp, None).sum().backward()
14018 TestFn.apply(inp, None).sum().backward()
14035 def backward(ctx, gO): member in TestAutogradMultipleDispatch.test_backward_tls_stash.TestFn
14043 TestFn.apply(inp, None).sum().backward()
14055 def backward(ctx, gO): member in TestAutogradMultipleDispatch.test_is_retain_graph.TestFn
14066 out.sum().backward(retain_graph=True)
14068 out.sum().backward(retain_graph=False)
14101 c.sum().backward()
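
Most of the matches above exercise one pattern: a custom torch.autograd.Function whose backward(ctx, grad_output) staticmethod is run by calling .backward() on a (usually scalar) output. A minimal illustrative sketch of that pattern, not taken from the test file itself:

import torch

class MySquare(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # Save the input so backward can use it.
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        # d(x*x)/dx = 2x; the chain rule multiplies by the incoming gradient.
        (x,) = ctx.saved_tensors
        return 2 * x * grad_output

x = torch.randn(3, requires_grad=True)
MySquare.apply(x).sum().backward()
assert torch.allclose(x.grad, 2 * x)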