diff --git a/.ci/docker/requirements.txt b/.ci/docker/requirements.txt
index 89dd788ae7..0e95c62c6b 100644
--- a/.ci/docker/requirements.txt
+++ b/.ci/docker/requirements.txt
@@ -36,7 +36,7 @@ datasets
 transformers
 torchmultimodal-nightly # needs to be updated to stable as soon as it's available
 onnx
-onnxscript
+onnxscript>=0.2.2
 onnxruntime
 evaluate
 accelerate>=0.20.1
@@ -69,5 +69,5 @@ pycocotools
 semilearn==0.3.2
 torchao==0.5.0
 segment_anything==1.0
-torchrec==1.0.0; platform_system == "Linux"
+torchrec==1.1.0; platform_system == "Linux"
 fbgemm-gpu==1.1.0; platform_system == "Linux"
diff --git a/.jenkins/build.sh b/.jenkins/build.sh
index 4a869d35a7..7705a429cd 100755
--- a/.jenkins/build.sh
+++ b/.jenkins/build.sh
@@ -22,10 +22,14 @@ sudo apt-get install -y pandoc
 #Install PyTorch Nightly for test.
 # Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
 # Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed).
-# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata
-# sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
-# sudo pip uninstall -y fbgemm-gpu torchrec
-# sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
+sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
+pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
+# sudo pip uninstall -y fbgemm-gpu
+# sudo pip3 install --pre fbgemm-gpu --index-url https://download.pytorch.org/whl/nightly/cu126/
+# pip install tensordict-nightly
+# pip install torchrl-nightly
+# sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
 
 # Install two language tokenizers for Translation with TorchText tutorial
 python -m spacy download en_core_web_sm
diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py
index 984632156e..3ed1e0c028 100644
--- a/.jenkins/validate_tutorials_built.py
+++ b/.jenkins/validate_tutorials_built.py
@@ -51,7 +51,14 @@
     "intermediate_source/text_to_speech_with_torchaudio",
     "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release.
     "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed.
-    "recipes_source/recipes/reasoning_about_shapes"
+    "intermediate_source/mario_rl_tutorial", # reenable after 3302 is fixed
+    "intermediate_source/reinforcement_ppo", # reenable after 3302 is fixed
+    "intermediate_source/pinmem_nonblock", # reenable after 3302 is fixed
+    "intermediate_source/dqn_with_rnn_tutorial", # reenable after 3302 is fixed
+    "advanced_source/pendulum", # reenable after 3302 is fixed
+    "advanced_source/coding_ddpg", # reenable after 3302 is fixed
+    "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed
+    "recipes_source/recipes/reasoning_about_shapes" # reenable after 3326 is fixed
 ]
 
 def tutorial_source_dirs() -> List[Path]:
diff --git a/intermediate_source/torch_export_tutorial.py b/intermediate_source/torch_export_tutorial.py
index 3ca6d09a52..20b1b4023e 100644
--- a/intermediate_source/torch_export_tutorial.py
+++ b/intermediate_source/torch_export_tutorial.py
@@ -995,7 +995,7 @@ def forward(self, x):
 #    with torch.no_grad():
 #        pt2_path = torch._inductor.aoti_compile_and_package(ep)
 #
-#    # Load and run the .so file in Python.
+#    # Load and run the .pt2 file in Python.
 #    # To load and run it in a C++ environment, see:
 #    # https://pytorch.org/docs/main/torch.compiler_aot_inductor.html
 #    aoti_compiled = torch._inductor.aoti_load_package(pt2_path)
diff --git a/recipes_source/foreach_map.py b/recipes_source/foreach_map.py
new file mode 100644
index 0000000000..655c0c5aa2
--- /dev/null
+++ b/recipes_source/foreach_map.py
@@ -0,0 +1,198 @@
+"""
+(beta) Explicit horizontal fusion with foreach_map and torch.compile
+======================================================================
+
+**Author:** `Michael Lazos <https://github.com/mlazos>`_
+"""
+
+#########################################################
+# Horizontal fusion is a key optimization in ML compilers. In eager mode,
+# this is typically expressed using the ``torch._foreach*`` ops, which parallelize
+# an operation across a list of tensors. However, supporting all possible permutations
+# of arguments (for example, mixtures of scalars and lists) is quite difficult. ``foreach_map``
+# converts any pointwise op in ``torch`` into a horizontally fused foreach
+# variant. In this tutorial, we will demonstrate how to implement the Adam optimizer
+# with ``foreach_map`` to generate a fully fused kernel.
+#
+#
+# .. note::
+#
+#    This tutorial requires PyTorch 2.7.0 or later.
+
+#####################################################################
+# Model Setup
+# ~~~~~~~~~~~~~~~~~~~~~
+# For this example, we'll use a simple sequence of linear layers.
+# We instantiate an independent copy to compare the two optimizer implementations.
+#
+import torch
+
+# exit cleanly if we are on a device that doesn't support ``torch.compile``
+if not torch.cuda.is_available() or torch.cuda.get_device_capability() < (7, 0):
+    print("Exiting because torch.compile is not supported on this device.")
+    import sys
+    sys.exit(0)
+
+# Create simple model
+model = torch.nn.Sequential(
+    *[torch.nn.Linear(1024, 1024, False, device="cuda") for _ in range(10)]
+)
+model_copy = torch.nn.Sequential(
+    *[torch.nn.Linear(1024, 1024, False, device="cuda") for _ in range(10)]
+)
+input = torch.rand(1024, device="cuda")
+
+# run forward pass
+output = model(input)
+output_copy = model_copy(input)
+
+# run backward to populate the grads for our optimizer below
+output.sum().backward()
+output_copy.sum().backward()
+
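+#####################################################################
+# Before building the optimizer, here is a minimal sketch (an added
+# illustration with hypothetical names, not part of the original recipe) of
+# what ``foreach_map`` does: it lifts a pointwise op such as ``torch.sin``
+# over a list of tensors, and under ``torch.compile`` the per-tensor calls
+# can be horizontally fused into a single kernel.
+#
+from torch._higher_order_ops.foreach_map import foreach_map
+
+@torch.compile
+def fused_sin(xs):
+    return foreach_map(torch.sin, xs)
+
+xs = [torch.randn(128, device="cuda") for _ in range(4)]
+out = fused_sin(xs)
+assert all(torch.allclose(o, x.sin()) for o, x in zip(out, xs))
+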
+#####################################################################
+# Helper functions for foreach_map implementation
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# In this section, we'll begin our implementation of the Adam optimizer.
+#
+from torch._higher_order_ops.foreach_map import foreach_map
+
+# Helper function to extract optimizer states from a torch.optim.Adam instance.
+# Gradients are not gathered here; foreach_map_adam reads them from the params directly.
+def get_inputs(optim):
+    steps = []
+    params = []
+    exp_avgs = []
+    exp_avg_sqs = []
+    for group in optim.param_groups:
+        for p in group["params"]:
+            params.append(p)
+            state = optim.state[p]
+            exp_avgs.append(state["exp_avg"])
+            exp_avg_sqs.append(state["exp_avg_sq"])
+            steps.append(state["step"])
+
+    return steps, params, exp_avgs, exp_avg_sqs
+
+
+# Functions to update the different optimizer states
+def update_exp_avg_sq(exp_avg_sq, grad, beta2):
+    return exp_avg_sq.mul(beta2).addcmul(grad, grad, value=1 - beta2)
+
+def update_param(param, step, exp_avg, exp_avg_sq, beta1, beta2, lr, eps):
+    bias_correction1 = 1 - torch.pow(beta1, step)
+    bias_correction2 = (1 - torch.pow(beta2, step)).sqrt()
+    step_size = (lr / bias_correction1).neg()
+    denom = (exp_avg_sq.sqrt() / (bias_correction2 * step_size)).add(eps / step_size)
+    return torch.add(param, torch.div(exp_avg, denom))
+
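+# A short note on the algebra (added explanation): ``update_exp_avg_sq``
+# computes the usual second-moment EMA,
+# exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad**2.
+# ``update_param`` folds the sign and the learning rate into ``step_size`` so
+# the whole update reduces to a single add; algebraically it computes
+#
+#   param - (lr / bias_correction1) * exp_avg / (exp_avg_sq.sqrt() / bias_correction2 + eps)
+#
+# which is the standard bias-corrected Adam parameter update.
+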
+# Our full Adam implementation
+def foreach_map_adam(
+    steps,
+    params,
+    exp_avgs,
+    exp_avg_sqs,
+    weight_decay=0,
+    beta1=0.9,
+    beta2=0.999,
+    lr=1e-3,
+    eps=1e-8,
+):
+    with torch.no_grad():
+        grads = [param.grad for param in params]
+        # update step
+        updated_steps = foreach_map(lambda x: x + 1, steps)
+        torch._foreach_copy_(steps, updated_steps)
+
+        if weight_decay != 0:
+            # fold L2 weight decay into the gradients: grad = grad + weight_decay * param
+            grads = foreach_map(torch.add, grads, params, alpha=weight_decay)
+
+        # Higher-order operators (HOPs) cannot have multiple outputs at the moment,
+        # so we need to call foreach_map once for each output.
+        # The lerp computes exp_avg = beta1 * exp_avg + (1 - beta1) * grad
+        exp_avgs_updated = foreach_map(torch.lerp, exp_avgs, grads, 1 - beta1)
+        exp_avgs_sq_updated = foreach_map(update_exp_avg_sq, exp_avg_sqs, grads, beta2)
+        params_updated = foreach_map(
+            update_param,
+            params,
+            steps,
+            exp_avgs_updated,
+            exp_avgs_sq_updated,
+            beta1,
+            beta2,
+            lr,
+            eps,
+        )
+        # Higher-order operators (HOPs) don't support input mutation today,
+        # so we manually update the states in-place
+        torch._foreach_copy_(exp_avgs, exp_avgs_updated)
+        torch._foreach_copy_(exp_avg_sqs, exp_avgs_sq_updated)
+        torch._foreach_copy_(params, params_updated)
+    return
+
+#####################################################################
+# Setting up and running the compiled kernel
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# In this section, we'll run our Adam optimizer,
+# compare its results against the eager ``torch.optim.Adam``, and benchmark both.
+#
+# .. note::
+#
+#    ``torch.compile`` is only supported on CUDA devices that have a compute capability of 7.0 or higher.
+opt_eager = torch.optim.Adam(model.parameters(), lr=torch.tensor(0.01))
+opt_eager_copy = torch.optim.Adam(model_copy.parameters(), lr=torch.tensor(0.01))
+
+# take one step with each optimizer to initialize the state (exp_avg, exp_avg_sq, step)
+opt_eager.step()
+opt_eager_copy.step()
+
+inputs = get_inputs(opt_eager_copy)
+compiled_adam = torch.compile(foreach_map_adam)
+
+# optionally view the output code
+torch._logging.set_logs(output_code=True)
+
+# Warmup runs to compile the function
+for _ in range(5):
+    opt_eager.step()
+    compiled_adam(*inputs)
+
+for eager_p, compile_p in zip(opt_eager.param_groups[0]["params"], opt_eager_copy.param_groups[0]["params"]):
+    assert torch.allclose(eager_p, compile_p)
+
+# Benchmark performance
+
+# Let's define a helpful benchmarking function:
+import torch.utils.benchmark as benchmark
+
+def benchmark_torch_function_in_microseconds(f, *args, **kwargs):
+    t0 = benchmark.Timer(
+        stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}
+    )
+    return t0.blocked_autorange().mean * 1e6
+
+eager_runtime = benchmark_torch_function_in_microseconds(opt_eager.step)
+compiled_runtime = benchmark_torch_function_in_microseconds(lambda: compiled_adam(*inputs))
+
+assert eager_runtime > compiled_runtime
+
+print(f"eager runtime: {eager_runtime}us")
+print(f"compiled runtime: {compiled_runtime}us")
+
+
+
+######################################################################
+# Conclusion
+# ~~~~~~~~~~
+# In this tutorial, we implemented a custom fully-fused Adam optimizer using ``foreach_map``
+# and ``torch.compile``. The same approach applies to any computation over lists of tensors
+# that is built from pointwise ops: express it with ``foreach_map``, compile it, and the
+# per-tensor kernels are horizontally fused into a single kernel.
+#
+# See also:
+#
+# * `Compiled optimizer tutorial <https://pytorch.org/tutorials/recipes/compiling_optimizer.html>`__ - an introduction to the compiled optimizer.
+# * `Compiling the optimizer with PT2 <https://dev-discuss.pytorch.org/t/compiling-the-optimizer-with-pt2/1669>`__ - deeper technical details on the compiled optimizer.
diff --git a/recipes_source/recipes_index.rst b/recipes_source/recipes_index.rst
index f136c4b9c6..1ad3b0d97a 100644
--- a/recipes_source/recipes_index.rst
+++ b/recipes_source/recipes_index.rst
@@ -335,6 +335,15 @@ Recipes are bite-sized, actionable examples of how to use specific PyTorch featu
    :link: ../recipes/compiling_optimizer_lr_scheduler.html
    :tags: Model-Optimization
 
+.. (beta) Explicit horizontal fusion with foreach_map and torch.compile
+
+.. customcarditem::
+   :header: (beta) Explicit horizontal fusion with foreach_map and torch.compile
+   :card_description: Horizontally fuse pointwise ops with torch.compile
+   :image: ../_static/img/thumbnails/cropped/generic-pytorch-logo.png
+   :link: ../recipes/foreach_map.html
+   :tags: Model-Optimization
+
 .. Using User-Defined Triton Kernels with ``torch.compile``
 
 .. customcarditem::
diff --git a/recipes_source/torch_export_aoti_python.py b/recipes_source/torch_export_aoti_python.py
index c0cbb7e280..ff311f071e 100644
--- a/recipes_source/torch_export_aoti_python.py
+++ b/recipes_source/torch_export_aoti_python.py
@@ -176,7 +176,7 @@
 model_path = os.path.join(os.getcwd(), "resnet18.pt2")
 
 compiled_model = torch._inductor.aoti_load_package(model_path)
-example_inputs = (torch.randn(2, 3, 224, 224, device=device),)
+example_inputs = torch.randn(2, 3, 224, 224, device=device)
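+# Note: the callable returned by ``aoti_load_package`` accepts the example
+# inputs directly, so ``example_inputs`` is a bare tensor here rather than a
+# one-element tuple.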
 
 with torch.inference_mode():
     output = compiled_model(example_inputs)
@@ -238,11 +238,11 @@ def timed(fn):
 
 torch._dynamo.reset()
 
-model = torch._inductor.aoti_load_package(model_path)
-example_inputs = (torch.randn(1, 3, 224, 224, device=device),)
+compiled_model = torch._inductor.aoti_load_package(model_path)
+example_inputs = torch.randn(1, 3, 224, 224, device=device)
 
 with torch.inference_mode():
-    _, time_taken = timed(lambda: model(example_inputs))
+    _, time_taken = timed(lambda: compiled_model(example_inputs))
     print(f"Time taken for first inference for AOTInductor is {time_taken:.2f} ms")