From 3c0a8e5fd70d017b5e0d70ca79e32dd4134043f8 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 14:13:38 -0700 Subject: [PATCH 01/32] Move back to 2.7 --- .ci/docker/requirements.txt | 6 +++--- .jenkins/build.sh | 7 ++----- .jenkins/validate_tutorials_built.py | 5 ----- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/.ci/docker/requirements.txt b/.ci/docker/requirements.txt index 0e95c62c6b..e6802cb045 100644 --- a/.ci/docker/requirements.txt +++ b/.ci/docker/requirements.txt @@ -14,7 +14,7 @@ tqdm==4.66.1 numpy==1.24.4 matplotlib librosa -torch==2.6 +torch==2.7 torchvision torchdata networkx @@ -67,7 +67,7 @@ iopath pygame==2.6.0 pycocotools semilearn==0.3.2 -torchao==0.5.0 +torchao==0.10.0 segment_anything==1.0 torchrec==1.1.0; platform_system == "Linux" -fbgemm-gpu==1.1.0; platform_system == "Linux" +fbgemm-gpu==1.2.0; platform_system == "Linux" diff --git a/.jenkins/build.sh b/.jenkins/build.sh index 8786859d7d..58483c168b 100755 --- a/.jenkins/build.sh +++ b/.jenkins/build.sh @@ -22,13 +22,10 @@ sudo apt-get install -y pandoc #Install PyTorch Nightly for test. # Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html # Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed). -# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata -# sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124 # sudo pip uninstall -y fbgemm-gpu torchrec +# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict # sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124 -sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict -pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126 -#sudo pip uninstall -y fbgemm-gpu +# pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126 # Install two language tokenizers for Translation with TorchText tutorial python -m spacy download en_core_web_sm python -m spacy download de_core_news_sm diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 3ed1e0c028..380f0b458b 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -51,11 +51,6 @@ "intermediate_source/text_to_speech_with_torchaudio", "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release. "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed. 
- "intermediate_source/mario_rl_tutorial", # reenable after 3302 is fixed - "intermediate_source/reinforcement_ppo", # reenable after 3302 is fixed - "intermediate_source/pinmem_nonblock", # reenable after 3302 is fixed - "intermediate_source/dqn_with_rnn_tutorial", # reenable after 3302 is fixed - "advanced_source/pendulum", # reenable after 3302 is fixed "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes" # reenable after 3326 is fixed From a66f05eacfba8e907c39ef3d287a19a18c3ef83f Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 14:31:27 -0700 Subject: [PATCH 02/32] Update --- conf.py | 208 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 122 insertions(+), 86 deletions(-) diff --git a/conf.py b/conf.py index a12a05d21c..4256cfe729 100644 --- a/conf.py +++ b/conf.py @@ -29,34 +29,44 @@ # import os import sys -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('./.jenkins')) -import pytorch_sphinx_theme -import torch -import numpy + +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("./.jenkins")) +import distutils.file_util import gc import glob import random -import shutil -from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective -import distutils.file_util import re -from get_sphinx_filenames import SPHINX_SHOULD_RUN +import shutil +from pathlib import Path + +import numpy import pandocfilters -import pypandoc import plotly.io as pio -from pathlib import Path -pio.renderers.default = 'sphinx_gallery' +import pypandoc +import pytorch_sphinx_theme +import torch +from custom_directives import ( + CustomCalloutItemDirective, + CustomCardItemDirective, + CustomGalleryItemDirective, + GalleryItemDirective, + IncludeDirective, +) +from get_sphinx_filenames import SPHINX_SHOULD_RUN + +pio.renderers.default = "sphinx_gallery" try: import torchvision except ImportError: import warnings + warnings.warn('unable to load "torchvision" package') import pytorch_sphinx_theme -rst_epilog =""" +rst_epilog = """ .. |edit| image:: /_static/pencil-16.png :width: 16px :height: 16px @@ -69,21 +79,21 @@ # needs_sphinx = '1.0' html_meta = { - 'description': 'Master PyTorch with our step-by-step tutorials for all skill levels. Start your journey to becoming a PyTorch expert today!', - 'keywords': 'PyTorch, tutorials, Getting Started, deep learning, AI', - 'author': 'PyTorch Contributors' + "description": "Master PyTorch with our step-by-step tutorials for all skill levels. Start your journey to becoming a PyTorch expert today!", + "keywords": "PyTorch, tutorials, Getting Started, deep learning, AI", + "author": "PyTorch Contributors", } # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ - 'sphinxcontrib.katex', - 'sphinx.ext.intersphinx', - 'sphinx_copybutton', - 'sphinx_gallery.gen_gallery', - 'sphinx_design', - 'sphinx_sitemap' + "sphinxcontrib.katex", + "sphinx.ext.intersphinx", + "sphinx_copybutton", + "sphinx_gallery.gen_gallery", + "sphinx_design", + "sphinx_sitemap", ] intersphinx_mapping = { @@ -97,32 +107,45 @@ # -- Sphinx-gallery configuration -------------------------------------------- + def reset_seeds(gallery_conf, fname): torch.cuda.empty_cache() + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch._dynamo.reset() torch.manual_seed(42) torch.set_default_device(None) random.seed(10) numpy.random.seed(10) gc.collect() + sphinx_gallery_conf = { - 'examples_dirs': ['beginner_source', 'intermediate_source', - 'advanced_source', 'recipes_source', 'prototype_source'], - 'gallery_dirs': ['beginner', 'intermediate', 'advanced', 'recipes', 'prototype'], - 'filename_pattern': re.compile(SPHINX_SHOULD_RUN), - 'promote_jupyter_magic': True, - 'backreferences_dir': None, - 'first_notebook_cell': ("# For tips on running notebooks in Google Colab, see\n" - "# https://pytorch.org/tutorials/beginner/colab\n" - "%matplotlib inline"), - 'reset_modules': (reset_seeds), - 'ignore_pattern': r'_torch_export_nightly_tutorial.py', - 'pypandoc': {'extra_args': ['--mathjax', '--toc'], - 'filters': ['.jenkins/custom_pandoc_filter.py'], + "examples_dirs": [ + "beginner_source", + "intermediate_source", + "advanced_source", + "recipes_source", + "prototype_source", + ], + "gallery_dirs": ["beginner", "intermediate", "advanced", "recipes", "prototype"], + "filename_pattern": re.compile(SPHINX_SHOULD_RUN), + "promote_jupyter_magic": True, + "backreferences_dir": None, + "first_notebook_cell": ( + "# For tips on running notebooks in Google Colab, see\n" + "# https://pytorch.org/tutorials/beginner/colab\n" + "%matplotlib inline" + ), + "reset_modules": (reset_seeds), + "ignore_pattern": r"_torch_export_nightly_tutorial.py", + "pypandoc": { + "extra_args": ["--mathjax", "--toc"], + "filters": [".jenkins/custom_pandoc_filter.py"], }, } -html_baseurl = 'https://pytorch.org/tutorials/' # needed for sphinx-sitemap +html_baseurl = "https://pytorch.org/tutorials/" # needed for sphinx-sitemap sitemap_locales = [None] sitemap_excludes = [ "search.html", @@ -130,7 +153,7 @@ def reset_seeds(gallery_conf, fname): ] sitemap_url_scheme = "{link}" -if os.getenv('GALLERY_PATTERN'): +if os.getenv("GALLERY_PATTERN"): # GALLERY_PATTERN is to be used when you want to work on a single # tutorial. Previously this was fed into filename_pattern, but # if you do that, you still end up parsing all of the other Python @@ -138,9 +161,11 @@ def reset_seeds(gallery_conf, fname): # ignore_pattern also skips parsing. # See https://github.com/sphinx-gallery/sphinx-gallery/issues/721 # for a more detailed description of the issue. - sphinx_gallery_conf['ignore_pattern'] = r'/(?!' + re.escape(os.getenv('GALLERY_PATTERN')) + r')[^/]+$' + sphinx_gallery_conf["ignore_pattern"] = ( + r"/(?!" 
+ re.escape(os.getenv("GALLERY_PATTERN")) + r")[^/]+$" + ) -for i in range(len(sphinx_gallery_conf['examples_dirs'])): +for i in range(len(sphinx_gallery_conf["examples_dirs"])): gallery_dir = Path(sphinx_gallery_conf["gallery_dirs"][i]) source_dir = Path(sphinx_gallery_conf["examples_dirs"][i]) @@ -152,21 +177,21 @@ def reset_seeds(gallery_conf, fname): distutils.file_util.copy_file(f, gallery_subdir_path, update=True) # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'PyTorch Tutorials' -copyright = '2024, PyTorch' -author = 'PyTorch contributors' +project = "PyTorch Tutorials" +copyright = "2024, PyTorch" +author = "PyTorch contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -182,17 +207,22 @@ def reset_seeds(gallery_conf, fname): # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'src/pytorch-sphinx-theme/docs*'] -exclude_patterns += sphinx_gallery_conf['examples_dirs'] -exclude_patterns += ['*/index.rst'] +exclude_patterns = [ + "_build", + "Thumbs.db", + ".DS_Store", + "src/pytorch-sphinx-theme/docs*", +] +exclude_patterns += sphinx_gallery_conf["examples_dirs"] +exclude_patterns += ["*/index.rst"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -220,7 +250,7 @@ def reset_seeds(gallery_conf, fname): # # Add any paths that contain custom static files (such as style sheets) here, # # relative to this directory. They are copied after the builtin static files, # # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # # Custom sidebar templates, maps document names to template names. # html_sidebars = { @@ -229,23 +259,23 @@ def reset_seeds(gallery_conf, fname): # } -html_theme = 'pytorch_sphinx_theme' +html_theme = "pytorch_sphinx_theme" html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] -html_logo = '_static/img/pytorch-logo-dark.svg' +html_logo = "_static/img/pytorch-logo-dark.svg" html_theme_options = { - 'pytorch_project': 'tutorials', - 'collapse_navigation': False, - 'display_version': True, - 'navigation_with_keys': True, - 'logo_only': False, - 'analytics_id': 'GTM-T8XT4PS', + "pytorch_project": "tutorials", + "collapse_navigation": False, + "display_version": True, + "navigation_with_keys": True, + "logo_only": False, + "analytics_id": "GTM-T8XT4PS", } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
-htmlhelp_basename = 'PyTorchTutorialsdoc' +htmlhelp_basename = "PyTorchTutorialsdoc" # -- Options for LaTeX output --------------------------------------------- @@ -254,15 +284,12 @@ def reset_seeds(gallery_conf, fname): # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -272,8 +299,13 @@ def reset_seeds(gallery_conf, fname): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'PyTorchTutorials.tex', 'PyTorch Tutorials', - 'Sasank, PyTorch contributors', 'manual'), + ( + master_doc, + "PyTorchTutorials.tex", + "PyTorch Tutorials", + "Sasank, PyTorch contributors", + "manual", + ), ] @@ -281,10 +313,7 @@ def reset_seeds(gallery_conf, fname): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pytorchtutorials', 'PyTorch Tutorials', - [author], 1) -] +man_pages = [(master_doc, "pytorchtutorials", "PyTorch Tutorials", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -293,40 +322,47 @@ def reset_seeds(gallery_conf, fname): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'PyTorchTutorials', 'PyTorch Tutorials', - author, 'PyTorchTutorials', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "PyTorchTutorials", + "PyTorch Tutorials", + author, + "PyTorchTutorials", + "One line description of project.", + "Miscellaneous", + ), ] html_css_files = [ - 'https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css', - 'css/custom.css', - 'css/custom2.css' - ] + "https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css", + "css/custom.css", + "css/custom2.css", +] html_js_files = [ "js/custom.js", ] + def setup(app): # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value # and can be moved outside of this function (and the setup(app) function # can be deleted). - #html_css_files = [ + # html_css_files = [ # 'https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css', # 'css/custom.css' - #] + # ] # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is # `add_stylesheet` (deprecated in 1.8). 
- #add_css = getattr(app, 'add_css_file', app.add_stylesheet) - #for css_file in html_css_files: + # add_css = getattr(app, 'add_css_file', app.add_stylesheet) + # for css_file in html_css_files: # add_css(css_file) # Custom CSS - #app.add_stylesheet('css/pytorch_theme.css') + # app.add_stylesheet('css/pytorch_theme.css') # app.add_stylesheet('https://fonts.googleapis.com/css?family=Lato') # Custom directives - app.add_directive('includenodoc', IncludeDirective) - app.add_directive('galleryitem', GalleryItemDirective) - app.add_directive('customgalleryitem', CustomGalleryItemDirective) - app.add_directive('customcarditem', CustomCardItemDirective) - app.add_directive('customcalloutitem', CustomCalloutItemDirective) + app.add_directive("includenodoc", IncludeDirective) + app.add_directive("galleryitem", GalleryItemDirective) + app.add_directive("customgalleryitem", CustomGalleryItemDirective) + app.add_directive("customcarditem", CustomCardItemDirective) + app.add_directive("customcalloutitem", CustomCalloutItemDirective) From e00d84e537118699a1d5d9e2dbf2c09d76819f59 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 15:20:57 -0700 Subject: [PATCH 03/32] Update --- conf.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/conf.py b/conf.py index 4256cfe729..8bfe584e9d 100644 --- a/conf.py +++ b/conf.py @@ -114,9 +114,14 @@ def reset_seeds(gallery_conf, fname): torch.backends.cudnn.benchmark = False torch._dynamo.reset() torch.manual_seed(42) + torch.cuda.manual_seed_all(42) torch.set_default_device(None) random.seed(10) numpy.random.seed(10) + torch.set_grad_enabled(True) + torch.set_default_dtype(torch.float32) + torch.set_default_tensor_type(torch.FloatTensor) + logging.getLogger().setLevel(logging.WARNING) gc.collect() From 0c465f28b51ef4cebe1a2c55451ba9f946a6b4b1 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 15:30:46 -0700 Subject: [PATCH 04/32] Update --- conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/conf.py b/conf.py index 8bfe584e9d..842aa2b47f 100644 --- a/conf.py +++ b/conf.py @@ -121,6 +121,8 @@ def reset_seeds(gallery_conf, fname): torch.set_grad_enabled(True) torch.set_default_dtype(torch.float32) torch.set_default_tensor_type(torch.FloatTensor) + import logging + logging.getLogger().setLevel(logging.WARNING) gc.collect() From 22302ec265e7fefc0a053945614656a7c0612713 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 15:52:48 -0700 Subject: [PATCH 05/32] Update --- recipes_source/torch_logs.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py index b5c3f0bd8a..ca66df1374 100644 --- a/recipes_source/torch_logs.py +++ b/recipes_source/torch_logs.py @@ -30,49 +30,48 @@ # change logging settings at the command line. The equivalent environment # variable setting is shown for each example. 
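# A minimal sketch of the command-line form (the artifact names are taken
# from the examples below; TORCH_LOGS accepts a comma-separated list, so
# they can be combined in one setting):
#
#   TORCH_LOGS="+dynamo,graph,fusion,output_code" python torch_logs.py
#
# The leading "+" raises the dynamo component to DEBUG level, mirroring the
# set_logs(dynamo=logging.DEBUG) call in the first example below.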
+torch._dynamo.reset() import torch # exit cleanly if we are on a device that doesn't support torch.compile if torch.cuda.get_device_capability() < (7, 0): print("Skipping because torch.compile is not supported on this device.") else: + @torch.compile() def fn(x, y): z = x + y return z + 2 - inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda")) - -# print separator and reset dynamo -# between each example + # print separator and reset dynamo + # between each example def separator(name): print(f"==================={name}=========================") torch._dynamo.reset() - separator("Dynamo Tracing") -# View dynamo tracing -# TORCH_LOGS="+dynamo" + # View dynamo tracing + # TORCH_LOGS="+dynamo" torch._logging.set_logs(dynamo=logging.DEBUG) fn(*inputs) separator("Traced Graph") -# View traced graph -# TORCH_LOGS="graph" + # View traced graph + # TORCH_LOGS="graph" torch._logging.set_logs(graph=True) fn(*inputs) separator("Fusion Decisions") -# View fusion decisions -# TORCH_LOGS="fusion" + # View fusion decisions + # TORCH_LOGS="fusion" torch._logging.set_logs(fusion=True) fn(*inputs) separator("Output Code") -# View output code generated by inductor -# TORCH_LOGS="output_code" + # View output code generated by inductor + # TORCH_LOGS="output_code" torch._logging.set_logs(output_code=True) fn(*inputs) From d8f60eac22b2a815dccdd5509dc9fcea284674c9 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 16:13:23 -0700 Subject: [PATCH 06/32] Update --- recipes_source/torch_logs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py index ca66df1374..c6ded8530d 100644 --- a/recipes_source/torch_logs.py +++ b/recipes_source/torch_logs.py @@ -30,8 +30,8 @@ # change logging settings at the command line. The equivalent environment # variable setting is shown for each example. -torch._dynamo.reset() import torch +torch._dynamo.reset() # exit cleanly if we are on a device that doesn't support torch.compile if torch.cuda.get_device_capability() < (7, 0): From 6071c565ca74bbf4aa7960f7cc4f36a6f5159b36 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 16:37:05 -0700 Subject: [PATCH 07/32] Update --- .jenkins/validate_tutorials_built.py | 3 ++- recipes_source/torch_logs.py | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 380f0b458b..b850fb039b 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -53,7 +53,8 @@ "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed. "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed - "recipes_source/recipes/reasoning_about_shapes" # reenable after 3326 is fixed + "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed + "advanced_source/dynamic_quantization_tutorial" ] def tutorial_source_dirs() -> List[Path]: diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py index c6ded8530d..8b38639f49 100644 --- a/recipes_source/torch_logs.py +++ b/recipes_source/torch_logs.py @@ -31,7 +31,6 @@ # variable setting is shown for each example. 
import torch -torch._dynamo.reset() # exit cleanly if we are on a device that doesn't support torch.compile if torch.cuda.get_device_capability() < (7, 0): From 65bd04acec1cab9c429f56a4d064b9b2b8a4da2b Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 16:55:13 -0700 Subject: [PATCH 08/32] Update --- recipes_source/torch_logs.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py index 8b38639f49..14f33853e0 100644 --- a/recipes_source/torch_logs.py +++ b/recipes_source/torch_logs.py @@ -32,6 +32,12 @@ import torch +# Reset torch dynamo and empty CUDA cache before starting +torch._dynamo.reset() +if torch.cuda.is_available(): + torch.cuda.empty_cache() + + # exit cleanly if we are on a device that doesn't support torch.compile if torch.cuda.get_device_capability() < (7, 0): print("Skipping because torch.compile is not supported on this device.") From 9ef0ab88424808428b4d7a209237975f2988e4cc Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 17:01:17 -0700 Subject: [PATCH 09/32] Update --- .jenkins/metadata.json | 3 +++ .jenkins/validate_tutorials_built.py | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json index 6e82d054b4..0514266bd6 100644 --- a/.jenkins/metadata.json +++ b/.jenkins/metadata.json @@ -1,4 +1,7 @@ { + "recipes_source/torch_logs.py": { + "duration": 0 + }, "intermediate_source/ax_multiobjective_nas_tutorial.py": { "extra_files": ["intermediate_source/mnist_train_nas.py"], "duration": 2000 diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index b850fb039b..94fc921b8f 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -54,7 +54,6 @@ "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed - "advanced_source/dynamic_quantization_tutorial" ] def tutorial_source_dirs() -> List[Path]: From 9d047493f21ce13364c9a051c907acd54fc34d38 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 17:49:38 -0700 Subject: [PATCH 10/32] Update --- .jenkins/validate_tutorials_built.py | 1 + 1 file changed, 1 insertion(+) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 94fc921b8f..b850fb039b 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -54,6 +54,7 @@ "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed + "advanced_source/dynamic_quantization_tutorial" ] def tutorial_source_dirs() -> List[Path]: From 536135e009f2b0aa6a79652821c2487b4f8ed2e1 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 17:51:26 -0700 Subject: [PATCH 11/32] Update --- .jenkins/validate_tutorials_built.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index b850fb039b..a023fd6886 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -54,7 +54,8 @@ "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is 
fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed - "advanced_source/dynamic_quantization_tutorial" + "advanced_source/dynamic_quantization_tutorial", + "intermediate_source/memory_format_tutorial" ] def tutorial_source_dirs() -> List[Path]: From ea13112fc3de36e986fa52fd76a8d56c58a3f02d Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 18:32:13 -0700 Subject: [PATCH 12/32] Update --- .jenkins/validate_tutorials_built.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index a023fd6886..3aa57d6e63 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -55,7 +55,8 @@ "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed "advanced_source/dynamic_quantization_tutorial", - "intermediate_source/memory_format_tutorial" + "intermediate_source/memory_format_tutorial", + "intermediate_source/custom_function_conv_bn_tutorial" ] def tutorial_source_dirs() -> List[Path]: From db6dbd53b01e2c08582ed8a2d3865ba273f50d81 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 20:28:59 -0700 Subject: [PATCH 13/32] Update --- .jenkins/validate_tutorials_built.py | 1 - 1 file changed, 1 deletion(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 3aa57d6e63..4069888347 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -55,7 +55,6 @@ "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed "advanced_source/dynamic_quantization_tutorial", - "intermediate_source/memory_format_tutorial", "intermediate_source/custom_function_conv_bn_tutorial" ] From 19006b3706513260d54eed8c1b7adb122ad207ff Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Tue, 29 Apr 2025 21:09:37 -0700 Subject: [PATCH 14/32] Update --- .jenkins/validate_tutorials_built.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 4069888347..a023fd6886 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -55,7 +55,7 @@ "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed "advanced_source/dynamic_quantization_tutorial", - "intermediate_source/custom_function_conv_bn_tutorial" + "intermediate_source/memory_format_tutorial" ] def tutorial_source_dirs() -> List[Path]: From 7dcb0fe066e50959854325ec568181bbb40c3911 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 08:18:17 -0700 Subject: [PATCH 15/32] Update --- .jenkins/validate_tutorials_built.py | 1 - 1 file changed, 1 deletion(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index a023fd6886..21aaf9de5b 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -54,7 +54,6 @@ "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed - "advanced_source/dynamic_quantization_tutorial", 
"intermediate_source/memory_format_tutorial" ] From bfd652e6768a544375ddbf78779b33c57ed82450 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 08:27:27 -0700 Subject: [PATCH 16/32] Update --- .jenkins/validate_tutorials_built.py | 1 - conf.py | 8 ++++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 21aaf9de5b..94fc921b8f 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -54,7 +54,6 @@ "advanced_source/coding_ddpg", # reenable after 3302 is fixed "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed - "intermediate_source/memory_format_tutorial" ] def tutorial_source_dirs() -> List[Path]: diff --git a/conf.py b/conf.py index 842aa2b47f..fbe5de66eb 100644 --- a/conf.py +++ b/conf.py @@ -121,9 +121,13 @@ def reset_seeds(gallery_conf, fname): torch.set_grad_enabled(True) torch.set_default_dtype(torch.float32) torch.set_default_tensor_type(torch.FloatTensor) - import logging + + # Reset torch modules that might have been modified by tutorials + import importlib + importlib.reload(torch) + if 'torchvision' in sys.modules: + importlib.reload(torchvision) - logging.getLogger().setLevel(logging.WARNING) gc.collect() From 34b91c909c136070ac695fdf783fa28f7b8c2e4e Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 08:41:06 -0700 Subject: [PATCH 17/32] Update --- conf.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/conf.py b/conf.py index fbe5de66eb..68a1db5929 100644 --- a/conf.py +++ b/conf.py @@ -122,11 +122,10 @@ def reset_seeds(gallery_conf, fname): torch.set_default_dtype(torch.float32) torch.set_default_tensor_type(torch.FloatTensor) - # Reset torch modules that might have been modified by tutorials - import importlib - importlib.reload(torch) - if 'torchvision' in sys.modules: - importlib.reload(torchvision) + # Clean up any global state without reloading + if hasattr(torch, "_C"): + if hasattr(torch._C, "_jit_clear_class_registry"): + torch._C._jit_clear_class_registry() gc.collect() From ffab05c5e290dca448a36a14362c103e1686881d Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 09:15:12 -0700 Subject: [PATCH 18/32] Update --- conf.py | 219 +++++++++++++++-------------------- recipes_source/torch_logs.py | 30 +++-- 2 files changed, 106 insertions(+), 143 deletions(-) diff --git a/conf.py b/conf.py index 68a1db5929..7ca52a9e50 100644 --- a/conf.py +++ b/conf.py @@ -29,44 +29,34 @@ # import os import sys - -sys.path.insert(0, os.path.abspath(".")) -sys.path.insert(0, os.path.abspath("./.jenkins")) -import distutils.file_util +sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath('./.jenkins')) +import pytorch_sphinx_theme +import torch +import numpy import gc import glob import random -import re import shutil -from pathlib import Path - -import numpy +from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective +import distutils.file_util +import re +from get_sphinx_filenames import SPHINX_SHOULD_RUN import pandocfilters -import plotly.io as pio import pypandoc -import pytorch_sphinx_theme -import torch -from custom_directives import ( - CustomCalloutItemDirective, - CustomCardItemDirective, - CustomGalleryItemDirective, - 
GalleryItemDirective, - IncludeDirective, -) -from get_sphinx_filenames import SPHINX_SHOULD_RUN - -pio.renderers.default = "sphinx_gallery" +import plotly.io as pio +from pathlib import Path +pio.renderers.default = 'sphinx_gallery' try: import torchvision except ImportError: import warnings - warnings.warn('unable to load "torchvision" package') import pytorch_sphinx_theme -rst_epilog = """ +rst_epilog =""" .. |edit| image:: /_static/pencil-16.png :width: 16px :height: 16px @@ -79,21 +69,21 @@ # needs_sphinx = '1.0' html_meta = { - "description": "Master PyTorch with our step-by-step tutorials for all skill levels. Start your journey to becoming a PyTorch expert today!", - "keywords": "PyTorch, tutorials, Getting Started, deep learning, AI", - "author": "PyTorch Contributors", + 'description': 'Master PyTorch with our step-by-step tutorials for all skill levels. Start your journey to becoming a PyTorch expert today!', + 'keywords': 'PyTorch, tutorials, Getting Started, deep learning, AI', + 'author': 'PyTorch Contributors' } # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - "sphinxcontrib.katex", - "sphinx.ext.intersphinx", - "sphinx_copybutton", - "sphinx_gallery.gen_gallery", - "sphinx_design", - "sphinx_sitemap", + 'sphinxcontrib.katex', + 'sphinx.ext.intersphinx', + 'sphinx_copybutton', + 'sphinx_gallery.gen_gallery', + 'sphinx_design', + 'sphinx_sitemap' ] intersphinx_mapping = { @@ -107,55 +97,45 @@ # -- Sphinx-gallery configuration -------------------------------------------- - def reset_seeds(gallery_conf, fname): torch.cuda.empty_cache() torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch._dynamo.reset() torch.manual_seed(42) - torch.cuda.manual_seed_all(42) torch.set_default_device(None) random.seed(10) numpy.random.seed(10) torch.set_grad_enabled(True) - torch.set_default_dtype(torch.float32) - torch.set_default_tensor_type(torch.FloatTensor) - # Clean up any global state without reloading - if hasattr(torch, "_C"): - if hasattr(torch._C, "_jit_clear_class_registry"): - torch._C._jit_clear_class_registry() + # Reset any patching from memory_format_tutorial + if hasattr(torch, "old_attrs"): + for (m, attrs) in torch.old_attrs.items(): + for (k, v) in attrs.items(): + setattr(m, k, v) + delattr(torch, "old_attrs") - gc.collect() + gc.collect() sphinx_gallery_conf = { - "examples_dirs": [ - "beginner_source", - "intermediate_source", - "advanced_source", - "recipes_source", - "prototype_source", - ], - "gallery_dirs": ["beginner", "intermediate", "advanced", "recipes", "prototype"], - "filename_pattern": re.compile(SPHINX_SHOULD_RUN), - "promote_jupyter_magic": True, - "backreferences_dir": None, - "first_notebook_cell": ( - "# For tips on running notebooks in Google Colab, see\n" - "# https://pytorch.org/tutorials/beginner/colab\n" - "%matplotlib inline" - ), - "reset_modules": (reset_seeds), - "ignore_pattern": r"_torch_export_nightly_tutorial.py", - "pypandoc": { - "extra_args": ["--mathjax", "--toc"], - "filters": [".jenkins/custom_pandoc_filter.py"], + 'examples_dirs': ['beginner_source', 'intermediate_source', + 'advanced_source', 'recipes_source', 'prototype_source'], + 'gallery_dirs': ['beginner', 'intermediate', 'advanced', 'recipes', 'prototype'], + 'filename_pattern': re.compile(SPHINX_SHOULD_RUN), + 'promote_jupyter_magic': True, + 'backreferences_dir': None, + 'first_notebook_cell': ("# For tips on running notebooks in 
Google Colab, see\n" + "# https://pytorch.org/tutorials/beginner/colab\n" + "%matplotlib inline"), + 'reset_modules': (reset_seeds), + 'ignore_pattern': r'_torch_export_nightly_tutorial.py', + 'pypandoc': {'extra_args': ['--mathjax', '--toc'], + 'filters': ['.jenkins/custom_pandoc_filter.py'], }, } -html_baseurl = "https://pytorch.org/tutorials/" # needed for sphinx-sitemap +html_baseurl = 'https://pytorch.org/tutorials/' # needed for sphinx-sitemap sitemap_locales = [None] sitemap_excludes = [ "search.html", @@ -163,7 +143,7 @@ def reset_seeds(gallery_conf, fname): ] sitemap_url_scheme = "{link}" -if os.getenv("GALLERY_PATTERN"): +if os.getenv('GALLERY_PATTERN'): # GALLERY_PATTERN is to be used when you want to work on a single # tutorial. Previously this was fed into filename_pattern, but # if you do that, you still end up parsing all of the other Python @@ -171,11 +151,9 @@ def reset_seeds(gallery_conf, fname): # ignore_pattern also skips parsing. # See https://github.com/sphinx-gallery/sphinx-gallery/issues/721 # for a more detailed description of the issue. - sphinx_gallery_conf["ignore_pattern"] = ( - r"/(?!" + re.escape(os.getenv("GALLERY_PATTERN")) + r")[^/]+$" - ) + sphinx_gallery_conf['ignore_pattern'] = r'/(?!' + re.escape(os.getenv('GALLERY_PATTERN')) + r')[^/]+$' -for i in range(len(sphinx_gallery_conf["examples_dirs"])): +for i in range(len(sphinx_gallery_conf['examples_dirs'])): gallery_dir = Path(sphinx_gallery_conf["gallery_dirs"][i]) source_dir = Path(sphinx_gallery_conf["examples_dirs"][i]) @@ -187,21 +165,21 @@ def reset_seeds(gallery_conf, fname): distutils.file_util.copy_file(f, gallery_subdir_path, update=True) # Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = ".rst" +source_suffix = '.rst' # The master toctree document. -master_doc = "index" +master_doc = 'index' # General information about the project. -project = "PyTorch Tutorials" -copyright = "2024, PyTorch" -author = "PyTorch contributors" +project = 'PyTorch Tutorials' +copyright = '2024, PyTorch' +author = 'PyTorch contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -217,22 +195,17 @@ def reset_seeds(gallery_conf, fname): # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = "en" +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [ - "_build", - "Thumbs.db", - ".DS_Store", - "src/pytorch-sphinx-theme/docs*", -] -exclude_patterns += sphinx_gallery_conf["examples_dirs"] -exclude_patterns += ["*/index.rst"] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'src/pytorch-sphinx-theme/docs*'] +exclude_patterns += sphinx_gallery_conf['examples_dirs'] +exclude_patterns += ['*/index.rst'] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" +pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False @@ -260,7 +233,7 @@ def reset_seeds(gallery_conf, fname): # # Add any paths that contain custom static files (such as style sheets) here, # # relative to this directory. They are copied after the builtin static files, # # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # # Custom sidebar templates, maps document names to template names. # html_sidebars = { @@ -269,23 +242,23 @@ def reset_seeds(gallery_conf, fname): # } -html_theme = "pytorch_sphinx_theme" +html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] -html_logo = "_static/img/pytorch-logo-dark.svg" +html_logo = '_static/img/pytorch-logo-dark.svg' html_theme_options = { - "pytorch_project": "tutorials", - "collapse_navigation": False, - "display_version": True, - "navigation_with_keys": True, - "logo_only": False, - "analytics_id": "GTM-T8XT4PS", + 'pytorch_project': 'tutorials', + 'collapse_navigation': False, + 'display_version': True, + 'navigation_with_keys': True, + 'logo_only': False, + 'analytics_id': 'GTM-T8XT4PS', } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = "PyTorchTutorialsdoc" +htmlhelp_basename = 'PyTorchTutorialsdoc' # -- Options for LaTeX output --------------------------------------------- @@ -294,12 +267,15 @@ def reset_seeds(gallery_conf, fname): # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. # # 'preamble': '', + # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -309,13 +285,8 @@ def reset_seeds(gallery_conf, fname): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ( - master_doc, - "PyTorchTutorials.tex", - "PyTorch Tutorials", - "Sasank, PyTorch contributors", - "manual", - ), + (master_doc, 'PyTorchTutorials.tex', 'PyTorch Tutorials', + 'Sasank, PyTorch contributors', 'manual'), ] @@ -323,7 +294,10 @@ def reset_seeds(gallery_conf, fname): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "pytorchtutorials", "PyTorch Tutorials", [author], 1)] +man_pages = [ + (master_doc, 'pytorchtutorials', 'PyTorch Tutorials', + [author], 1) +] # -- Options for Texinfo output ------------------------------------------- @@ -332,47 +306,40 @@ def reset_seeds(gallery_conf, fname): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ( - master_doc, - "PyTorchTutorials", - "PyTorch Tutorials", - author, - "PyTorchTutorials", - "One line description of project.", - "Miscellaneous", - ), + (master_doc, 'PyTorchTutorials', 'PyTorch Tutorials', + author, 'PyTorchTutorials', 'One line description of project.', + 'Miscellaneous'), ] html_css_files = [ - "https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css", - "css/custom.css", - "css/custom2.css", -] + 'https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css', + 'css/custom.css', + 'css/custom2.css' + ] html_js_files = [ "js/custom.js", ] - def setup(app): # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value # and can be moved outside of this function (and the setup(app) function # can be deleted). 
- # html_css_files = [ + #html_css_files = [ # 'https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css', # 'css/custom.css' - # ] + #] # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is # `add_stylesheet` (deprecated in 1.8). - # add_css = getattr(app, 'add_css_file', app.add_stylesheet) - # for css_file in html_css_files: + #add_css = getattr(app, 'add_css_file', app.add_stylesheet) + #for css_file in html_css_files: # add_css(css_file) # Custom CSS - # app.add_stylesheet('css/pytorch_theme.css') + #app.add_stylesheet('css/pytorch_theme.css') # app.add_stylesheet('https://fonts.googleapis.com/css?family=Lato') # Custom directives - app.add_directive("includenodoc", IncludeDirective) - app.add_directive("galleryitem", GalleryItemDirective) - app.add_directive("customgalleryitem", CustomGalleryItemDirective) - app.add_directive("customcarditem", CustomCardItemDirective) - app.add_directive("customcalloutitem", CustomCalloutItemDirective) + app.add_directive('includenodoc', IncludeDirective) + app.add_directive('galleryitem', GalleryItemDirective) + app.add_directive('customgalleryitem', CustomGalleryItemDirective) + app.add_directive('customcarditem', CustomCardItemDirective) + app.add_directive('customcalloutitem', CustomCalloutItemDirective) diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py index 14f33853e0..b5c3f0bd8a 100644 --- a/recipes_source/torch_logs.py +++ b/recipes_source/torch_logs.py @@ -32,51 +32,47 @@ import torch -# Reset torch dynamo and empty CUDA cache before starting -torch._dynamo.reset() -if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # exit cleanly if we are on a device that doesn't support torch.compile if torch.cuda.get_device_capability() < (7, 0): print("Skipping because torch.compile is not supported on this device.") else: - @torch.compile() def fn(x, y): z = x + y return z + 2 + inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda")) - # print separator and reset dynamo - # between each example + +# print separator and reset dynamo +# between each example def separator(name): print(f"==================={name}=========================") torch._dynamo.reset() + separator("Dynamo Tracing") - # View dynamo tracing - # TORCH_LOGS="+dynamo" +# View dynamo tracing +# TORCH_LOGS="+dynamo" torch._logging.set_logs(dynamo=logging.DEBUG) fn(*inputs) separator("Traced Graph") - # View traced graph - # TORCH_LOGS="graph" +# View traced graph +# TORCH_LOGS="graph" torch._logging.set_logs(graph=True) fn(*inputs) separator("Fusion Decisions") - # View fusion decisions - # TORCH_LOGS="fusion" +# View fusion decisions +# TORCH_LOGS="fusion" torch._logging.set_logs(fusion=True) fn(*inputs) separator("Output Code") - # View output code generated by inductor - # TORCH_LOGS="output_code" +# View output code generated by inductor +# TORCH_LOGS="output_code" torch._logging.set_logs(output_code=True) fn(*inputs) From c7b3aee2be7c4dd6de6b360e9d0f28065eec34a5 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 09:45:53 -0700 Subject: [PATCH 19/32] Update --- conf.py | 7 ------- intermediate_source/memory_format_tutorial.py | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/conf.py b/conf.py index 7ca52a9e50..3606f674a8 100644 --- a/conf.py +++ b/conf.py @@ -107,13 +107,6 @@ def reset_seeds(gallery_conf, fname): random.seed(10) numpy.random.seed(10) torch.set_grad_enabled(True) - - # Reset any patching from memory_format_tutorial - if hasattr(torch, 
"old_attrs"): - for (m, attrs) in torch.old_attrs.items(): - for (k, v) in attrs.items(): - setattr(m, k, v) - delattr(torch, "old_attrs") gc.collect() diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index 26bc5c9d53..4267cd2e21 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -376,6 +376,20 @@ def attribute(m): for (k, v) in attrs.items(): setattr(m, k, v) +# Recover Tensor +torch.Tensor = old_attrs[torch.Tensor]["Tensor"] +# Recover nn.functional +torch.nn.functional = old_attrs[torch.nn.functional]["nn.functional"] +# Recover torch +torch.is_cuda = old_attrs[torch]["is_cuda"] +torch.has_names = old_attrs[torch]["has_names"] +torch.numel = old_attrs[torch]["numel"] +torch.stride = old_attrs[torch]["stride"] +torch.is_contiguous = old_attrs[torch]["is_contiguous"] +torch.__class__ = old_attrs[torch]["__class__"] +del old_attrs + + ###################################################################### # Work to do # ---------- From 91b93f1735469ee07cf49340ce332d6b9af4e602 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 10:18:10 -0700 Subject: [PATCH 20/32] Update --- intermediate_source/memory_format_tutorial.py | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index 4267cd2e21..23c512b62d 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -376,19 +376,23 @@ def attribute(m): for (k, v) in attrs.items(): setattr(m, k, v) -# Recover Tensor +import gc +import sys + torch.Tensor = old_attrs[torch.Tensor]["Tensor"] -# Recover nn.functional -torch.nn.functional = old_attrs[torch.nn.functional]["nn.functional"] -# Recover torch -torch.is_cuda = old_attrs[torch]["is_cuda"] -torch.has_names = old_attrs[torch]["has_names"] -torch.numel = old_attrs[torch]["numel"] -torch.stride = old_attrs[torch]["stride"] -torch.is_contiguous = old_attrs[torch]["is_contiguous"] -torch.__class__ = old_attrs[torch]["__class__"] +torch.nn.functional = old_attrs[torch.nn.functional] +for attr_name in ["is_cuda", "has_names", "numel", "stride", "is_contiguous", "__class__"]: + if attr_name in old_attrs[torch]: + setattr(torch, attr_name, old_attrs[torch][attr_name]) + +# Force garbage collection del old_attrs +gc.collect() +# Reset torch modules +torch._dynamo.reset() +if torch.cuda.is_available(): + torch.cuda.empty_cache() ###################################################################### # Work to do From f3bc0ca3df13454ae49487bef3611d6e03766417 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 11:31:40 -0700 Subject: [PATCH 21/32] Update --- conf.py | 2 +- intermediate_source/memory_format_tutorial.py | 13 +++---------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/conf.py b/conf.py index 3606f674a8..05cfa11ca1 100644 --- a/conf.py +++ b/conf.py @@ -102,13 +102,13 @@ def reset_seeds(gallery_conf, fname): torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch._dynamo.reset() + torch._inductor.config.force_disable_caches = True torch.manual_seed(42) torch.set_default_device(None) random.seed(10) numpy.random.seed(10) torch.set_grad_enabled(True) - gc.collect() sphinx_gallery_conf = { diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index 
23c512b62d..ab06eeb583 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -379,17 +379,10 @@ def attribute(m): import gc import sys -torch.Tensor = old_attrs[torch.Tensor]["Tensor"] -torch.nn.functional = old_attrs[torch.nn.functional] -for attr_name in ["is_cuda", "has_names", "numel", "stride", "is_contiguous", "__class__"]: - if attr_name in old_attrs[torch]: - setattr(torch, attr_name, old_attrs[torch][attr_name]) +if torch.nn.functional in old_attrs: + for k, v in old_attrs[torch.nn.functional].items(): + setattr(torch.nn.functional, k, v) -# Force garbage collection -del old_attrs -gc.collect() - -# Reset torch modules torch._dynamo.reset() if torch.cuda.is_available(): torch.cuda.empty_cache() From fe0bf0035d1c2d94dc843b8ef2a0ebc0747bffd8 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 12:14:46 -0700 Subject: [PATCH 22/32] Update --- intermediate_source/memory_format_tutorial.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index ab06eeb583..460650200a 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -379,14 +379,17 @@ def attribute(m): import gc import sys -if torch.nn.functional in old_attrs: - for k, v in old_attrs[torch.nn.functional].items(): - setattr(torch.nn.functional, k, v) - torch._dynamo.reset() if torch.cuda.is_available(): torch.cuda.empty_cache() +gc.collect() + +if hasattr(torch, "_check_wrapper_patched"): + delattr(torch, "_check_wrapper_patched") + +old_attrs.clear() + ###################################################################### # Work to do # ---------- From 4cb2f59b55b9f6e4ff59a591d9898d99f589f034 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 12:52:00 -0700 Subject: [PATCH 23/32] Update --- intermediate_source/memory_format_tutorial.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index 460650200a..4ed971b91a 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -385,10 +385,12 @@ def attribute(m): gc.collect() -if hasattr(torch, "_check_wrapper_patched"): - delattr(torch, "_check_wrapper_patched") - -old_attrs.clear() +# Clear any references to the wrapper functions +del old_attrs +del contains_cl +del print_inputs +del check_wrapper +del attribute ###################################################################### # Work to do From 27295ea0fadd337c35785fd85164554f4a74ca77 Mon Sep 17 00:00:00 2001 From: Svetlana Karslioglu Date: Wed, 30 Apr 2025 20:22:46 -0700 Subject: [PATCH 24/32] Update --- .jenkins/validate_tutorials_built.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 94fc921b8f..1a1420f52e 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -50,10 +50,8 @@ "intermediate_source/flask_rest_api_tutorial", "intermediate_source/text_to_speech_with_torchaudio", "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release. - "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed. 
- "advanced_source/coding_ddpg", # reenable after 3302 is fixed - "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed - "recipes_source/recipes/reasoning_about_shapes", # reenable after 3326 is fixed + "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixe + "intermediate_source/memory_format_tutorial" ] def tutorial_source_dirs() -> List[Path]: From e5e95fc0464674746df3142bb77efd36f2dbba8c Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:42:10 -0400 Subject: [PATCH 25/32] Removing Conda install references. --- advanced_source/sharding.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/advanced_source/sharding.rst b/advanced_source/sharding.rst index 7dfeeb88bf..8044463b0c 100644 --- a/advanced_source/sharding.rst +++ b/advanced_source/sharding.rst @@ -14,9 +14,10 @@ Requirements: - python >= 3.7 We highly recommend CUDA when using torchRec. If using CUDA: - cuda >= 11.0 +.. Should these be updated? .. code:: python - # install conda to make installying pytorch with cudatoolkit 11.3 easier. + # install conda to make installying pytorch with cudatoolkit 11.3 easier. !sudo rm Miniconda3-py37_4.9.2-Linux-x86_64.sh Miniconda3-py37_4.9.2-Linux-x86_64.sh.* !sudo wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.9.2-Linux-x86_64.sh !sudo chmod +x Miniconda3-py37_4.9.2-Linux-x86_64.sh @@ -213,7 +214,7 @@ embedding table placement using planner and generate sharded model using ) sharders = [cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())] plan: ShardingPlan = planner.collective_plan(module, sharders, pg) - + sharded_model = DistributedModelParallel( module, env=ShardingEnv.from_process_group(pg), @@ -234,7 +235,7 @@ ranks. .. code:: python import multiprocess - + def spmd_sharing_simulation( sharding_type: ShardingType = ShardingType.TABLE_WISE, world_size = 2, @@ -254,7 +255,7 @@ ranks. ) p.start() processes.append(p) - + for p in processes: p.join() assert 0 == p.exitcode @@ -333,4 +334,3 @@ With data parallel, we will repeat the tables for all devices. rank:0,sharding plan: {'': {'large_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'large_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None)}} rank:1,sharding plan: {'': {'large_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'large_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None)}} - From 3bd2b22768ed0e1629643c2d37cf1d1e1bb5e964 Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:42:19 -0400 Subject: [PATCH 26/32] Removing Conda install references. 
--- advanced_source/torch_script_custom_ops.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/advanced_source/torch_script_custom_ops.rst b/advanced_source/torch_script_custom_ops.rst index 0a0e6e2bd7..f59aa6e883 100644 --- a/advanced_source/torch_script_custom_ops.rst +++ b/advanced_source/torch_script_custom_ops.rst @@ -190,6 +190,8 @@ Environment setup We need an installation of PyTorch and OpenCV. The easiest and most platform independent way to get both is to via Conda:: +.. these need to be updated + conda install -c pytorch pytorch conda install opencv From 1257ab1d46105161ffef8f7b1e6e04df8149da66 Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:42:25 -0400 Subject: [PATCH 27/32] Removing Conda install references. --- intermediate_source/dist_tuto.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/intermediate_source/dist_tuto.rst b/intermediate_source/dist_tuto.rst index 1b622aa277..080bffc57f 100644 --- a/intermediate_source/dist_tuto.rst +++ b/intermediate_source/dist_tuto.rst @@ -523,6 +523,7 @@ for an available MPI implementation. The following steps install the MPI backend, by installing PyTorch `from source `__. +.. needs an update 1. Create and activate your Anaconda environment, install all the pre-requisites following `the guide `__, but do From 9d49d11cc7b85f07ccc973c770f9028dd3865275 Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:53:47 -0400 Subject: [PATCH 28/32] Removing Conda install references. --- beginner_source/introyt/captumyt.py | 237 +++++++++++++++------------- 1 file changed, 128 insertions(+), 109 deletions(-) diff --git a/beginner_source/introyt/captumyt.py b/beginner_source/introyt/captumyt.py index abf2391d25..25998ddd90 100644 --- a/beginner_source/introyt/captumyt.py +++ b/beginner_source/introyt/captumyt.py @@ -106,14 +106,7 @@ - Matplotlib version 3.3.4, since Captum currently uses a Matplotlib function whose arguments have been renamed in later versions -To install Captum in an Anaconda or pip virtual environment, use the -appropriate command for your environment below: - -With ``conda``: - -.. code-block:: sh - - conda install pytorch torchvision captum flask-compress matplotlib=3.3.4 -c pytorch +To install Captum, use the appropriate command for your environment below: With ``pip``: @@ -127,51 +120,56 @@ A First Example --------------- - + To start, let’s take a simple, visual example. We’ll start with a ResNet model pretrained on the ImageNet dataset. We’ll get a test input, and use different **Feature Attribution** algorithms to examine how the input images affect the output, and see a helpful visualization of this input attribution map for some test images. 
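(A note on setup, picking up the installation section above: a pip command
mirroring the package set from the conda line removed earlier would
plausibly be ``pip install torch torchvision captum matplotlib==3.3.4
flask-compress``; the exact pins are an assumption, not a tested
configuration.)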
- -First, some imports: -""" +First, some imports: -import torch -import torch.nn.functional as F -import torchvision.transforms as transforms -import torchvision.models as models +""" -import captum -from captum.attr import IntegratedGradients, Occlusion, LayerGradCam, LayerAttribution -from captum.attr import visualization as viz +import json import os, sys -import json -import numpy as np -from PIL import Image +import captum import matplotlib.pyplot as plt + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision.models as models +import torchvision.transforms as transforms +from captum.attr import ( + IntegratedGradients, + LayerAttribution, + LayerGradCam, + Occlusion, + visualization as viz, +) from matplotlib.colors import LinearSegmentedColormap +from PIL import Image ######################################################################### # Now we’ll use the TorchVision model library to download a pretrained # ResNet. Since we’re not training, we’ll place it in evaluation mode for # now. -# +# -model = models.resnet18(weights='IMAGENET1K_V1') +model = models.resnet18(weights="IMAGENET1K_V1") model = model.eval() ####################################################################### # The place where you got this interactive notebook should also have an # ``img`` folder with a file ``cat.jpg`` in it. -# +# -test_img = Image.open('img/cat.jpg') +test_img = Image.open("img/cat.jpg") test_img_data = np.asarray(test_img) plt.imshow(test_img_data) plt.show() @@ -183,26 +181,23 @@ # range of values. We’ll also pull in the list of human-readable labels # for the categories our model recognizes - that should be in the ``img`` # folder as well. -# +# # model expects 224x224 3-color image -transform = transforms.Compose([ - transforms.Resize(224), - transforms.CenterCrop(224), - transforms.ToTensor() -]) +transform = transforms.Compose( + [transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor()] +) # standard ImageNet normalization transform_normalize = transforms.Normalize( - mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225] - ) + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] +) transformed_img = transform(test_img) input_img = transform_normalize(transformed_img) -input_img = input_img.unsqueeze(0) # the model requires a dummy batch dimension +input_img = input_img.unsqueeze(0) # the model requires a dummy batch dimension -labels_path = 'img/imagenet_class_index.json' +labels_path = "img/imagenet_class_index.json" with open(labels_path) as json_data: idx_to_labels = json.load(json_data) @@ -210,89 +205,96 @@ ###################################################################### # Now, we can ask the question: What does our model think this image # represents? -# +# output = model(input_img) output = F.softmax(output, dim=1) prediction_score, pred_label_idx = torch.topk(output, 1) pred_label_idx.squeeze_() predicted_label = idx_to_labels[str(pred_label_idx.item())][1] -print('Predicted:', predicted_label, '(', prediction_score.squeeze().item(), ')') +print("Predicted:", predicted_label, "(", prediction_score.squeeze().item(), ")") ###################################################################### # We’ve confirmed that ResNet thinks our image of a cat is, in fact, a # cat. But *why* does the model think this is an image of a cat? -# +# # For the answer to that, we turn to Captum. 
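######################################################################
# (Editor's aside -- a short sketch, not part of the original tutorial.)
# Before asking *why*, it can help to see how decisive the prediction was.
# ``output`` is the softmax-normalized score tensor computed above, and
# ``idx_to_labels`` is the label lookup, so the runner-up classes are one
# ``topk`` call away:
#

top5_scores, top5_idxs = torch.topk(output, 5)
for score, idx in zip(top5_scores.squeeze(), top5_idxs.squeeze()):
    print(idx_to_labels[str(idx.item())][1], f"({score.item():.4f})")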
-# +# ########################################################################## # Feature Attribution with Integrated Gradients # --------------------------------------------- -# +# # **Feature attribution** attributes a particular output to features of # the input. It uses a specific input - here, our test image - to generate # a map of the relative importance of each input feature to a particular # output feature. -# +# # `Integrated # Gradients `__ is one of # the feature attribution algorithms available in Captum. Integrated # Gradients assigns an importance score to each input feature by # approximating the integral of the gradients of the model’s output with # respect to the inputs. -# +# # In our case, we’re going to be taking a specific element of the output # vector - that is, the one indicating the model’s confidence in its # chosen category - and use Integrated Gradients to understand what parts # of the input image contributed to this output. -# +# # Once we have the importance map from Integrated Gradients, we’ll use the # visualization tools in Captum to give a helpful representation of the # importance map. Captum’s ``visualize_image_attr()`` function provides a # variety of options for customizing display of your attribution data. # Here, we pass in a custom Matplotlib color map. -# +# # Running the cell with the ``integrated_gradients.attribute()`` call will # usually take a minute or two. -# +# # Initialize the attribution algorithm with the model integrated_gradients = IntegratedGradients(model) -# Ask the algorithm to attribute our output target to -attributions_ig = integrated_gradients.attribute(input_img, target=pred_label_idx, n_steps=200) +# Ask the algorithm to attribute our output target to +attributions_ig = integrated_gradients.attribute( + input_img, target=pred_label_idx, n_steps=200 +) # Show the original image for comparison -_ = viz.visualize_image_attr(None, np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1,2,0)), - method="original_image", title="Original Image") +_ = viz.visualize_image_attr( + None, + np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)), + method="original_image", + title="Original Image", +) -default_cmap = LinearSegmentedColormap.from_list('custom blue', - [(0, '#ffffff'), - (0.25, '#0000ff'), - (1, '#0000ff')], N=256) +default_cmap = LinearSegmentedColormap.from_list( + "custom blue", [(0, "#ffffff"), (0.25, "#0000ff"), (1, "#0000ff")], N=256 +) -_ = viz.visualize_image_attr(np.transpose(attributions_ig.squeeze().cpu().detach().numpy(), (1,2,0)), - np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1,2,0)), - method='heat_map', - cmap=default_cmap, - show_colorbar=True, - sign='positive', - title='Integrated Gradients') +_ = viz.visualize_image_attr( + np.transpose(attributions_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)), + np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)), + method="heat_map", + cmap=default_cmap, + show_colorbar=True, + sign="positive", + title="Integrated Gradients", +) ####################################################################### # In the image above, you should see that Integrated Gradients gives us # the strongest signal around the cat’s location in the image. 
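######################################################################
# (Editor's note -- a hedged sanity check, not in the original tutorial.)
# Integrated Gradients has a "completeness" property: the attributions
# should sum to approximately the difference between the model's output
# at the input and at the baseline. Captum can report the approximation
# error of the integral directly, which is a quick way to judge whether
# ``n_steps`` was large enough:
#

_, delta = integrated_gradients.attribute(
    input_img, target=pred_label_idx, n_steps=200, return_convergence_delta=True
)
print("Convergence delta (approximation error):", delta.item())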
-# +# ########################################################################## # Feature Attribution with Occlusion # ---------------------------------- -# +# # Gradient-based attribution methods help to understand the model in terms # of directly computing out the output changes with respect to the input. # *Perturbation-based attribution* methods approach this more directly, by @@ -300,7 +302,7 @@ # `Occlusion `__ is one such method. # It involves replacing sections of the input image, and examining the # effect on the output signal. -# +# # Below, we set up Occlusion attribution. Similarly to configuring a # convolutional neural network, you can specify the size of the target # region, and a stride length to determine the spacing of individual @@ -310,42 +312,45 @@ # image with the positive attribution regions. The masking gives a very # instructive view of what regions of our cat photo the model found to be # most “cat-like”. -# +# occlusion = Occlusion(model) -attributions_occ = occlusion.attribute(input_img, - target=pred_label_idx, - strides=(3, 8, 8), - sliding_window_shapes=(3,15, 15), - baselines=0) +attributions_occ = occlusion.attribute( + input_img, + target=pred_label_idx, + strides=(3, 8, 8), + sliding_window_shapes=(3, 15, 15), + baselines=0, +) -_ = viz.visualize_image_attr_multiple(np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1,2,0)), - np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1,2,0)), - ["original_image", "heat_map", "heat_map", "masked_image"], - ["all", "positive", "negative", "positive"], - show_colorbar=True, - titles=["Original", "Positive Attribution", "Negative Attribution", "Masked"], - fig_size=(18, 6) - ) +_ = viz.visualize_image_attr_multiple( + np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)), + np.transpose(transformed_img.squeeze().cpu().detach().numpy(), (1, 2, 0)), + ["original_image", "heat_map", "heat_map", "masked_image"], + ["all", "positive", "negative", "positive"], + show_colorbar=True, + titles=["Original", "Positive Attribution", "Negative Attribution", "Masked"], + fig_size=(18, 6), +) ###################################################################### # Again, we see greater significance placed on the region of the image # that contains the cat. -# +# ######################################################################### # Layer Attribution with Layer GradCAM # ------------------------------------ -# +# # **Layer Attribution** allows you to attribute the activity of hidden # layers within your model to features of your input. Below, we’ll use a # layer attribution algorithm to examine the activity of one of the # convolutional layers within our model. -# +# # GradCAM computes the gradients of the target output with respect to the # given layer, averages for each output channel (dimension 2 of output), # and multiplies the average gradient for each channel by the layer @@ -353,19 +358,21 @@ # designed for convnets; since the activity of convolutional layers often # maps spatially to the input, GradCAM attributions are often upsampled # and used to mask the input. -# +# # Layer attribution is set up similarly to input attribution, except that # in addition to the model, you must specify a hidden layer within the # model that you wish to examine. As above, when we call ``attribute()``, # we specify the target class of interest. 
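######################################################################
# (Editor's aside -- an illustrative sketch, not part of the original
# tutorial; the tutorial's own ``LayerGradCam`` call follows below.)
# The recipe described above -- average the gradients per channel, weight
# the activations, sum over channels -- can be written out by hand with a
# pair of hooks, which makes the algorithm concrete:
#

acts, grads = {}, {}
layer = model.layer3[1].conv2
fwd = layer.register_forward_hook(lambda mod, inp, out: acts.update(a=out))
bwd = layer.register_full_backward_hook(lambda mod, gin, gout: grads.update(g=gout[0]))

model(input_img)[0, pred_label_idx].backward()
fwd.remove()
bwd.remove()

channel_weights = grads["g"].mean(dim=(2, 3), keepdim=True)  # avg gradient per channel
manual_cam = torch.relu((channel_weights * acts["a"]).sum(dim=1, keepdim=True))
print(manual_cam.shape)  # one coarse spatial heatmap per input image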
-# +# layer_gradcam = LayerGradCam(model, model.layer3[1].conv2) attributions_lgc = layer_gradcam.attribute(input_img, target=pred_label_idx) -_ = viz.visualize_image_attr(attributions_lgc[0].cpu().permute(1,2,0).detach().numpy(), - sign="all", - title="Layer 3 Block 1 Conv 2") +_ = viz.visualize_image_attr( + attributions_lgc[0].cpu().permute(1, 2, 0).detach().numpy(), + sign="all", + title="Layer 3 Block 1 Conv 2", +) ########################################################################## @@ -373,7 +380,7 @@ # `LayerAttribution `__ # base class to upsample this attribution data for comparison to the input # image. -# +# upsamp_attr_lgc = LayerAttribution.interpolate(attributions_lgc, input_img.shape[2:]) @@ -381,53 +388,63 @@ print(upsamp_attr_lgc.shape) print(input_img.shape) -_ = viz.visualize_image_attr_multiple(upsamp_attr_lgc[0].cpu().permute(1,2,0).detach().numpy(), - transformed_img.permute(1,2,0).numpy(), - ["original_image","blended_heat_map","masked_image"], - ["all","positive","positive"], - show_colorbar=True, - titles=["Original", "Positive Attribution", "Masked"], - fig_size=(18, 6)) +_ = viz.visualize_image_attr_multiple( + upsamp_attr_lgc[0].cpu().permute(1, 2, 0).detach().numpy(), + transformed_img.permute(1, 2, 0).numpy(), + ["original_image", "blended_heat_map", "masked_image"], + ["all", "positive", "positive"], + show_colorbar=True, + titles=["Original", "Positive Attribution", "Masked"], + fig_size=(18, 6), +) ####################################################################### # Visualizations such as this can give you novel insights into how your # hidden layers respond to your input. -# +# ########################################################################## # Visualization with Captum Insights # ---------------------------------- -# +# # Captum Insights is an interpretability visualization widget built on top # of Captum to facilitate model understanding. Captum Insights works # across images, text, and other features to help users understand feature # attribution. It allows you to visualize attribution for multiple # input/output pairs, and provides visualization tools for image, text, # and arbitrary data. -# +# # In this section of the notebook, we’ll visualize multiple image # classification inferences with Captum Insights. -# +# # First, let’s gather some image and see what the model thinks of them. # For variety, we’ll take our cat, a teapot, and a trilobite fossil: -# +# -imgs = ['img/cat.jpg', 'img/teapot.jpg', 'img/trilobite.jpg'] +imgs = ["img/cat.jpg", "img/teapot.jpg", "img/trilobite.jpg"] for img in imgs: img = Image.open(img) transformed_img = transform(img) input_img = transform_normalize(transformed_img) - input_img = input_img.unsqueeze(0) # the model requires a dummy batch dimension + input_img = input_img.unsqueeze(0) # the model requires a dummy batch dimension output = model(input_img) output = F.softmax(output, dim=1) prediction_score, pred_label_idx = torch.topk(output, 1) pred_label_idx.squeeze_() predicted_label = idx_to_labels[str(pred_label_idx.item())][1] - print('Predicted:', predicted_label, '/', pred_label_idx.item(), ' (', prediction_score.squeeze().item(), ')') + print( + "Predicted:", + predicted_label, + "/", + pred_label_idx.item(), + " (", + prediction_score.squeeze().item(), + ")", + ) ########################################################################## @@ -437,9 +454,9 @@ # imported below. The ``AttributionVisualizer`` expects batches of data, # so we’ll bring in Captum’s ``Batch`` helper class. 
And we’ll be looking # at images specifically, so well also import ``ImageFeature``. -# +# # We configure the ``AttributionVisualizer`` with the following arguments: -# +# # - An array of models to be examined (in our case, just the one) # - A scoring function, which allows Captum Insights to pull out the # top-k predictions from a model @@ -447,15 +464,17 @@ # - A list of features to look for - in our case, an ``ImageFeature`` # - A dataset, which is an iterable object returning batches of inputs # and labels - just like you’d use for training -# +# from captum.insights import AttributionVisualizer, Batch from captum.insights.attr_vis.features import ImageFeature + # Baseline is all-zeros input - this may differ depending on your data def baseline_func(input): return input * 0 + # merging our image transforms from above def full_img_transform(input): i = Image.open(input) @@ -478,7 +497,7 @@ def full_img_transform(input): input_transforms=[], ) ], - dataset=[Batch(input_imgs, labels=[282,849,69])] + dataset=[Batch(input_imgs, labels=[282, 849, 69])], ) @@ -488,12 +507,12 @@ def full_img_transform(input): # configure different attribution algorithms in a visual widget, after # which it will compute and display the attributions. *That* process will # take a few minutes. -# +# # Running the cell below will render the Captum Insights widget. You can # then choose attributions methods and their arguments, filter model # responses based on predicted class or prediction correctness, see the # model’s predictions with associated probabilities, and view heatmaps of # the attribution compared with the original image. -# +# visualizer.render() From 1d59f857091824f8907edbe66e6bf0fa4d544430 Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:53:52 -0400 Subject: [PATCH 29/32] Removing Conda install references. --- .../introyt/tensorboardyt_tutorial.py | 158 +++++++++--------- 1 file changed, 83 insertions(+), 75 deletions(-) diff --git a/beginner_source/introyt/tensorboardyt_tutorial.py b/beginner_source/introyt/tensorboardyt_tutorial.py index 49d321bd6d..b932ed6eb2 100644 --- a/beginner_source/introyt/tensorboardyt_tutorial.py +++ b/beginner_source/introyt/tensorboardyt_tutorial.py @@ -24,13 +24,6 @@ To run this tutorial, you’ll need to install PyTorch, TorchVision, Matplotlib, and TensorBoard. -With ``conda``: - -.. code-block:: sh - - conda install pytorch torchvision -c pytorch - conda install matplotlib tensorboard - With ``pip``: .. code-block:: sh @@ -43,14 +36,18 @@ Introduction ------------ - + In this notebook, we’ll be training a variant of LeNet-5 against the Fashion-MNIST dataset. Fashion-MNIST is a set of image tiles depicting various garments, with ten class labels indicating the type of garment -depicted. +depicted. 
""" +# Image display +import matplotlib.pyplot as plt +import numpy as np + # PyTorch model and training necessities import torch import torch.nn as nn @@ -61,10 +58,6 @@ import torchvision import torchvision.transforms as transforms -# Image display -import matplotlib.pyplot as plt -import numpy as np - # PyTorch TensorBoard support from torch.utils.tensorboard import SummaryWriter @@ -79,51 +72,59 @@ ###################################################################### # Showing Images in TensorBoard # ----------------------------- -# +# # Let’s start by adding sample images from our dataset to TensorBoard: -# +# # Gather datasets and prepare them for consumption transform = transforms.Compose( - [transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,))]) + [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))] +) # Store separate training and validations splits in ./data -training_set = torchvision.datasets.FashionMNIST('./data', - download=True, - train=True, - transform=transform) -validation_set = torchvision.datasets.FashionMNIST('./data', - download=True, - train=False, - transform=transform) - -training_loader = torch.utils.data.DataLoader(training_set, - batch_size=4, - shuffle=True, - num_workers=2) - - -validation_loader = torch.utils.data.DataLoader(validation_set, - batch_size=4, - shuffle=False, - num_workers=2) +training_set = torchvision.datasets.FashionMNIST( + "./data", download=True, train=True, transform=transform +) +validation_set = torchvision.datasets.FashionMNIST( + "./data", download=True, train=False, transform=transform +) + +training_loader = torch.utils.data.DataLoader( + training_set, batch_size=4, shuffle=True, num_workers=2 +) + + +validation_loader = torch.utils.data.DataLoader( + validation_set, batch_size=4, shuffle=False, num_workers=2 +) # Class labels -classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', - 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot') +classes = ( + "T-shirt/top", + "Trouser", + "Pullover", + "Dress", + "Coat", + "Sandal", + "Shirt", + "Sneaker", + "Bag", + "Ankle Boot", +) + # Helper function for inline image display def matplotlib_imshow(img, one_channel=False): if one_channel: img = img.mean(dim=0) - img = img / 2 + 0.5 # unnormalize + img = img / 2 + 0.5 # unnormalize npimg = img.numpy() if one_channel: plt.imshow(npimg, cmap="Greys") else: plt.imshow(np.transpose(npimg, (1, 2, 0))) + # Extract a batch of 4 images dataiter = iter(training_loader) images, labels = next(dataiter) @@ -138,14 +139,14 @@ def matplotlib_imshow(img, one_channel=False): # minibatch of our input data. Below, we use the ``add_image()`` call on # ``SummaryWriter`` to log the image for consumption by TensorBoard, and # we also call ``flush()`` to make sure it’s written to disk right away. -# +# # Default log_dir argument is "runs" - but it's good to be specific # torch.utils.tensorboard.SummaryWriter is imported above -writer = SummaryWriter('runs/fashion_mnist_experiment_1') +writer = SummaryWriter("runs/fashion_mnist_experiment_1") # Write image data to TensorBoard log dir -writer.add_image('Four Fashion-MNIST Images', img_grid) +writer.add_image("Four Fashion-MNIST Images", img_grid) writer.flush() # To view, start TensorBoard on the command line with: @@ -157,17 +158,18 @@ def matplotlib_imshow(img, one_channel=False): # If you start TensorBoard at the command line and open it in a new # browser tab (usually at `localhost:6006 `__), you should # see the image grid under the IMAGES tab. 
-# +# # Graphing Scalars to Visualize Training # -------------------------------------- -# +# # TensorBoard is useful for tracking the progress and efficacy of your # training. Below, we’ll run a training loop, track some metrics, and save # the data for TensorBoard’s consumption. -# +# # Let’s define a model to categorize our image tiles, and an optimizer and # loss function for training: -# +# + class Net(nn.Module): def __init__(self): @@ -187,7 +189,7 @@ def forward(self, x): x = F.relu(self.fc2(x)) x = self.fc3(x) return x - + net = Net() criterion = nn.CrossEntropyLoss() @@ -197,7 +199,7 @@ def forward(self, x): ########################################################################## # Now let’s train a single epoch, and evaluate the training vs. validation # set losses every 1000 batches: -# +# print(len(validation_loader)) for epoch in range(1): # loop over the dataset multiple times @@ -213,44 +215,50 @@ def forward(self, x): optimizer.step() running_loss += loss.item() - if i % 1000 == 999: # Every 1000 mini-batches... - print('Batch {}'.format(i + 1)) + if i % 1000 == 999: # Every 1000 mini-batches... + print("Batch {}".format(i + 1)) # Check against the validation set running_vloss = 0.0 - + # In evaluation mode some model specific operations can be omitted eg. dropout layer - net.train(False) # Switching to evaluation mode, eg. turning off regularisation + net.train( + False + ) # Switching to evaluation mode, eg. turning off regularisation for j, vdata in enumerate(validation_loader, 0): vinputs, vlabels = vdata voutputs = net(vinputs) vloss = criterion(voutputs, vlabels) running_vloss += vloss.item() - net.train(True) # Switching back to training mode, eg. turning on regularisation - + net.train( + True + ) # Switching back to training mode, eg. turning on regularisation + avg_loss = running_loss / 1000 avg_vloss = running_vloss / len(validation_loader) - + # Log the running loss averaged per batch - writer.add_scalars('Training vs. Validation Loss', - { 'Training' : avg_loss, 'Validation' : avg_vloss }, - epoch * len(training_loader) + i) + writer.add_scalars( + "Training vs. Validation Loss", + {"Training": avg_loss, "Validation": avg_vloss}, + epoch * len(training_loader) + i, + ) running_loss = 0.0 -print('Finished Training') +print("Finished Training") writer.flush() ######################################################################### # Switch to your open TensorBoard and have a look at the SCALARS tab. -# +# # Visualizing Your Model # ---------------------- -# +# # TensorBoard can also be used to examine the data flow within your model. # To do this, call the ``add_graph()`` method with a model and sample # input: -# +# # Again, grab a single mini-batch of images dataiter = iter(training_loader) @@ -266,10 +274,10 @@ def forward(self, x): # When you switch over to TensorBoard, you should see a GRAPHS tab. # Double-click the “NET” node to see the layers and data flow within your # model. -# +# # Visualizing Your Dataset with Embeddings # ---------------------------------------- -# +# # The 28-by-28 image tiles we’re using can be modeled as 784-dimensional # vectors (28 \* 28 = 784). It can be instructive to project this to a # lower-dimensional representation. The ``add_embedding()`` method will @@ -277,9 +285,10 @@ def forward(self, x): # and display them as an interactive 3D chart. The ``add_embedding()`` # method does this automatically by projecting to the three dimensions # with highest variance. 
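######################################################################
# (Editor's aside -- a hedged sketch, not in the original tutorial.)
# The "three dimensions with highest variance" projection described above
# is essentially PCA, which you can reproduce by hand to see roughly what
# ``add_embedding()`` computes:
#

flat = training_set.data[:100].float().view(-1, 28 * 28)
_, _, v = torch.pca_lowrank(flat, q=3)      # top-3 principal directions
coords3d = (flat - flat.mean(dim=0)) @ v    # 100 samples projected to 3-D
print(coords3d.shape)                       # torch.Size([100, 3])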
-# +# # Below, we’ll take a sample of our data, and generate such an embedding: -# +# + # Select a random subset of data and corresponding labels def select_n_random(data, labels, n=100): @@ -288,6 +297,7 @@ def select_n_random(data, labels, n=100): perm = torch.randperm(len(data)) return data[perm][:n], labels[perm][:n] + # Extract a random subset of data images, labels = select_n_random(training_set.data, training_set.targets) @@ -296,9 +306,7 @@ def select_n_random(data, labels, n=100): # log embeddings features = images.view(-1, 28 * 28) -writer.add_embedding(features, - metadata=class_labels, - label_img=images.unsqueeze(1)) +writer.add_embedding(features, metadata=class_labels, label_img=images.unsqueeze(1)) writer.flush() writer.close() @@ -309,19 +317,19 @@ def select_n_random(data, labels, n=100): # zoom the model. Examine it at large and small scales, and see whether # you can spot patterns in the projected data and the clustering of # labels. -# +# # For better visibility, it’s recommended to: -# +# # - Select “label” from the “Color by” drop-down on the left. # - Toggle the Night Mode icon along the top to place the # light-colored images on a dark background. -# +# # Other Resources # --------------- -# +# # For more information, have a look at: -# +# # - PyTorch documentation on `torch.utils.tensorboard.SummaryWriter `__ -# - Tensorboard tutorial content in the `PyTorch.org Tutorials `__ +# - Tensorboard tutorial content in the `PyTorch.org Tutorials `__ # - For more information about TensorBoard, see the `TensorBoard # documentation `__ From 1588c5e0170835d48cf6cf7602a937f723f5732b Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:54:06 -0400 Subject: [PATCH 30/32] Removing Conda install references. --- recipes_source/intel_neural_compressor_for_pytorch.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/recipes_source/intel_neural_compressor_for_pytorch.rst b/recipes_source/intel_neural_compressor_for_pytorch.rst index 02ce3d7b37..3c108afd9f 100755 --- a/recipes_source/intel_neural_compressor_for_pytorch.rst +++ b/recipes_source/intel_neural_compressor_for_pytorch.rst @@ -50,9 +50,6 @@ Installation # install nightly version from pip pip install -i https://test.pypi.org/simple/ neural-compressor - # install stable version from from conda - conda install neural-compressor -c conda-forge -c intel - *Supported python versions are 3.6 or 3.7 or 3.8 or 3.9* Usages From 78f9670e5c630417a604b4f3cc4f57aa3d3e3b9d Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Thu, 1 May 2025 14:54:13 -0400 Subject: [PATCH 31/32] Removing Conda install references. --- .../recipes/tensorboard_with_pytorch.py | 78 +++++++++---------- 1 file changed, 37 insertions(+), 41 deletions(-) diff --git a/recipes_source/recipes/tensorboard_with_pytorch.py b/recipes_source/recipes/tensorboard_with_pytorch.py index 4bceda81ea..d32450ea32 100644 --- a/recipes_source/recipes/tensorboard_with_pytorch.py +++ b/recipes_source/recipes/tensorboard_with_pytorch.py @@ -1,24 +1,17 @@ """ How to use TensorBoard with PyTorch =================================== -TensorBoard is a visualization toolkit for machine learning experimentation. -TensorBoard allows tracking and visualizing metrics such as loss and accuracy, -visualizing the model graph, viewing histograms, displaying images and much more. -In this tutorial we are going to cover TensorBoard installation, +TensorBoard is a visualization toolkit for machine learning experimentation. 
+TensorBoard allows tracking and visualizing metrics such as loss and accuracy, +visualizing the model graph, viewing histograms, displaying images and much more. +In this tutorial we are going to cover TensorBoard installation, basic usage with PyTorch, and how to visualize data you logged in TensorBoard UI. Installation ---------------------- -PyTorch should be installed to log models and metrics into TensorBoard log -directory. The following command will install PyTorch 1.4+ via -Anaconda (recommended): - -.. code-block:: sh - - $ conda install pytorch torchvision -c pytorch - - -or pip +PyTorch should be installed to log models and metrics into TensorBoard log +directory. The following command will install PyTorch 1.4+ via +pip: .. code-block:: sh @@ -29,31 +22,32 @@ ###################################################################### # Using TensorBoard in PyTorch # ----------------------------- -# -# Let’s now try using TensorBoard with PyTorch! Before logging anything, +# +# Let’s now try using TensorBoard with PyTorch! Before logging anything, # we need to create a ``SummaryWriter`` instance. -# +# import torch from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() ###################################################################### # Writer will output to ``./runs/`` directory by default. -# +# ###################################################################### # Log scalars # ----------- -# -# In machine learning, it’s important to understand key metrics such as -# loss and how they change during training. Scalar helps to save -# the loss value of each training step, or the accuracy after each epoch. -# -# To log a scalar value, use -# ``add_scalar(tag, scalar_value, global_step=None, walltime=None)``. -# For example, lets create a simple linear regression training, and +# +# In machine learning, it’s important to understand key metrics such as +# loss and how they change during training. Scalar helps to save +# the loss value of each training step, or the accuracy after each epoch. +# +# To log a scalar value, use +# ``add_scalar(tag, scalar_value, global_step=None, walltime=None)``. +# For example, lets create a simple linear regression training, and # log loss value using ``add_scalar`` # @@ -62,7 +56,8 @@ model = torch.nn.Linear(1, 1) criterion = torch.nn.MSELoss() -optimizer = torch.optim.SGD(model.parameters(), lr = 0.1) +optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + def train_model(iter): for epoch in range(iter): @@ -72,18 +67,19 @@ def train_model(iter): optimizer.zero_grad() loss.backward() optimizer.step() - + + train_model(10) writer.flush() -###################################################################### -# Call ``flush()`` method to make sure that all pending events +###################################################################### +# Call ``flush()`` method to make sure that all pending events # have been written to disk. -# -# See `torch.utils.tensorboard tutorials `_ +# +# See `torch.utils.tensorboard tutorials `_ # to find more TensorBoard visualization types you can log. -# +# # If you do not need the summary writer anymore, call ``close()`` method. # @@ -92,7 +88,7 @@ def train_model(iter): ###################################################################### # Run TensorBoard # ---------------- -# +# # Install TensorBoard through the command line to visualize data you logged # # .. 
code-block:: sh @@ -100,9 +96,9 @@ def train_model(iter): # pip install tensorboard # # -# Now, start TensorBoard, specifying the root log directory you used above. -# Argument ``logdir`` points to directory where TensorBoard will look to find -# event files that it can display. TensorBoard will recursively walk +# Now, start TensorBoard, specifying the root log directory you used above. +# Argument ``logdir`` points to directory where TensorBoard will look to find +# event files that it can display. TensorBoard will recursively walk # the directory structure rooted at ``logdir``, looking for ``.*tfevents.*`` files. # # .. code-block:: sh @@ -114,9 +110,9 @@ def train_model(iter): # .. image:: ../../_static/img/thumbnails/tensorboard_scalars.png # :scale: 40 % # -# This dashboard shows how the loss and accuracy change with every epoch. -# You can use it to also track training speed, learning rate, and other -# scalar values. It’s helpful to compare these metrics across different +# This dashboard shows how the loss and accuracy change with every epoch. +# You can use it to also track training speed, learning rate, and other +# scalar values. It’s helpful to compare these metrics across different # training runs to improve your model. # @@ -124,7 +120,7 @@ def train_model(iter): ######################################################################## # Learn More # ---------------------------- -# +# # - `torch.utils.tensorboard `_ docs # - `Visualizing models, data, and training with TensorBoard `_ tutorial # From 3f98763f6b17e80fcac8c5703a9bf0dbe9f8ef13 Mon Sep 17 00:00:00 2001 From: Alanna Burke Date: Tue, 6 May 2025 17:37:01 -0400 Subject: [PATCH 32/32] Removing formatting issues. --- .ci/docker/requirements.txt | 6 +- .jenkins/build.sh | 7 +- .jenkins/metadata.json | 3 - .jenkins/validate_tutorials_built.py | 11 +- advanced_source/sharding.rst | 4 - advanced_source/torch_script_custom_ops.rst | 2 + beginner_source/colab.rst | 4 +- beginner_source/introyt/captumyt.py | 50 ++-- .../introyt/tensorboardyt_tutorial.py | 7 - conf.py | 6 - en-wordlist.txt | 11 - intermediate_source/dist_tuto.rst | 1 + intermediate_source/memory_format_tutorial.py | 16 -- intermediate_source/torch_compile_tutorial.py | 17 +- prototype_source/inductor_windows.rst | 5 +- prototype_source/openvino_quantizer.rst | 250 ------------------ prototype_source/prototype_index.rst | 7 - .../intel_neural_compressor_for_pytorch.rst | 3 - 18 files changed, 53 insertions(+), 357 deletions(-) delete mode 100644 prototype_source/openvino_quantizer.rst diff --git a/.ci/docker/requirements.txt b/.ci/docker/requirements.txt index e6802cb045..0e95c62c6b 100644 --- a/.ci/docker/requirements.txt +++ b/.ci/docker/requirements.txt @@ -14,7 +14,7 @@ tqdm==4.66.1 numpy==1.24.4 matplotlib librosa -torch==2.7 +torch==2.6 torchvision torchdata networkx @@ -67,7 +67,7 @@ iopath pygame==2.6.0 pycocotools semilearn==0.3.2 -torchao==0.10.0 +torchao==0.5.0 segment_anything==1.0 torchrec==1.1.0; platform_system == "Linux" -fbgemm-gpu==1.2.0; platform_system == "Linux" +fbgemm-gpu==1.1.0; platform_system == "Linux" diff --git a/.jenkins/build.sh b/.jenkins/build.sh index 58483c168b..8786859d7d 100755 --- a/.jenkins/build.sh +++ b/.jenkins/build.sh @@ -22,10 +22,13 @@ sudo apt-get install -y pandoc #Install PyTorch Nightly for test. 
# Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html # Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed). +# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata +# sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124 # sudo pip uninstall -y fbgemm-gpu torchrec -# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict # sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124 -# pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126 +sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict +pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126 +#sudo pip uninstall -y fbgemm-gpu # Install two language tokenizers for Translation with TorchText tutorial python -m spacy download en_core_web_sm python -m spacy download de_core_news_sm diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json index 0514266bd6..6e82d054b4 100644 --- a/.jenkins/metadata.json +++ b/.jenkins/metadata.json @@ -1,7 +1,4 @@ { - "recipes_source/torch_logs.py": { - "duration": 0 - }, "intermediate_source/ax_multiobjective_nas_tutorial.py": { "extra_files": ["intermediate_source/mnist_train_nas.py"], "duration": 2000 diff --git a/.jenkins/validate_tutorials_built.py b/.jenkins/validate_tutorials_built.py index 1a1420f52e..3ed1e0c028 100644 --- a/.jenkins/validate_tutorials_built.py +++ b/.jenkins/validate_tutorials_built.py @@ -50,8 +50,15 @@ "intermediate_source/flask_rest_api_tutorial", "intermediate_source/text_to_speech_with_torchaudio", "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release. - "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixe - "intermediate_source/memory_format_tutorial" + "advanced_source/semi_structured_sparse", # reenable after 3303 is fixed. + "intermediate_source/mario_rl_tutorial", # reenable after 3302 is fixed + "intermediate_source/reinforcement_ppo", # reenable after 3302 is fixed + "intermediate_source/pinmem_nonblock", # reenable after 3302 is fixed + "intermediate_source/dqn_with_rnn_tutorial", # reenable after 3302 is fixed + "advanced_source/pendulum", # reenable after 3302 is fixed + "advanced_source/coding_ddpg", # reenable after 3302 is fixed + "intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed + "recipes_source/recipes/reasoning_about_shapes" # reenable after 3326 is fixed ] def tutorial_source_dirs() -> List[Path]: diff --git a/advanced_source/sharding.rst b/advanced_source/sharding.rst index 7dfeeb88bf..22ddd4dd7f 100644 --- a/advanced_source/sharding.rst +++ b/advanced_source/sharding.rst @@ -22,10 +22,6 @@ We highly recommend CUDA when using torchRec. If using CUDA: - cuda >= !sudo chmod +x Miniconda3-py37_4.9.2-Linux-x86_64.sh !sudo bash ./Miniconda3-py37_4.9.2-Linux-x86_64.sh -b -f -p /usr/local -.. 
code:: python - - # install pytorch with cudatoolkit 11.3 - !sudo conda install pytorch cudatoolkit=11.3 -c pytorch-nightly -y Installing torchRec will also install `FBGEMM `__, a collection of CUDA diff --git a/advanced_source/torch_script_custom_ops.rst b/advanced_source/torch_script_custom_ops.rst index 0a0e6e2bd7..f59aa6e883 100644 --- a/advanced_source/torch_script_custom_ops.rst +++ b/advanced_source/torch_script_custom_ops.rst @@ -190,6 +190,8 @@ Environment setup We need an installation of PyTorch and OpenCV. The easiest and most platform independent way to get both is to via Conda:: +.. these need to be updated + conda install -c pytorch pytorch conda install opencv diff --git a/beginner_source/colab.rst b/beginner_source/colab.rst index e5106a2c81..329f488466 100644 --- a/beginner_source/colab.rst +++ b/beginner_source/colab.rst @@ -11,7 +11,7 @@ PyTorch Version in Google Colab ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Wen you are running a tutorial that requires a version of PyTorch that has -just been released, that version might not be yet available in Google Colab. +jst been released, that version might not be yet available in Google Colab. To check that you have the required ``torch`` and compatible domain libraries installed, run ``!pip list``. @@ -27,7 +27,7 @@ Using Tutorial Data from Google Drive in Colab ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We've added a new feature to tutorials that allows users to open the -notebook associated with a tutorial in Google Colab. You may need to +ntebook associated with a tutorial in Google Colab. You may need to copy data to your Google drive account to get the more complex tutorials to work. diff --git a/beginner_source/introyt/captumyt.py b/beginner_source/introyt/captumyt.py index abf2391d25..60fc1333eb 100644 --- a/beginner_source/introyt/captumyt.py +++ b/beginner_source/introyt/captumyt.py @@ -106,14 +106,7 @@ - Matplotlib version 3.3.4, since Captum currently uses a Matplotlib function whose arguments have been renamed in later versions -To install Captum in an Anaconda or pip virtual environment, use the -appropriate command for your environment below: - -With ``conda``: - -.. code-block:: sh - - conda install pytorch torchvision captum flask-compress matplotlib=3.3.4 -c pytorch +To install Captum, use the appropriate command for your environment below: With ``pip``: @@ -127,51 +120,56 @@ A First Example --------------- - + To start, let’s take a simple, visual example. We’ll start with a ResNet model pretrained on the ImageNet dataset. We’ll get a test input, and use different **Feature Attribution** algorithms to examine how the input images affect the output, and see a helpful visualization of this input attribution map for some test images. 
- -First, some imports: -""" +First, some imports: -import torch -import torch.nn.functional as F -import torchvision.transforms as transforms -import torchvision.models as models +""" -import captum -from captum.attr import IntegratedGradients, Occlusion, LayerGradCam, LayerAttribution -from captum.attr import visualization as viz +import json import os, sys -import json -import numpy as np -from PIL import Image +import captum import matplotlib.pyplot as plt + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision.models as models +import torchvision.transforms as transforms +from captum.attr import ( + IntegratedGradients, + LayerAttribution, + LayerGradCam, + Occlusion, + visualization as viz, +) from matplotlib.colors import LinearSegmentedColormap +from PIL import Image ######################################################################### # Now we’ll use the TorchVision model library to download a pretrained # ResNet. Since we’re not training, we’ll place it in evaluation mode for # now. -# +# -model = models.resnet18(weights='IMAGENET1K_V1') +model = models.resnet18(weights="IMAGENET1K_V1") model = model.eval() ####################################################################### # The place where you got this interactive notebook should also have an # ``img`` folder with a file ``cat.jpg`` in it. -# +# -test_img = Image.open('img/cat.jpg') +test_img = Image.open("img/cat.jpg") test_img_data = np.asarray(test_img) plt.imshow(test_img_data) plt.show() diff --git a/beginner_source/introyt/tensorboardyt_tutorial.py b/beginner_source/introyt/tensorboardyt_tutorial.py index 49d321bd6d..06d86ed511 100644 --- a/beginner_source/introyt/tensorboardyt_tutorial.py +++ b/beginner_source/introyt/tensorboardyt_tutorial.py @@ -24,13 +24,6 @@ To run this tutorial, you’ll need to install PyTorch, TorchVision, Matplotlib, and TensorBoard. -With ``conda``: - -.. code-block:: sh - - conda install pytorch torchvision -c pytorch - conda install matplotlib tensorboard - With ``pip``: .. code-block:: sh diff --git a/conf.py b/conf.py index 05cfa11ca1..a12a05d21c 100644 --- a/conf.py +++ b/conf.py @@ -99,16 +99,10 @@ def reset_seeds(gallery_conf, fname): torch.cuda.empty_cache() - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - torch._dynamo.reset() - torch._inductor.config.force_disable_caches = True torch.manual_seed(42) torch.set_default_device(None) random.seed(10) numpy.random.seed(10) - torch.set_grad_enabled(True) - gc.collect() sphinx_gallery_conf = { diff --git a/en-wordlist.txt b/en-wordlist.txt index baf75d75ac..6a794e7786 100644 --- a/en-wordlist.txt +++ b/en-wordlist.txt @@ -698,14 +698,3 @@ TorchServe Inductor’s onwards recompilations -BiasCorrection -ELU -GELU -NNCF -OpenVINO -OpenVINOQuantizer -PReLU -Quantizer -SmoothQuant -quantizer -quantizers \ No newline at end of file diff --git a/intermediate_source/dist_tuto.rst b/intermediate_source/dist_tuto.rst index 1b622aa277..080bffc57f 100644 --- a/intermediate_source/dist_tuto.rst +++ b/intermediate_source/dist_tuto.rst @@ -523,6 +523,7 @@ for an available MPI implementation. The following steps install the MPI backend, by installing PyTorch `from source `__. +.. needs an update 1. 
Create and activate your Anaconda environment, install all the pre-requisites following `the guide `__, but do diff --git a/intermediate_source/memory_format_tutorial.py b/intermediate_source/memory_format_tutorial.py index 4ed971b91a..26bc5c9d53 100644 --- a/intermediate_source/memory_format_tutorial.py +++ b/intermediate_source/memory_format_tutorial.py @@ -376,22 +376,6 @@ def attribute(m): for (k, v) in attrs.items(): setattr(m, k, v) -import gc -import sys - -torch._dynamo.reset() -if torch.cuda.is_available(): - torch.cuda.empty_cache() - -gc.collect() - -# Clear any references to the wrapper functions -del old_attrs -del contains_cl -del print_inputs -del check_wrapper -del attribute - ###################################################################### # Work to do # ---------- diff --git a/intermediate_source/torch_compile_tutorial.py b/intermediate_source/torch_compile_tutorial.py index de31af04dc..a5c1b345e9 100644 --- a/intermediate_source/torch_compile_tutorial.py +++ b/intermediate_source/torch_compile_tutorial.py @@ -101,11 +101,8 @@ def forward(self, x): return torch.nn.functional.relu(self.lin(x)) mod = MyModule() -mod.compile() -print(mod(t)) -## or: -# opt_mod = torch.compile(mod) -# print(opt_mod(t)) +opt_mod = torch.compile(mod) +print(opt_mod(t)) ###################################################################### # torch.compile and Nested Calls @@ -138,8 +135,8 @@ def forward(self, x): return torch.nn.functional.relu(self.outer_lin(x)) outer_mod = OuterModule() -outer_mod.compile() -print(outer_mod(t)) +opt_outer_mod = torch.compile(outer_mod) +print(opt_outer_mod(t)) ###################################################################### # We can also disable some functions from being compiled by using @@ -200,12 +197,6 @@ def outer_function(): # 4. **Compile Leaf Functions First:** In complex models with multiple nested # functions and modules, start by compiling the leaf functions or modules first. # For more information see `TorchDynamo APIs for fine-grained tracing `__. -# -# 5. **Prefer ``mod.compile()`` over ``torch.compile(mod)``:** Avoids ``_orig_`` prefix issues in ``state_dict``. -# -# 6. **Use ``fullgraph=True`` to catch graph breaks:** Helps ensure end-to-end compilation, maximizing speedup -# and compatibility with ``torch.export``. - ###################################################################### # Demonstrating Speedups diff --git a/prototype_source/inductor_windows.rst b/prototype_source/inductor_windows.rst index 871cc48a33..1844e6d273 100644 --- a/prototype_source/inductor_windows.rst +++ b/prototype_source/inductor_windows.rst @@ -22,9 +22,10 @@ Install a Compiler C++ compiler is required for TorchInductor optimization, let's take Microsoft Visual C++ (MSVC) as an example. -#. Download and install `MSVC `_. +1. Download and install `MSVC `_. -#. During Installation, select **Workloads** and then **Desktop & Mobile**. Select a checkmark on **Desktop Development with C++** and install. +1. During Installation, select **Workloads** and then **Desktop & Mobile**. +1. Select a checkmark on **Desktop Development with C++** and install. .. 
image:: ../_static/img/install_msvc.png diff --git a/prototype_source/openvino_quantizer.rst b/prototype_source/openvino_quantizer.rst deleted file mode 100644 index 9412c77220..0000000000 --- a/prototype_source/openvino_quantizer.rst +++ /dev/null @@ -1,250 +0,0 @@ -PyTorch 2 Export Quantization for OpenVINO torch.compile Backend -=========================================================================== - -**Authors**: `Daniil Lyakhov `_, `Aamir Nazir `_, `Alexander Suslov `_, `Yamini Nimmagadda `_, `Alexander Kozlov `_ - -Prerequisites --------------- -- `PyTorch 2 Export Post Training Quantization `_ -- `How to Write a Quantizer for PyTorch 2 Export Quantization `_ - -Introduction --------------- - -.. note:: - - This is an experimental feature, the quantization API is subject to change. - -This tutorial demonstrates how to use ``OpenVINOQuantizer`` from `Neural Network Compression Framework (NNCF) `_ in PyTorch 2 Export Quantization flow to generate a quantized model customized for the `OpenVINO torch.compile backend `_ and explains how to lower the quantized model into the `OpenVINO `_ representation. -``OpenVINOQuantizer`` unlocks the full potential of low-precision OpenVINO kernels due to the placement of quantizers designed specifically for the OpenVINO. - -The PyTorch 2 export quantization flow uses ``torch.export`` to capture the model into a graph and performs quantization transformations on top of the ATen graph. -This approach is expected to have significantly higher model coverage, improved flexibility, and a simplified UX. -OpenVINO backend compiles the FX Graph generated by TorchDynamo into an optimized OpenVINO model. - -The quantization flow mainly includes four steps: - -- Step 1: Capture the FX Graph from the eager Model based on the `torch export mechanism `_. -- Step 2: Apply the PyTorch 2 Export Quantization flow with OpenVINOQuantizer based on the captured FX Graph. -- Step 3: Lower the quantized model into OpenVINO representation with the `torch.compile `_ API. -- Optional step 4: : Improve quantized model metrics via `quantize_pt2e `_ method. - -The high-level architecture of this flow could look like this: - -:: - - float_model(Python) Example Input - \ / - \ / - —-------------------------------------------------------- - | export | - —-------------------------------------------------------- - | - FX Graph in ATen - | - | OpenVINOQuantizer - | / - —-------------------------------------------------------- - | prepare_pt2e | - | | | - | Calibrate - | | | - | convert_pt2e | - —-------------------------------------------------------- - | - Quantized Model - | - —-------------------------------------------------------- - | Lower into Inductor | - —-------------------------------------------------------- - | - OpenVINO model - -Post Training Quantization ----------------------------- - -Now, we will walk you through a step-by-step tutorial for how to use it with `torchvision resnet18 model `_ -for post training quantization. - -Prerequisite: OpenVINO and NNCF installation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -OpenVINO and NNCF could be easily installed via `pip distribution `_: - -.. code-block:: bash - - pip install -U pip - pip install openvino, nncf - - -1. Capture FX Graph -^^^^^^^^^^^^^^^^^^^^^ - -We will start by performing the necessary imports, capturing the FX Graph from the eager module. - -.. 
code-block:: python - - import copy - import openvino.torch - import torch - import torchvision.models as models - from torch.ao.quantization.quantize_pt2e import convert_pt2e - from torch.ao.quantization.quantize_pt2e import prepare_pt2e - - import nncf.torch - - # Create the Eager Model - model_name = "resnet18" - model = models.__dict__[model_name](pretrained=True) - - # Set the model to eval mode - model = model.eval() - - # Create the data, using the dummy data here as an example - traced_bs = 50 - x = torch.randn(traced_bs, 3, 224, 224) - example_inputs = (x,) - - # Capture the FX Graph to be quantized - with torch.no_grad(), nncf.torch.disable_patching(): - exported_model = torch.export.export(model, example_inputs).module() - - - -2. Apply Quantization -^^^^^^^^^^^^^^^^^^^^^^^ - -After we capture the FX Module to be quantized, we will import the OpenVINOQuantizer. - - -.. code-block:: python - - from nncf.experimental.torch.fx import OpenVINOQuantizer - - quantizer = OpenVINOQuantizer() - -``OpenVINOQuantizer`` has several optional parameters that allow tuning the quantization process to get a more accurate model. -Below is the list of essential parameters and their description: - - -* ``preset`` - defines quantization scheme for the model. Two types of presets are available: - - * ``PERFORMANCE`` (default) - defines symmetric quantization of weights and activations - - * ``MIXED`` - weights are quantized with symmetric quantization and the activations are quantized with asymmetric quantization. This preset is recommended for models with non-ReLU and asymmetric activation functions, e.g. ELU, PReLU, GELU, etc. - - .. code-block:: python - - OpenVINOQuantizer(preset=nncf.QuantizationPreset.MIXED) - -* ``model_type`` - used to specify quantization scheme required for specific type of the model. Transformer is the only supported special quantization scheme to preserve accuracy after quantization of Transformer models (BERT, Llama, etc.). None is default, i.e. no specific scheme is defined. - - .. code-block:: python - - OpenVINOQuantizer(model_type=nncf.ModelType.Transformer) - -* ``ignored_scope`` - this parameter can be used to exclude some layers from the quantization process to preserve the model accuracy. For example, when you want to exclude the last layer of the model from quantization. Below are some examples of how to use this parameter: - - .. code-block:: python - - #Exclude by layer name: - names = ['layer_1', 'layer_2', 'layer_3'] - OpenVINOQuantizer(ignored_scope=nncf.IgnoredScope(names=names)) - - #Exclude by layer type: - types = ['Conv2d', 'Linear'] - OpenVINOQuantizer(ignored_scope=nncf.IgnoredScope(types=types)) - - #Exclude by regular expression: - regex = '.*layer_.*' - OpenVINOQuantizer(ignored_scope=nncf.IgnoredScope(patterns=regex)) - - #Exclude by subgraphs: - # In this case, all nodes along all simple paths in the graph - # from input to output nodes will be excluded from the quantization process. - subgraph = nncf.Subgraph(inputs=['layer_1', 'layer_2'], outputs=['layer_3']) - OpenVINOQuantizer(ignored_scope=nncf.IgnoredScope(subgraphs=[subgraph])) - - -* ``target_device`` - defines the target device, the specificity of which will be taken into account during optimization. The following values are supported: ``ANY`` (default), ``CPU``, ``CPU_SPR``, ``GPU``, and ``NPU``. - - .. code-block:: python - - OpenVINOQuantizer(target_device=nncf.TargetDevice.CPU) - -For further details on `OpenVINOQuantizer` please see the `documentation `_. 
- -After we import the backend-specific Quantizer, we will prepare the model for post-training quantization. -``prepare_pt2e`` folds BatchNorm operators into preceding Conv2d operators, and inserts observers in appropriate places in the model. - -.. code-block:: python - - prepared_model = prepare_pt2e(exported_model, quantizer) - -Now, we will calibrate the ``prepared_model`` after the observers are inserted in the model. - -.. code-block:: python - - # We use the dummy data as an example here - prepared_model(*example_inputs) - -Finally, we will convert the calibrated Model to a quantized Model. ``convert_pt2e`` takes a calibrated model and produces a quantized model. - -.. code-block:: python - - quantized_model = convert_pt2e(prepared_model, fold_quantize=False) - -After these steps, we finished running the quantization flow, and we will get the quantized model. - - -3. Lower into OpenVINO representation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -After that the FX Graph can utilize OpenVINO optimizations using `torch.compile(…, backend=”openvino”) `_ functionality. - -.. code-block:: python - - with torch.no_grad(), nncf.torch.disable_patching(): - optimized_model = torch.compile(quantized_model, backend="openvino") - - # Running some benchmark - optimized_model(*example_inputs) - - - -The optimized model is using low-level kernels designed specifically for Intel CPU. -This should significantly speed up inference time in comparison with the eager model. - -4. Optional: Improve quantized model metrics -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -NNCF implements advanced quantization algorithms like `SmoothQuant `_ and `BiasCorrection `_, which help -to improve the quantized model metrics while minimizing the output discrepancies between the original and compressed models. -These advanced NNCF algorithms can be accessed via the NNCF `quantize_pt2e` API: - -.. code-block:: python - - from nncf.experimental.torch.fx import quantize_pt2e - - calibration_loader = torch.utils.data.DataLoader(...) - - - def transform_fn(data_item): - images, _ = data_item - return images - - - calibration_dataset = nncf.Dataset(calibration_loader, transform_fn) - quantized_model = quantize_pt2e( - exported_model, quantizer, calibration_dataset, smooth_quant=True, fast_bias_correction=False - ) - - -For further details, please see the `documentation `_ -and a complete `example on Resnet18 quantization `_. - -Conclusion ------------- - -This tutorial introduces how to use torch.compile with the OpenVINO backend and the OpenVINO quantizer. -For more details on NNCF and the NNCF Quantization Flow for PyTorch models, refer to the `NNCF Quantization Guide `_. -For additional information, check out the `OpenVINO Deployment via torch.compile Documentation `_. diff --git a/prototype_source/prototype_index.rst b/prototype_source/prototype_index.rst index 5d6a1b5ea9..a0f7706c61 100644 --- a/prototype_source/prototype_index.rst +++ b/prototype_source/prototype_index.rst @@ -96,13 +96,6 @@ Prototype features are not available as part of binary distributions like PyPI o :link: ../prototype/pt2e_quant_x86_inductor.html :tags: Quantization -.. customcarditem:: - :header: PyTorch 2 Export Quantization for OpenVINO torch.compile Backend - :card_description: Learn how to use PT2 Export Quantization with OpenVINO torch.compile Backend. - :image: ../_static/img/thumbnails/cropped/generic-pytorch-logo.png - :link: ../prototype/openvino_quantizer.html - :tags: Quantization - .. 
customcarditem:: :header: PyTorch 2 Export Quantization with Intel GPU Backend through Inductor :card_description: Learn how to use PT2 Export Quantization with Intel GPU Backend through Inductor. diff --git a/recipes_source/intel_neural_compressor_for_pytorch.rst b/recipes_source/intel_neural_compressor_for_pytorch.rst index 02ce3d7b37..3c108afd9f 100755 --- a/recipes_source/intel_neural_compressor_for_pytorch.rst +++ b/recipes_source/intel_neural_compressor_for_pytorch.rst @@ -50,9 +50,6 @@ Installation # install nightly version from pip pip install -i https://test.pypi.org/simple/ neural-compressor - # install stable version from from conda - conda install neural-compressor -c conda-forge -c intel - *Supported python versions are 3.6 or 3.7 or 3.8 or 3.9* Usages