From 8327283285f2127d951c9f8e9c20a30a47444ff4 Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 13:07:55 +0100 Subject: [PATCH 01/14] Create equivalent pyproject toml --- pyproject.toml | 327 +++++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 314 ----------------------------------------------- setup.py | 4 - 3 files changed, 327 insertions(+), 318 deletions(-) create mode 100644 pyproject.toml delete mode 100644 setup.cfg delete mode 100644 setup.py diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..10fd4b730 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,327 @@ +############################################################################### +# MLCommons Algorithmic Efficiency. # +############################################################################### + +[project] +name = "algorithmic_efficiency" +dynamic = ["version"] +description = "Codebase for the AlgoPerf: Training Algorithms benchmark" +authors = [ + { name = "MLCommons Algorithms Working Group", email = "algorithms@mlcommons.org" }, +] +license = { text = "Apache 2.0" } +readme = "README.md" +requires-python = ">=3.8" +keywords = [ + "algoperf", + "algorithmic-efficiency", + "machine-learning", + "deep-learning", + "optimization", + "benchmarking", + "training-methods", +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "absl-py==1.4.0", + "networkx==3.1", + "docker==7.0.0", + "numpy>=1.23", + "pandas>=2.0.1", + "tensorflow==2.12.0", + "tensorflow-datasets==4.9.2", + "tensorflow-probability==0.20.0", + "tensorflow-addons==0.20.0", + "gputil==1.4.0", + "psutil==5.9.5", + "clu==0.0.7", + "matplotlib>=3.7.2", + "tabulate==0.9.0", +] + +[build-system] +requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +packages = ["algorithmic_efficiency"] +py-modules = ["submission_runner"] +include-package-data = true +zip-safe = false + +[tool.setuptools.dynamic] +version = { attr = "algorithmic_efficiency.__version__" } + +############################################################################### +# (Optional) Dependencies # +############################################################################### +[project.optional-dependencies] +# All workloads +full = [ + "algorithmic_efficiency[criteo1tb,fastmri,ogbg,librispeech_conformer,wmt]", +] +# All workloads plus development dependencies +full_dev = ["algorithmic_efficiency[full,dev]"] +# Dependencies for developing the package +dev = [ + "isort==5.12.0", + "pylint==2.17.4", + "pytest==7.3.1", + "yapf==0.33.0", + "pre-commit==3.3.1", +] + +# Workloads +criteo1tb = ["scikit-learn==1.2.2"] +fastmri = ["h5py==3.8.0", "scikit-image==0.20.0"] +ogbg = ["jraph==0.0.6.dev0", "scikit-learn==1.2.2"] +librispeech_conformer = [ + "sentencepiece==0.1.99", + "tensorflow-text==2.12.1", + "pydub==0.25.1", +] +wmt = ["sentencepiece==0.1.99", "tensorflow-text==2.12.1", "sacrebleu==1.3.1"] + +# Frameworks +jax_core_deps = [ + "flax==0.6.10", + "optax==0.1.5", + # Todo(kasimbeg): verify if this is necessary after we upgrade jax. 
+    "chex==0.1.7",
+    "ml_dtypes==0.2.0",
+    "protobuf==4.25.3",
+]
+jax_cpu = [
+    "jax==0.4.10",
+    "jaxlib==0.4.10",
+    "algorithmic_efficiency[jax_core_deps]",
+]
+jax_gpu = [
+    "jax==0.4.10",
+    "jaxlib==0.4.10+cuda12.cudnn88",
+    "algorithmic_efficiency[jax_core_deps]",
+]
+pytorch_cpu = ["torch==2.1.0", "torchvision==0.16.0"]
+pytorch_gpu = [
+    "torch==2.1.0",
+    "torchvision==0.16.0",
+] # Note: omitting the cuda suffix and installing from the appropriate wheel will result in using the locally installed CUDA.
+wandb = ["wandb==0.16.5"]
+
+###############################################################################
+#                           Linting Configurations                            #
+###############################################################################
+
+# yapf configuration
+[tool.yapf]
+based_on_style = "yapf"
+each_dict_entry_on_separate_line = false
+split_all_top_level_comma_separated_values = true
+
+# isort configuration
+[tool.isort]
+profile = "google"
+
+# pylint configuration
+[tool.pylint.MASTER]
+persistent = false
+ignore = "get_references_web.py,get_references_web_single_group.py"
+
+[tool.pylint.REPORTS]
+reports = false
+msg-template = "{msg_id}:{line:3} {obj}: {msg} [{symbol}]"
+
+[tool.pylint.MESSAGES_CONTROL]
+enable = "indexing-exception,old-raise-syntax"
+
+[tool.pylint.BASIC]
+# Required attributes for module, separated by a comma
+#required-attributes=
+# Regular expression which should only match the name
+# of functions or classes which do not require a docstring.
+no-docstring-rgx = "(__.*__|main)"
+# Min length in lines of a function that requires a docstring.
+docstring-min-length = 10
+# Regular expression which should only match correct module names. The
+# leading underscore is sanctioned for private modules by Google's style
+# guide.
+#
+# There are exceptions to the basic rule (_?[a-z][a-z0-9_]*) to cover
+# requirements of Python's module system.
+module-rgx = "^(_?[a-z][a-z0-9_]*)|__init__$"
+# Regular expression which should only match correct module level names
+const-rgx = "^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$"
+# Regular expression which should only match correct class attribute
+class-attribute-rgx = "^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$"
+# Regular expression which should only match correct class names
+class-rgx = "^_?[A-Z][a-zA-Z0-9]*$"
+# Regular expression which should only match correct function names.
+# 'camel_case' and 'snake_case' group names are used for consistency of naming
+# styles across functions and methods.
+function-rgx = "^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$"
+# Regular expression which should only match correct method names.
+# 'camel_case' and 'snake_case' group names are used for consistency of naming
+# styles across functions and methods. 'exempt' indicates a name which is
+# consistent with all naming styles.
+method-rgx = "(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|_testDatasetSize|setUpClass|test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|(?:test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$"
+# Regular expression which should only match correct instance attribute names
+attr-rgx = "^_{0,2}[a-z][a-z0-9_]*$"
+# Regular expression which should only match correct argument names
+argument-rgx = "^[a-z][a-z0-9_]*$"
+# Regular expression which should only match correct variable names
+variable-rgx = "^[a-z][a-z0-9_]*$"
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx = "^[a-z][a-z0-9_]*$"
+# Good variable names which should always be accepted, separated by a comma
+good-names = "main,_"
+# Bad variable names which should always be refused, separated by a comma
+bad-names = ""
+# List of builtins function names that should not be used, separated by a comma
+#bad-functions=input,apply,reduce
+# List of decorators that define properties, such as abc.abstractproperty.
+property-classes = "abc.abstractproperty"
+
+[tool.pylint.typecheck]
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members = true
+
+# List of decorators that create context managers from functions, such as
+# contextlib.contextmanager.
+contextmanager-decorators = [
+    "contextlib.contextmanager",
+    "contextlib2.contextmanager",
+]
+
+[tool.pylint.VARIABLES]
+# Tells whether we should check for unused import in __init__ files.
+init-import = false
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx = "^\\*{0,2}(_$|unused_|dummy_)"
+
+# List of additional names supposed to be defined in builtins.
+additional-builtins = []
+
+[tool.pylint.CLASSES]
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods = ["__init__", "__new__", "setUp"]
+
+# Valid names for the first argument to a class method.
+valid-classmethod-first-arg = ["cls", "class_"]
+
+[tool.pylint.EXCEPTIONS]
+overgeneral-exceptions = [
+    "builtins.StandardError",
+    "builtins.Exception",
+    "builtins.BaseException",
+]
+
+[tool.pylint.IMPORTS]
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules = ["regsub", "TERMIOS", "Bastion", "rexec", "sets"]
+
+[tool.pylint.FORMAT]
+# List of checkers and warnings to disable.
+disable = [
+    "abstract-method",
+    "access-member-before-definition",
+    "arguments-differ",
+    "assignment-from-no-return",
+    "attribute-defined-outside-init",
+    "bad-mcs-classmethod-argument",
+    "bad-option-value",
+    "c-extension-no-member",
+    "consider-merging-isinstance",
+    "consider-using-dict-comprehension",
+    "consider-using-enumerate",
+    "consider-using-in",
+    "consider-using-set-comprehension",
+    "consider-using-ternary",
+    "deprecated-method",
+    "design",
+    "file-ignored",
+    "fixme",
+    "global-statement",
+    "import-error",
+    "inconsistent-return-statements",
+    "invalid-unary-operand-type",
+    "len-as-condition",
+    "locally-disabled",
+    "locally-enabled",
+    "misplaced-comparison-constant",
+    "missing-docstring",
+    "multiple-imports",
+    "no-else-return",
+    "no-member",
+    "no-name-in-module",
+    "no-self-use",
+    "no-value-for-parameter",
+    "not-an-iterable",
+    "not-context-manager",
+    "pointless-except",
+    "protected-access",
+    "redefined-argument-from-local",
+    "signature-differs",
+    "similarities",
+    "simplifiable-if-expression",
+    "star-args",
+    "super-init-not-called",
+    "suppressed-message",
+    "too-many-function-args",
+    "trailing-comma-tuple",
+    "trailing-newlines",
+    "ungrouped-imports",
+    "unnecessary-pass",
+    "unsubscriptable-object",
+    "unused-argument",
+    "useless-object-inheritance",
+    "useless-return",
+    "useless-suppression",
+    "wrong-import-order",
+    "wrong-import-position",
+    "unneeded-not",
+    "unexpected-keyword-arg",
+    "redundant-keyword-arg",
+    "unspecified-encoding",
+    "logging-fstring-interpolation",
+    "consider-using-f-string",
+    "use-dict-literal",
+]
+# Maximum number of characters on a single line.
+max-line-length = 80
+ignore-long-lines = "(?x)(^\\s*(import|from)\\s|^\\s*(\\#\\ )?<?https?://\\S+>?$|^[a-zA-Z_][a-zA-Z0-9_]*\\s*=\\s*('[^']\\S+'|\"[^\"]\\S+\"))"
+# Maximum number of lines in a module
+max-module-lines = 99999
+# String used as indentation unit. We differ from PEP8's normal 4 spaces.
+indent-string = '  '
+single-line-if-stmt = true
+# Do not warn about multiple statements on a single line for constructs like
+# if test: stmt
+
+[tool.pylint.LOGGING]
+logging-modules = "logging,absl.logging"
+# Add logging modules.
+
+[tool.pylint.MISCELLANEOUS]
+# Maximum line length for lambdas
+#short-func-length=1
+# List of module members that should be marked as deprecated.
+# All of the string functions are listed in 4.1.4 Deprecated string functions
+# in the Python 2.4 docs.
+#deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc,sys.maxint
+# List of exceptions that do not need to be mentioned in the Raises section of
+# a docstring.
+#ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError
+# Number of spaces of indent required when the last token on the preceding line
+# is an open (, [, or {.
+indent-after-paren = 4

diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 4afefd164..000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,314 +0,0 @@
-###############################################################################
-# MLCommons Algorithmic Efficiency.
# -############################################################################### - -[metadata] -name = algorithmic_efficiency -version = attr: algorithmic_efficiency.__version__ -description = MLCommons Algorithmic Efficiency -url = https://github.com/mlcommons/algorithmic-efficiency -author = MLCommons Algorithmic Efficiency -author_email = algorithms@mlcommons.org -license = Apache 2.0 -long_description = file: README.md -long_description_content_type = text/markdown -keywords = algorithmic-efficiency, machine-learning, deep-learning, - optimization, benchmarking, training-methods -platforms = any -classifiers = - Development Status :: 3 - Alpha - Intended Audience :: Developers - Intended Audience :: Science/Research - License :: OSI Approved :: Apache Software License - Operating System :: OS Independent - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Topic :: Scientific/Engineering :: Artificial Intelligence - -[options] -zip_safe = False -packages = find: -include_package_data = True -setup_requires = - setuptools_scm -# Dependencies of the project: -install_requires = - absl-py==1.4.0 - # Pin to avoid unpinned install in dependencies that requires Python>=3.9. - networkx==3.1 - docker==7.0.0 - numpy>=1.23 - pandas>=2.0.1 - tensorflow==2.12.0 - tensorflow-datasets==4.9.2 - tensorflow-probability==0.20.0 - tensorflow-addons==0.20.0 - gputil==1.4.0 - psutil==5.9.5 - clu==0.0.7 - matplotlib>=3.7.2 - tabulate==0.9.0 -python_requires = >=3.8 - - -############################################################################### -# Additional Dependencies # -############################################################################### - -[options.extras_require] -# Add extra dependencies, e.g. to run tests or for the different frameworks. -# Use as `pip install -e '.[jax_gpu]' -f https://storage.googleapis.com/jax-releases/jax_releases.html` -# or `pip install -e '.[dev]'` - -# Bundled installs # - -# All workloads -full = - %(criteo1tb)s - %(fastmri)s - %(ogbg)s - %(librispeech_conformer)s - %(wmt)s - -# All workloads plus development dependencies -full_dev = - %(full)s - %(dev)s - - -# Dependencies for developing the package -dev = - isort==5.12.0 - pylint==2.17.4 - pytest==7.3.1 - yapf==0.33.0 - pre-commit==3.3.1 - -# Workloads # -criteo1tb = - scikit-learn==1.2.2 - -fastmri = - h5py==3.8.0 - scikit-image==0.20.0 - -ogbg = - jraph==0.0.6.dev0 - scikit-learn==1.2.2 - -librispeech_conformer = - sentencepiece==0.1.99 - tensorflow-text==2.12.1 - pydub==0.25.1 - -wmt = - sentencepiece==0.1.99 - tensorflow-text==2.12.1 - sacrebleu==1.3.1 - -# Frameworks # - -# JAX Core -jax_core_deps = - flax==0.6.10 - optax==0.1.5 - # Fix chex (optax dependency) version. - # Not fixing it can raise dependency issues with our - # jax version. - # Todo(kasimbeg): verify if this is necessary after we - # upgrade jax. - chex==0.1.7 - ml_dtypes==0.2.0 - protobuf==4.25.3 - - -# JAX CPU -jax_cpu = - jax==0.4.10 - jaxlib==0.4.10 - %(jax_core_deps)s - -# JAX GPU -# Note this installs both jax and jaxlib. -jax_gpu = - jax==0.4.10 - jaxlib==0.4.10+cuda12.cudnn88 - %(jax_core_deps)s - -# PyTorch CPU -pytorch_cpu = - torch==2.1.0 - torchvision==0.16.0 - -# PyTorch GPU -# Note: omit the cuda suffix and installing from the appropriate -# wheel will result in using locally installed CUDA. 
-pytorch_gpu =
-    torch==2.1.0
-    torchvision==0.16.0
-
-# wandb
-wandb =
-    wandb==0.16.5
-
-###############################################################################
-#                           Linting Configurations                            #
-###############################################################################
-
-# yapf configuration
-[yapf]
-based_on_style = yapf
-each_dict_entry_on_separate_line = false
-split_all_top_level_comma_separated_values = true
-
-
-# isort configuration
-[isort]
-profile=google
-
-
-# pylint configuration
-[pylint.MASTER]
-persistent=no   # Pickle collected data for later comparisons.
-#cache-size=500 # Set the cache size for astng objects.
-# Ignore Py3 files
-ignore=get_references_web.py,get_references_web_single_group.py
-[pylint.REPORTS]
-# Set the output format.
-# output-format=sorted-text
-# Put messages in a separate file for each module / package specified on the
-# command line instead of printing them on stdout. Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-#files-output=no
-# Tells whether to display a full report or only the messages.
-reports=no
-# Disable the report(s) with the given id(s).
-#disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923
-# Error message template (continued on second line)
-msg-template={msg_id}:{line:3} {obj}: {msg} [{symbol}]
-[pylint.'MESSAGES CONTROL']
-# List of checkers and warnings to enable.
-enable=indexing-exception,old-raise-syntax
-
-
-[pylint.BASIC]
-# Required attributes for module, separated by a comma
-#required-attributes=
-# Regular expression which should only match the name
-# of functions or classes which do not require a docstring.
-no-docstring-rgx=(__.*__|main)
-# Min length in lines of a function that requires a docstring.
-docstring-min-length=10
-# Regular expression which should only match correct module names. The
-# leading underscore is sanctioned for private modules by Google's style
-# guide.
-#
-# There are exceptions to the basic rule (_?[a-z][a-z0-9_]*) to cover
-# requirements of Python's module system.
-module-rgx=^(_?[a-z][a-z0-9_]*)|__init__$
-# Regular expression which should only match correct module level names
-const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-# Regular expression which should only match correct class attribute
-class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-# Regular expression which should only match correct class names
-class-rgx=^_?[A-Z][a-zA-Z0-9]*$
-# Regular expression which should only match correct function names.
-# 'camel_case' and 'snake_case' group names are used for consistency of naming
-# styles across functions and methods.
-function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
-# Regular expression which should only match correct method names.
-# 'camel_case' and 'snake_case' group names are used for consistency of naming
-# styles across functions and methods. 'exempt' indicates a name which is
-# consistent with all naming styles.
-method-rgx=(?x)
-    ^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase
-        |tearDownTestCase|setupSelf|tearDownClass|_testDatasetSize|setUpClass
-        |(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)
-    |(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)
-    |(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
-# Regular expression which should only match correct instance attribute names
-attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
-# Regular expression which should only match correct argument names
-argument-rgx=^[a-z][a-z0-9_]*$
-# Regular expression which should only match correct variable names
-variable-rgx=^[a-z][a-z0-9_]*$
-# Regular expression which should only match correct list comprehension /
-# generator expression variable names
-inlinevar-rgx=^[a-z][a-z0-9_]*$
-# Good variable names which should always be accepted, separated by a comma
-good-names=main,_
-# Bad variable names which should always be refused, separated by a comma
-bad-names=
-# List of builtins function names that should not be used, separated by a comma
-#bad-functions=input,apply,reduce
-# List of decorators that define properties, such as abc.abstractproperty.
-property-classes=abc.abstractproperty
-[pylint.TYPECHECK]
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-# List of decorators that create context managers from functions, such as
-# contextlib.contextmanager.
-contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
-[pylint.VARIABLES]
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-# A regular expression matching names used for dummy variables (i.e. not used).
-dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-[pylint.CLASSES]
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-# "class_" is also valid for the first argument to a class method.
-valid-classmethod-first-arg=cls,class_
-[pylint.EXCEPTIONS]
-overgeneral-exceptions=builtins.StandardError,builtins.Exception,builtins.BaseException
-[pylint.IMPORTS]
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,TERMIOS,Bastion,rexec,sets
-[pylint.FORMAT]
-# List of checkers and warnings to disable.
-disable=abstract-method,access-member-before-definition,arguments-differ,assignment-from-no-return,attribute-defined-outside-init,bad-mcs-classmethod-argument,bad-option-value,c-extension-no-member,consider-merging-isinstance,consider-using-dict-comprehension,consider-using-enumerate,consider-using-in,consider-using-set-comprehension,consider-using-ternary,deprecated-method,design,file-ignored,fixme,global-statement,import-error,inconsistent-return-statements,invalid-unary-operand-type,len-as-condition,locally-disabled,locally-enabled,misplaced-comparison-constant,missing-docstring,multiple-imports,no-else-return,no-member,no-name-in-module,no-self-use,no-value-for-parameter,not-an-iterable,not-context-manager,pointless-except,protected-access,redefined-argument-from-local,signature-differs,similarities,simplifiable-if-expression,star-args,super-init-not-called,suppressed-message,too-many-function-args,trailing-comma-tuple,trailing-newlines,ungrouped-imports,unnecessary-pass,unsubscriptable-object,unused-argument,useless-object-inheritance,useless-return,useless-suppression,wrong-import-order,wrong-import-position,unneeded-not,unexpected-keyword-arg,redundant-keyword-arg,unspecified-encoding,logging-fstring-interpolation,consider-using-f-string,use-dict-literal
-
-# Maximum number of characters on a single line.
-max-line-length=80
-# Regexp for a line that is allowed to be longer than the limit.
-# This "ignore" regex is today composed of several independent parts:
-# (1) Long import lines
-# (2) URLs in comments or pydocs. Detecting URLs by regex is a hard problem and
-# no amount of tweaking will make a perfect regex AFAICT. This one is a good
-# compromise.
-# (3) Constant string literals at the start of files don't need to be broken
-# across lines. Allowing long paths and urls to be on a single
-# line. Also requires that the string not be a triplequoted string.
-ignore-long-lines=(?x)
-  (^\s*(import|from)\s
-   |^\s*(\#\ )?<?https?://\S+>?$
-   |^[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*("[^"]\S+"|'[^']\S+')
-  )
-# Maximum number of lines in a module
-max-module-lines=99999
-# String used as indentation unit. We differ from PEP8's normal 4 spaces.
-indent-string='  '
-# Do not warn about multiple statements on a single line for constructs like
-# if test: stmt
-single-line-if-stmt=y
-[pylint.LOGGING]
-# Add logging modules.
-logging-modules=logging,absl.logging
-[pylint.MISCELLANEOUS]
-# Maximum line length for lambdas
-#short-func-length=1
-# List of module members that should be marked as deprecated.
-# All of the string functions are listed in 4.1.4 Deprecated string functions
-# in the Python 2.4 docs.
-#deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc,sys.maxint
-# List of exceptions that do not need to be mentioned in the Raises section of
-# a docstring.
-#ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError
-# Number of spaces of indent required when the last token on the preceding line
-# is an open (, [, or {.
-indent-after-paren=4

diff --git a/setup.py b/setup.py
deleted file mode 100644
index a4ead8f48..000000000
--- a/setup.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from setuptools import setup
-
-if __name__ == "__main__":
-    setup()

From c8dc704ec42a77cf6ac84c83fbcada13c9b41a1c Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 13:21:11 +0100
Subject: [PATCH 02/14] yapf requires toml

---
 .github/workflows/linting.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 89b5ef288..e49686358 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -50,7 +50,7 @@ jobs:
       - name: Install yapf
         run: |
           python -m pip install --upgrade pip
-          pip install yapf==0.32
+          pip install yapf==0.32 toml
      - name: Run yapf
        run: |
          yapf . --diff --recursive

From 50658bc1e23cb5f4076f6565c3629d0dc1a8e1fa Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 13:55:34 +0100
Subject: [PATCH 03/14] Revert to auto-finding packages (includes `tests/`)

---
 pyproject.toml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 10fd4b730..3ff79bece 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,11 +54,13 @@ requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
 build-backend = "setuptools.build_meta"
 
 [tool.setuptools]
-packages = ["algorithmic_efficiency"]
 py-modules = ["submission_runner"]
 include-package-data = true
 zip-safe = false
 
+[tool.setuptools.packages]
+find = {}  # Scanning implicit namespaces is active by default
+
 [tool.setuptools.dynamic]
 version = { attr = "algorithmic_efficiency.__version__" }

From 616a0f499967b95db1ff2f47b36effcee975fc5c Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 14:36:33 +0100
Subject: [PATCH 04/14] Match version to GH

---
 CONTRIBUTING.md                    | 6 ++++++
 algorithmic_efficiency/__init__.py | 2 +-
 pyproject.toml                     | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 364bbee62..bc5d004e9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -22,6 +22,7 @@
   - [Style Testing](#style-testing)
   - [Unit and Integration Tests](#unit-and-integration-tests)
   - [Regression Tests](#regression-tests)
+  - [Versioning](#versioning)
 
 ## Contributing to MLCommons
 
@@ -276,3 +277,8 @@ To run a regression test:
 2. Turn on the self-hosted runner.
 3. Run the self-hosted runner application for the runner to accept jobs.
 4. Open a pull request into main to trigger the workflow.
+
+### Versioning
+
+The package version is centrally defined in `algorithmic_efficiency/__init__.py`.
+When releasing a new version, update the version number in `algorithmic_efficiency/__init__.py` and create a new release in the GitHub UI.
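As context for PATCH 04: with `dynamic = ["version"]` and the `[tool.setuptools.dynamic]` table from PATCH 01, setuptools resolves the version at build time by importing the named attribute. A minimal sketch of what that lookup amounts to (illustrative only; the real logic lives inside setuptools):

```python
# Rough equivalent of setuptools' `version = { attr = ... }` resolution
# (illustrative sketch, not the actual setuptools implementation).
import importlib

pkg = importlib.import_module("algorithmic_efficiency")
build_version = pkg.__version__  # "0.1.5" after this patch
print(build_version)
```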
diff --git a/algorithmic_efficiency/__init__.py b/algorithmic_efficiency/__init__.py
index a0e473e1d..05485dcaa 100644
--- a/algorithmic_efficiency/__init__.py
+++ b/algorithmic_efficiency/__init__.py
@@ -1,3 +1,3 @@
 """Algorithmic Efficiency."""
 
-__version__ = '0.1.0'
+__version__ = "0.1.5"

diff --git a/pyproject.toml b/pyproject.toml
index 3ff79bece..eb3271ee3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,7 +50,7 @@ dependencies = [
 ]
 
 [build-system]
-requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
+requires = ["setuptools>=45"]
 build-backend = "setuptools.build_meta"
 
 [tool.setuptools]

From bad76f55311715144476542454d1fcde60f509e5 Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 15:13:08 +0100
Subject: [PATCH 05/14] Let `setuptools_scm` handle versioning.

---
 .gitignore                         |  4 +++-
 CONTRIBUTING.md                    | 11 +++++++++--
 algorithmic_efficiency/__init__.py |  4 +++-
 pyproject.toml                     |  6 +++---
 4 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/.gitignore b/.gitignore
index d2e212366..85063bcf4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,4 +23,6 @@ wandb/
 scoring/plots/
 
 !scoring/test_data/experiment_dir/study_0/mnist_jax/trial_0/eval_measurements.csv
-!scoring/test_data/experiment_dir/study_0/mnist_jax/trial_1/eval_measurements.csv
\ No newline at end of file
+!scoring/test_data/experiment_dir/study_0/mnist_jax/trial_1/eval_measurements.csv
+
+algorithmic_efficiency/_version.py
\ No newline at end of file

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bc5d004e9..a93289852 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -280,5 +280,12 @@
 
 ### Versioning
 
-The package version is centrally defined in `algorithmic_efficiency/__init__.py`.
-When releasing a new version, update the version number in `algorithmic_efficiency/__init__.py` and create a new release in the GitHub UI.
+The package version is automatically determined by the `setuptools_scm` package based on the last git tag.
+It follows the structure `major.minor.patch` + `devN`, where `N` is the number of commits since the last tag.
+It automatically increments the patch version (i.e., it guesses the next version) if there are commits after the last tag.
+Additionally, if there are uncommitted changes, the version will include a suffix, separated by a `+` character, that contains the last commit hash plus the date of the dirty workdir (see [setuptools_scm's documentation](https://setuptools-scm.readthedocs.io/en/latest/extending/#setuptools_scmlocal_scheme) with the default version and local scheme).
+You can check what version `setuptools_scm` is creating by running `python -m setuptools_scm`.
+
+To create a new version, create a new release (and tag) in the GitHub UI.
+The package version is automatically updated to the new version.
+Once the package is installed, the version can be accessed as the package attribute `algorithmic_efficiency.__version__`, i.e. via `python -c "import algorithmic_efficiency; print(algorithmic_efficiency.__version__)"`.
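Since `algorithmic_efficiency/_version.py` is generated at install time and git-ignored, its contents never appear in a diff. Roughly, the file that `setuptools_scm` writes (and which the next hunk imports from) looks like the following sketch; the values are illustrative and the exact fields vary with the `setuptools_scm` version:

```python
# algorithmic_efficiency/_version.py -- generated by setuptools_scm; do not edit.
# Illustrative contents only.
__version__ = version = "0.1.6.dev3+g8327283.d20250115"
__version_tuple__ = version_tuple = (0, 1, 6, "dev3", "g8327283.d20250115")
```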
diff --git a/algorithmic_efficiency/__init__.py b/algorithmic_efficiency/__init__.py index 05485dcaa..7d54f8290 100644 --- a/algorithmic_efficiency/__init__.py +++ b/algorithmic_efficiency/__init__.py @@ -1,3 +1,5 @@ """Algorithmic Efficiency.""" -__version__ = "0.1.5" +from ._version import version as __version__ + +__all__ = ["__version__"] diff --git a/pyproject.toml b/pyproject.toml index eb3271ee3..2c6d28458 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,7 +50,7 @@ dependencies = [ ] [build-system] -requires = ["setuptools>=45"] +requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] build-backend = "setuptools.build_meta" [tool.setuptools] @@ -61,8 +61,8 @@ zip-safe = false [tool.setuptools.packages] find = {} # Scanning implicit namespaces is active by default -[tool.setuptools.dynamic] -version = { attr = "algorithmic_efficiency.__version__" } +[tool.setuptools_scm] +version_file = "algorithmic_efficiency/_version.py" ############################################################################### # (Optional) Dependencies # From ff4a457ea6eea6e2603887066e3e2735a8867d2a Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 15:23:37 +0100 Subject: [PATCH 06/14] Fix version test --- tests/version_test.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/version_test.py b/tests/version_test.py index 9f7006aab..7e3001324 100644 --- a/tests/version_test.py +++ b/tests/version_test.py @@ -4,10 +4,13 @@ def test_version_attribute(): - """Check whether __version__ exists and is a valid string.""" + """Check whether __version__ exists and is a valid string.""" - assert hasattr(algorithmic_efficiency, "__version__") - version = algorithmic_efficiency.__version__ - assert isinstance(version, str) - version_elements = version.split(".") - assert all(el.isnumeric() for el in version_elements) + assert hasattr(algorithmic_efficiency, "__version__") + version = algorithmic_efficiency.__version__ + assert isinstance(version, str) + version_elements = version.split(".") + print(version_elements) + # Only check the first three elements, i.e. major, minor, patch. + # The remaining elements contain commit hash and dirty status. 
+ assert all(el.isnumeric() for el in version_elements[0:3]) From f97c880bb8a425430666257f3cdac2ef5a6a8187 Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 15:25:06 +0100 Subject: [PATCH 07/14] Match file name of version test to the other tests --- .github/workflows/CI.yml | 2 +- tests/{version_test.py => test_version.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename tests/{version_test.py => test_version.py} (100%) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 05d94e896..e05b74eef 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -199,7 +199,7 @@ jobs: pip install .[pytorch_cpu] - name: Run pytest tests run: | - pytest -vx tests/version_test.py + pytest -vx tests/test_version.py pytest -vx tests/test_num_params.py pytest -vx tests/test_param_shapes.py pytest -vx tests/test_param_types.py diff --git a/tests/version_test.py b/tests/test_version.py similarity index 100% rename from tests/version_test.py rename to tests/test_version.py From f98b55480041a31d5dc07f0af26937eee8750a49 Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 15:38:17 +0100 Subject: [PATCH 08/14] Fix linting --- pyproject.toml | 4 +++- tests/test_version.py | 18 +++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2c6d28458..0788d48a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -129,6 +129,8 @@ wandb = ["wandb==0.16.5"] based_on_style = "yapf" each_dict_entry_on_separate_line = false split_all_top_level_comma_separated_values = true +[tool.yapfignore] +ignore_patterns = ["algorithmic_efficiency/_version.py"] # isort configuration [tool.isort] @@ -137,7 +139,7 @@ profile = "google" # pylint configuration [tool.pylint.MASTER] persistent = false -ignore = "get_references_web.py,get_references_web_single_group.py" +ignore = "get_references_web.py,get_references_web_single_group.py,_version.py" [tool.pylint.REPORTS] reports = false diff --git a/tests/test_version.py b/tests/test_version.py index 7e3001324..37aa26ea9 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -4,13 +4,13 @@ def test_version_attribute(): - """Check whether __version__ exists and is a valid string.""" + """Check whether __version__ exists and is a valid string.""" - assert hasattr(algorithmic_efficiency, "__version__") - version = algorithmic_efficiency.__version__ - assert isinstance(version, str) - version_elements = version.split(".") - print(version_elements) - # Only check the first three elements, i.e. major, minor, patch. - # The remaining elements contain commit hash and dirty status. - assert all(el.isnumeric() for el in version_elements[0:3]) + assert hasattr(algorithmic_efficiency, "__version__") + version = algorithmic_efficiency.__version__ + assert isinstance(version, str) + version_elements = version.split(".") + print(version_elements) + # Only check the first three elements, i.e. major, minor, patch. + # The remaining elements contain commit hash and dirty status. + assert all(el.isnumeric() for el in version_elements[0:3]) From 8171a32e12047c448003f75041b0886a6f09365c Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 16:00:28 +0100 Subject: [PATCH 09/14] Update version test to only check major and minor elements, excluding patch version. 
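To see why the patch element cannot be asserted numeric, consider how `setuptools_scm`-derived strings split; a small sketch with hypothetical version values:

```python
# Hypothetical setuptools_scm outputs: a tagged release, and the untagged
# fallback form. Only the major and minor elements are reliably numeric;
# later elements may hold "dev" counters, commit hashes, or date suffixes.
for v in ("0.1.5", "0.1.dev1+g8327283.d20250115"):
  elements = v.split(".")
  print(elements[:2], all(el.isnumeric() for el in elements[:2]))
```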
--- tests/test_version.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_version.py b/tests/test_version.py index 37aa26ea9..ef01d4f32 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -11,6 +11,7 @@ def test_version_attribute(): assert isinstance(version, str) version_elements = version.split(".") print(version_elements) - # Only check the first three elements, i.e. major, minor, patch. + # Only check the first two elements, i.e. major, minor + # (patch is not checked as it is not required). # The remaining elements contain commit hash and dirty status. - assert all(el.isnumeric() for el in version_elements[0:3]) + assert all(el.isnumeric() for el in version_elements[0:2]) From 96cc471df4b85d22595907a0b18d268a10186141 Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 16:04:03 +0100 Subject: [PATCH 10/14] Rename job in regression tests workflow from `criteo_resnet_pytorch` to `criteo_embed_init_pytorch` due to likely typo. --- .github/workflows/regression_tests_variants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/regression_tests_variants.yml b/.github/workflows/regression_tests_variants.yml index ef1585d0d..b234575b7 100644 --- a/.github/workflows/regression_tests_variants.yml +++ b/.github/workflows/regression_tests_variants.yml @@ -72,7 +72,7 @@ jobs: run: | docker pull us-central1-docker.pkg.dev/training-algorithms-external/mlcommons-docker-repo/algoperf_pytorch_${{ github.head_ref || github.ref_name }} docker run -v $HOME/data/:/data/ -v $HOME/experiment_runs/:/experiment_runs -v $HOME/experiment_runs/logs:/logs --gpus all --ipc=host us-central1-docker.pkg.dev/training-algorithms-external/mlcommons-docker-repo/algoperf_pytorch_${{ github.head_ref || github.ref_name }} -d criteo1tb -f pytorch -s reference_algorithms/paper_baselines/adamw/pytorch/submission.py -w criteo1tb_resnet -t reference_algorithms/paper_baselines/adamw/tuning_search_space.json -e tests/regression_tests/adamw -m 10 -c False -o True -r false - criteo_resnet_pytorch: + criteo_embed_init_pytorch: runs-on: self-hosted needs: build_and_push_pytorch_docker_image steps: From 230bf8471e9d660d60ca7c191800a5769483de72 Mon Sep 17 00:00:00 2001 From: Frank Schneider Date: Wed, 15 Jan 2025 16:05:34 +0100 Subject: [PATCH 11/14] Fix some markdown linting issues. --- DOCUMENTATION.md | 3 +-- GETTING_STARTED.md | 15 +++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index 05d6515bd..d04b247f3 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -222,7 +222,6 @@ def update_params( - Cannot replace the model parameters with pre-trained ones. - Batch norm should work here because the `model_fn` will return updated batch norm moving averages when it is told to with `update_batch_norm`. - ###### Prepare for evaluation function ```python @@ -278,7 +277,7 @@ def data_selection( In general, with noisy, non-deterministic training, evaluation frequency can affect training time measurements as more "bites of the apple" potentially allows the training code to exploit instability. We also want to discourage submissions from complicated and unrealistic logic that attempts to guess when training is close to complete and increases the evaluation rate, while not producing a well-sampled training curve at the start of training. 
 Simply allowing submissions complete freedom over evaluation frequency encourages competitors to work to minimize the number of evaluations, which distracts from the primary goal of finding better training algorithms.
 
-Submissions are eligible for an untimed eval every `eval_period` seconds. Before proceeding to evaluation, the submission can prepare the model through a call to `prepare_for_eval`, effectively modifying the model parameters and state as well as the optimizer state. Any additional evaluations performed by the submission code count against the runtime for scoring.
+Submissions are eligible for an untimed eval every `eval_period` seconds. Before proceeding to evaluation, the submission can prepare the model through a call to `prepare_for_eval`, effectively modifying the model parameters and state as well as the optimizer state. Any additional evaluations performed by the submission code count against the runtime for scoring.
 The harness that runs the submission code will attempt to eval every `eval_period` seconds by checking between each submission step (call of `update_params`) whether it has been at least `eval_period` seconds since that last eval; if so, the submission is given the possibility to prepare for evaluation (through a timed call to `prepare_for_eval`). If the accumulated runtime does not exceed the maximum allowed runtime after the preparation step, the clock is paused, and the submission is evaluated. This means that if calls to `update_params` typically take a lot more than `eval_period` seconds, such submissions will not receive as many untimed evals as a submission that had an `update_params` function that took less time. However, for appropriate settings of `eval_period`, we expect this to be quite rare. Submissions are always free to restructure their `update_params` code to split work into two subsequent steps to regain the potential benefits of these untimed model evaluations. For each workload, the `eval_period` will be set such that the total evaluation time is roughly between 10% and 20% of the total training time for the target-setting runs.
 
 #### Valid submissions

diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md
index 006b972ec..7d53c35e2 100644
--- a/GETTING_STARTED.md
+++ b/GETTING_STARTED.md
@@ -18,6 +18,8 @@
   - [Docker Tips](#docker-tips)
 - [Score your Submission](#score-your-submission)
   - [Running workloads](#running-workloads)
+- [Package your Submission code](#package-your-submission-code)
+- [Package Logs for Self-Reporting Submissions](#package-logs-for-self-reporting-submissions)
 
 ## Set Up and Installation
 
@@ -80,7 +82,6 @@ To set up a virtual enviornment and install this repository
   pip3 install -e '.[full]'
   ```
-
 <details>
 <summary>Per workload installations</summary>
 
 ```

@@ -414,22 +415,24 @@ submission_folder/
 ```
 
 Specifically we require that:
+
 1. There exist subdirectories in the submission folder named after the ruleset: `external_tuning` or `self_tuning`.
-2. The ruleset subdirectories contain directories named according to
-some identifier of the algorithm.
-3. Each algorithm subdirectory contains a `submission.py` module. Additional helper modules are allowed if you prefer to organize your code into multiple files. If there are additional python packages that have to be installed for the algorithm, also include a `requirements.txt` with package names and versions in the algorithm subdirectory.
+2. The ruleset subdirectories contain directories named according to
+some identifier of the algorithm.
+3. Each algorithm subdirectory contains a `submission.py` module. Additional helper modules are allowed if you prefer to organize your code into multiple files. If there are additional python packages that have to be installed for the algorithm, also include a `requirements.txt` with package names and versions in the algorithm subdirectory.
 4. For `external_tuning` algorithms the algorithm subdirectory should contain a `tuning_search_space.json`.
 
 To check that your submission folder meets the above requirements you can run the `submissions/repo_checker.py` script.
 
 ## Package Logs for Self-Reporting Submissions
+
 To prepare your submission for self reporting run:
 
-```
+```bash
 python3 package_logs.py --experiment_dir <experiment_dir> --destination_dir <destination_dir>
 ```
 
-The destination directory will contain the logs packed in studies and trials required for self-reporting.
+The destination directory will contain the logs packed in studies and trials required for self-reporting.
 
 **Good Luck!**

From ce44582c5e4e81e71a41798076315534259c97a0 Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 16:14:51 +0100
Subject: [PATCH 12/14] Add trailing new line

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 85063bcf4..403b08c2b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,4 +25,4 @@ scoring/plots/
 !scoring/test_data/experiment_dir/study_0/mnist_jax/trial_0/eval_measurements.csv
 !scoring/test_data/experiment_dir/study_0/mnist_jax/trial_1/eval_measurements.csv
 
-algorithmic_efficiency/_version.py
\ No newline at end of file
+algorithmic_efficiency/_version.py

From 37f556d7e5a6e22c87b59ddde663c3ca5b263280 Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 16:34:41 +0100
Subject: [PATCH 13/14] Rename package from `algorithmic-efficiency` to
 `algoperf`.
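The practical effect on downstream code is a one-line import change. A minimal illustration, using the `algoperf/spec.py` module that the rename list below includes:

```python
# Before this patch (PATCH 01-12), the benchmark API was imported as:
#   from algorithmic_efficiency import spec
# After the rename, the same module lives under the new package name:
from algoperf import spec
```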
--- .github/workflows/linting.yml | 2 +- .gitignore | 6 +-- CHANGELOG.md | 20 +++++---- CONTRIBUTING.md | 8 ++-- DOCUMENTATION.md | 2 +- GETTING_STARTED.md | 2 +- .../__init__.py | 0 .../checkpoint_utils.py | 4 +- .../data_utils.py | 2 +- .../halton.py | 0 .../init_utils.py | 0 .../interop_utils.py | 2 +- .../logger_utils.py | 4 +- .../param_utils.py | 2 +- .../profiler.py | 0 .../pytorch_utils.py | 8 ++-- .../random_utils.py | 0 {algorithmic_efficiency => algoperf}/spec.py | 0 .../workloads/__init__.py | 0 .../workloads/cifar/__init__.py | 0 .../workloads/cifar/cifar_jax/__init__.py | 0 .../cifar/cifar_jax/input_pipeline.py | 4 +- .../workloads/cifar/cifar_jax/models.py | 4 +- .../workloads/cifar/cifar_jax/workload.py | 10 ++--- .../workloads/cifar/cifar_pytorch/__init__.py | 0 .../workloads/cifar/cifar_pytorch/models.py | 10 ++--- .../workloads/cifar/cifar_pytorch/workload.py | 12 +++--- .../workloads/cifar/workload.py | 6 +-- .../workloads/criteo1tb/__init__.py | 0 .../criteo1tb/criteo1tb_jax/__init__.py | 0 .../criteo1tb/criteo1tb_jax/models.py | 0 .../criteo1tb/criteo1tb_jax/workload.py | 8 ++-- .../criteo1tb/criteo1tb_pytorch/__init__.py | 0 .../criteo1tb/criteo1tb_pytorch/models.py | 0 .../criteo1tb/criteo1tb_pytorch/workload.py | 10 ++--- .../workloads/criteo1tb/input_pipeline.py | 2 +- .../workloads/criteo1tb/workload.py | 4 +- .../workloads/fastmri/__init__.py | 0 .../workloads/fastmri/fastmri_jax/__init__.py | 0 .../workloads/fastmri/fastmri_jax/models.py | 0 .../workloads/fastmri/fastmri_jax/ssim.py | 0 .../workloads/fastmri/fastmri_jax/workload.py | 12 +++--- .../fastmri/fastmri_pytorch/__init__.py | 0 .../fastmri/fastmri_pytorch/models.py | 2 +- .../workloads/fastmri/fastmri_pytorch/ssim.py | 2 +- .../fastmri/fastmri_pytorch/workload.py | 14 +++---- .../workloads/fastmri/input_pipeline.py | 2 +- .../workloads/fastmri/workload.py | 4 +- .../workloads/imagenet_resnet/__init__.py | 0 .../imagenet_resnet/imagenet_jax/__init__.py | 0 .../imagenet_jax/input_pipeline.py | 6 +-- .../imagenet_resnet/imagenet_jax/models.py | 2 +- .../imagenet_jax/randaugment.py | 0 .../imagenet_resnet/imagenet_jax/workload.py | 14 +++---- .../imagenet_pytorch/__init__.py | 0 .../imagenet_pytorch/models.py | 4 +- .../imagenet_pytorch/randaugment.py | 2 +- .../imagenet_pytorch/workload.py | 18 ++++---- .../workloads/imagenet_resnet/imagenet_v2.py | 6 +-- .../workloads/imagenet_resnet/workload.py | 2 +- .../workloads/imagenet_vit/__init__.py | 0 .../imagenet_vit/imagenet_jax/__init__.py | 0 .../imagenet_vit/imagenet_jax/models.py | 2 +- .../imagenet_vit/imagenet_jax/workload.py | 12 +++--- .../imagenet_vit/imagenet_pytorch/__init__.py | 0 .../imagenet_vit/imagenet_pytorch/models.py | 6 +-- .../imagenet_vit/imagenet_pytorch/workload.py | 14 +++---- .../workloads/imagenet_vit/workload.py | 4 +- .../librispeech_conformer/__init__.py | 0 .../librispeech_conformer/input_pipeline.py | 0 .../librispeech_jax/__init__.py | 0 .../librispeech_preprocessor.py | 0 .../librispeech_jax/models.py | 4 +- .../librispeech_jax/spectrum_augmenter.py | 0 .../librispeech_jax/workload.py | 14 +++---- .../librispeech_pytorch/__init__.py | 0 .../librispeech_pytorch/models.py | 4 +- .../librispeech_pytorch/preprocessor.py | 0 .../librispeech_pytorch/spectrum_augmenter.py | 0 .../librispeech_pytorch/workload.py | 18 ++++---- .../librispeech_conformer/metrics.py | 0 .../librispeech_conformer/workload.py | 2 +- .../librispeech_deepspeech/__init__.py | 0 .../librispeech_jax/__init__.py | 0 .../librispeech_jax/models.py | 4 +- 
.../librispeech_jax/workload.py | 8 ++-- .../librispeech_pytorch/__init__.py | 0 .../librispeech_pytorch/models.py | 4 +- .../librispeech_pytorch/workload.py | 14 +++---- .../workloads/mnist/__init__.py | 0 .../workloads/mnist/mnist_jax/__init__.py | 0 .../workloads/mnist/mnist_jax/workload.py | 6 +-- .../workloads/mnist/mnist_pytorch/__init__.py | 0 .../workloads/mnist/mnist_pytorch/workload.py | 10 ++--- .../workloads/mnist/workload.py | 8 ++-- .../workloads/ogbg/__init__.py | 0 .../workloads/ogbg/input_pipeline.py | 0 .../workloads/ogbg/metrics.py | 2 +- .../workloads/ogbg/ogbg_jax/__init__.py | 0 .../workloads/ogbg/ogbg_jax/models.py | 0 .../workloads/ogbg/ogbg_jax/workload.py | 10 ++--- .../workloads/ogbg/ogbg_pytorch/__init__.py | 0 .../workloads/ogbg/ogbg_pytorch/models.py | 2 +- .../workloads/ogbg/ogbg_pytorch/workload.py | 12 +++--- .../workloads/ogbg/workload.py | 8 ++-- .../workloads/utils.py | 0 .../workloads/wmt/__init__.py | 0 .../workloads/wmt/bleu.py | 2 +- .../workloads/wmt/input_pipeline.py | 6 +-- .../workloads/wmt/tokenizer.py | 0 .../workloads/wmt/wmt_jax/__init__.py | 0 .../workloads/wmt/wmt_jax/decode.py | 0 .../workloads/wmt/wmt_jax/models.py | 0 .../workloads/wmt/wmt_jax/workload.py | 12 +++--- .../workloads/wmt/wmt_pytorch/__init__.py | 0 .../workloads/wmt/wmt_pytorch/decode.py | 2 +- .../workloads/wmt/wmt_pytorch/models.py | 0 .../workloads/wmt/wmt_pytorch/workload.py | 14 +++---- .../workloads/wmt/workload.py | 6 +-- .../workloads/workloads.py | 4 +- datasets/dataset_setup.py | 4 +- .../external_tuning/jax_nadamw_full_budget.py | 2 +- .../jax_nadamw_target_setting.py | 2 +- .../pytorch_nadamw_full_budget.py | 4 +- .../pytorch_nadamw_target_setting.py | 4 +- .../self_tuning/jax_nadamw_full_budget.py | 2 +- .../self_tuning/jax_nadamw_target_setting.py | 2 +- .../self_tuning/pytorch_nadamw_full_budget.py | 4 +- .../pytorch_nadamw_target_setting.py | 4 +- pyproject.toml | 14 +++---- .../cifar/cifar_jax/submission.py | 2 +- .../cifar/cifar_pytorch/submission.py | 2 +- .../mnist/mnist_jax/submission.py | 2 +- .../mnist/mnist_pytorch/submission.py | 2 +- .../adafactor/jax/submission.py | 2 +- .../adafactor/pytorch/submission.py | 4 +- .../paper_baselines/adamw/jax/submission.py | 2 +- .../adamw/pytorch/submission.py | 4 +- .../paper_baselines/lamb/jax/submission.py | 2 +- .../lamb/pytorch/submission.py | 2 +- .../momentum/jax/submission.py | 2 +- .../momentum/pytorch/submission.py | 4 +- .../paper_baselines/nadamw/jax/submission.py | 2 +- .../nadamw/pytorch/submission.py | 4 +- .../nesterov/jax/submission.py | 2 +- .../nesterov/pytorch/submission.py | 4 +- .../paper_baselines/sam/jax/submission.py | 2 +- .../paper_baselines/sam/pytorch/submission.py | 4 +- .../paper_baselines/shampoo/jax/submission.py | 2 +- .../data_selection.py | 2 +- .../target_setting_algorithms/jax_adamw.py | 2 +- .../target_setting_algorithms/jax_momentum.py | 2 +- .../target_setting_algorithms/jax_nadamw.py | 2 +- .../target_setting_algorithms/jax_nesterov.py | 2 +- .../jax_submission_base.py | 2 +- .../pytorch_adamw.py | 2 +- .../pytorch_momentum.py | 2 +- .../pytorch_nadamw.py | 2 +- .../pytorch_nesterov.py | 2 +- .../pytorch_submission_base.py | 4 +- scoring/performance_profile.py | 6 +-- scoring/run_workloads.py | 4 +- scoring/scoring_utils.py | 4 +- submission_runner.py | 22 +++++----- submissions/template/submission.py | 2 +- tests/modeldiffs/criteo1tb/compare.py | 6 +-- .../criteo1tb_embed_init/compare.py | 6 +-- .../modeldiffs/criteo1tb_layernorm/compare.py | 6 +-- 
 tests/modeldiffs/criteo1tb_resnet/compare.py | 6 +--
 tests/modeldiffs/fastmri/compare.py | 6 +--
 tests/modeldiffs/fastmri_layernorm/compare.py | 6 +--
 .../modeldiffs/fastmri_model_size/compare.py | 6 +--
 tests/modeldiffs/fastmri_tanh/compare.py | 6 +--
 tests/modeldiffs/imagenet_resnet/compare.py | 6 +--
 .../imagenet_resnet/gelu_compare.py | 6 +--
 .../imagenet_resnet/silu_compare.py | 6 +--
 tests/modeldiffs/imagenet_vit/compare.py | 6 +--
 tests/modeldiffs/imagenet_vit_glu/compare.py | 6 +--
 tests/modeldiffs/imagenet_vit_map/compare.py | 6 +--
 .../modeldiffs/imagenet_vit_postln/compare.py | 6 +--
 .../librispeech_conformer/compare.py | 6 +--
 .../compare.py | 6 +--
 .../librispeech_conformer_gelu/compare.py | 6 +--
 .../compare.py | 6 +--
 .../librispeech_deepspeech/compare.py | 6 +--
 .../compare.py | 6 +--
 .../librispeech_deepspeech_normaug/compare.py | 6 +--
 .../librispeech_deepspeech_tanh/compare.py | 6 +--
 tests/modeldiffs/ogbg/compare.py | 6 +--
 tests/modeldiffs/ogbg_gelu/compare.py | 6 +--
 tests/modeldiffs/ogbg_model_size/compare.py | 6 +--
 tests/modeldiffs/ogbg_silu/compare.py | 6 +--
 tests/modeldiffs/vanilla_sgd_jax.py | 2 +-
 tests/modeldiffs/vanilla_sgd_pytorch.py | 2 +-
 tests/modeldiffs/wmt/compare.py | 6 +--
 .../modeldiffs/wmt_attention_temp/compare.py | 6 +--
 tests/modeldiffs/wmt_glu_tanh/compare.py | 6 +--
 tests/modeldiffs/wmt_post_ln/compare.py | 6 +--
 tests/reference_algorithm_tests.py | 14 +++---
 tests/submission_runner_test.py | 2 +-
 tests/test_baselines.py | 4 +-
 tests/test_num_params.py | 38 ++++++++---------
 tests/test_param_shapes.py | 40 +++++++++---------
 tests/test_param_types.py | 42 +++++++++----------
 tests/test_ssim.py | 10 ++---
 tests/test_version.py | 6 +--
 tests/version_test.py | 16 +++++++
 .../imagenet_jax/workload_test.py | 4 +-
 208 files changed, 489 insertions(+), 467 deletions(-)
 rename {algorithmic_efficiency => algoperf}/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/checkpoint_utils.py (98%)
 rename {algorithmic_efficiency => algoperf}/data_utils.py (99%)
 rename {algorithmic_efficiency => algoperf}/halton.py (100%)
 rename {algorithmic_efficiency => algoperf}/init_utils.py (100%)
 rename {algorithmic_efficiency => algoperf}/interop_utils.py (90%)
 rename {algorithmic_efficiency => algoperf}/logger_utils.py (99%)
 rename {algorithmic_efficiency => algoperf}/param_utils.py (99%)
 rename {algorithmic_efficiency => algoperf}/profiler.py (100%)
 rename {algorithmic_efficiency => algoperf}/pytorch_utils.py (89%)
 rename {algorithmic_efficiency => algoperf}/random_utils.py (100%)
 rename {algorithmic_efficiency => algoperf}/spec.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_jax/input_pipeline.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_jax/models.py (93%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_jax/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_pytorch/models.py (92%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/cifar_pytorch/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/cifar/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_jax/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_jax/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_pytorch/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/criteo1tb_pytorch/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/input_pipeline.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/criteo1tb/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_jax/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_jax/ssim.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_jax/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_pytorch/models.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_pytorch/ssim.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/fastmri_pytorch/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/input_pipeline.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/fastmri/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_jax/models.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_jax/randaugment.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_jax/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_pytorch/models.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_pytorch/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/imagenet_v2.py (90%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_resnet/workload.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_jax/models.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_jax/workload.py (91%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_pytorch/models.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/imagenet_pytorch/workload.py (87%)
 rename {algorithmic_efficiency => algoperf}/workloads/imagenet_vit/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/input_pipeline.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_jax/librispeech_preprocessor.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_jax/models.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_jax/spectrum_augmenter.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_jax/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_pytorch/models.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_pytorch/preprocessor.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_pytorch/spectrum_augmenter.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/librispeech_pytorch/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/metrics.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_conformer/workload.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_jax/models.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_jax/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_pytorch/models.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py (88%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/mnist_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/mnist_jax/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/mnist_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/mnist_pytorch/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/mnist/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/input_pipeline.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/metrics.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_jax/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_jax/workload.py (95%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_pytorch/models.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/ogbg_pytorch/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/ogbg/workload.py (96%)
 rename {algorithmic_efficiency => algoperf}/workloads/utils.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/bleu.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/input_pipeline.py (98%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/tokenizer.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_jax/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_jax/decode.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_jax/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_jax/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_pytorch/__init__.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_pytorch/decode.py (99%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_pytorch/models.py (100%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/wmt_pytorch/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/wmt/workload.py (97%)
 rename {algorithmic_efficiency => algoperf}/workloads/workloads.py (98%)
 create mode 100644 tests/version_test.py

diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index e49686358..0efa7b236 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -17,7 +17,7 @@ jobs:
           pip install pylint==2.16.1
       - name: Run pylint
         run: |
-          pylint algorithmic_efficiency
+          pylint algoperf
           pylint reference_algorithms
           pylint prize_qualification_baselines
           pylint submission_runner.py
diff --git a/.gitignore b/.gitignore
index 403b08c2b..7d35f0ccc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,8 +12,8 @@ makefile
 *.swp
 */data/
 *events.out.tfevents*
-algorithmic_efficiency/workloads/librispeech_conformer/data_dir
-algorithmic_efficiency/workloads/librispeech_conformer/work_dir
+algoperf/workloads/librispeech_conformer/data_dir
+algoperf/workloads/librispeech_conformer/work_dir
 *.flac
 *.npy
 *.csv
@@ -25,4 +25,4 @@ scoring/plots/
 !scoring/test_data/experiment_dir/study_0/mnist_jax/trial_0/eval_measurements.csv
 !scoring/test_data/experiment_dir/study_0/mnist_jax/trial_1/eval_measurements.csv
 
-algorithmic_efficiency/_version.py
+algoperf/_version.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95cd40775..685926506 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,34 +4,39 @@
 
 - Finalized variant workload targets.
 - Fix in random_utils helper function.
-- For conformer PyTorch Dropout layers set `inplace=True`. 
+- For conformer PyTorch Dropout layers set `inplace=True`.
 - Clear CUDA cache at begining of each trial for PyTorch.
 
 ## algoperf-benchmark-0.1.4 (2024-03-26)
 
 Upgrade CUDA version to CUDA 12.1:
+
 - Upgrade CUDA version in Dockerfiles that will be used for scoring.
 - Update Jax and PyTorch package version tags to use local CUDA installation.
 
-Add flag for completely disabling checkpointing. 
+Add flag for completely disabling checkpointing.
+
 - Note that we will run with checkpointing off at scoring time.
 
-Update Deepspeech and Conformer variant target setting configurations. 
-- Note that variant targets are not final. 
+Update Deepspeech and Conformer variant target setting configurations.
+
+- Note that variant targets are not final.
 
 Fixed bug in scoring code to take best trial in a study for external-tuning ruleset.
 
-Added instructions for submission. 
+Added instructions for submission.
 
-Changed default number of workers for PyTorch data loaders to 0. Running with >0 may lead to incorrect eval results see https://github.com/mlcommons/algorithmic-efficiency/issues/732.
+Changed default number of workers for PyTorch data loaders to 0. Running with >0 may lead to incorrect eval results see <https://github.com/mlcommons/algorithmic-efficiency/issues/732>.
 
 ## algoperf-benchmark-0.1.2 (2024-03-04)
+
 Workload variant additions and fixes:
+
 - Add Deepspeech workload variant
 - Fix bugs in Imagenet ResNet, WMT and Criteo1tb variants
 
 Add prize qualification logs for external tuning ruleset.
-Note: FastMRI trials with dropout are not yet added due to https://github.com/mlcommons/algorithmic-efficiency/issues/664.
+Note: FastMRI trials with dropout are not yet added due to <https://github.com/mlcommons/algorithmic-efficiency/issues/664>.
 
 Add missing funcitonality to Docker startup script for self_tuning ruleset.
 Add self_tuning ruleset option to script that runs all workloads for scoring.
@@ -41,6 +46,7 @@ Datasetup fixes.
 Fix tests that check training differences in PyTorch and JAX on GPU.
 
 ## algoperf-benchmark-0.1.1 (2024-01-19)
+
 Bug fixes to FastMRI metric calculation and targets.
 
 Added workload variants and targets for ogbg, fastmri, librispeech_conformer, imagenet_resnet, imagenet_vit, criteo1tb to be used as held-out workloads.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a93289852..c98a5009e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -205,7 +205,7 @@ docker run -t -d \
 -v $HOME/data/:/data/ \
 -v $HOME/experiment_runs/:/experiment_runs \
 -v $HOME/experiment_runs/logs:/logs \
--v $HOME/algorithmic-efficiency:/algorithmic-efficiency \
+-v $HOME/algorithmic-efficiency:/algoperf \
 --gpus all \
 --ipc=host \
 <docker_image_name> \
@@ -229,7 +229,7 @@ To run the below commands, use the versions installed via `pip install -e '.[dev
 To automatically fix formatting errors, run the following (*WARNING:* this will edit your code, so it is suggested to make a git commit first!):
 
 ```bash
-yapf -i -r -vv -p algorithmic_efficiency datasets prize_qualification_baselines reference_algorithms tests *.py
+yapf -i -r -vv -p algoperf datasets prize_qualification_baselines reference_algorithms tests *.py
 ```
 
 To sort all import orderings, run the following:
@@ -247,7 +247,7 @@ isort . --check --diff
 To print out all offending pylint issues, run the following:
 
 ```bash
-pylint algorithmic_efficiency
+pylint algoperf
 pylint datasets
 pylint prize_qualification_baselines
 pylint reference_algorithms
@@ -288,4 +288,4 @@ You can check what version `setuptools_scm` is creating by running `python -m se
 To create a new version, create a new release (and tag) in the GitHub UI.
 The package version is automatically updated to the new version.
 
-Once the package is installed, the version can be accessed as the package attribute `algorithmic_efficiency.__version__`, i.e. via `python -c "import algorithmic_efficiency; print(algorithmic_efficiency.__version__)"`.
+Once the package is installed, the version can be accessed as the package attribute `algoperf.__version__`, i.e. via `python -c "import algoperf; print(algoperf.__version__)"`.
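The CONTRIBUTING.md hunk above ends by documenting how the package version is exposed after the rename. As a minimal sketch of that check (assuming an editable install, so that `setuptools_scm` has generated the git-ignored `algoperf/_version.py` backing the attribute):

```python
# Minimal sketch: confirm the renamed package exposes its version.
# Assumes `pip install -e .` has run, so setuptools_scm has written
# algoperf/_version.py (the generated file .gitignore now ignores).
import algoperf

print(algoperf.__version__)
```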
diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md
index d04b247f3..63439cb09 100644
--- a/DOCUMENTATION.md
+++ b/DOCUMENTATION.md
@@ -641,4 +641,4 @@ That said, while submitting Adam with some novel heuristic to set various hyperp
 The JAX and PyTorch versions of the Criteo, FastMRI, Librispeech, OGBG, and WMT workloads use the same TensorFlow input pipelines. Due to differences in how JAX and PyTorch distribute computations across devices, the PyTorch workloads have an additional overhead for these workloads.
 
 Since we use PyTorch's [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel) implementation, there is one Python process for each device. Depending on the hardware and the settings of the cluster, running a TensorFlow input pipeline in each Python process can lead to errors, since too many threads are created in each process. See [this PR thread](https://github.com/mlcommons/algorithmic-efficiency/pull/85) for more details.
-While this issue might not affect all setups, we currently implement a different strategy: we only run the TensorFlow input pipeline in one Python process (with `rank == 0`), and [broadcast](https://pytorch.org/docs/stable/distributed.html#torch.distributed.broadcast) the batches to all other devices. This introduces an additional communication overhead for each batch. See the [implementation for the WMT workload](https://github.com/mlcommons/algorithmic-efficiency/blob/main/algorithmic_efficiency/workloads/wmt/wmt_pytorch/workload.py#L215-L288) as an example.
+While this issue might not affect all setups, we currently implement a different strategy: we only run the TensorFlow input pipeline in one Python process (with `rank == 0`), and [broadcast](https://pytorch.org/docs/stable/distributed.html#torch.distributed.broadcast) the batches to all other devices. This introduces an additional communication overhead for each batch. See the [implementation for the WMT workload](https://github.com/mlcommons/algorithmic-efficiency/blob/main/algoperf/workloads/wmt/wmt_pytorch/workload.py#L215-L288) as an example.
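The rank-0 strategy described in the DOCUMENTATION.md hunk above is easier to picture in a stripped-down form. The sketch below is illustrative only, not the benchmark's implementation (that lives in the linked `wmt_pytorch/workload.py`); it assumes `torch.distributed` is already initialized with one process per device and that all ranks agree on the batch's shape and dtype up front:

```python
import torch
import torch.distributed as dist

RANK = dist.get_rank() if dist.is_initialized() else 0
DEVICE = torch.device(f'cuda:{RANK}' if torch.cuda.is_available() else 'cpu')


def next_batch(tf_iterator, batch_shape, dtype=torch.float32):
  """Fetch a batch on rank 0 and broadcast it to every other process.

  Only rank 0 owns the TensorFlow input pipeline (`tf_iterator`); the
  remaining ranks receive a copy, so every rank must know `batch_shape`
  and `dtype` ahead of time.
  """
  if RANK == 0:
    batch = torch.as_tensor(next(tf_iterator), dtype=dtype, device=DEVICE)
  else:
    # Non-zero ranks allocate an empty buffer for broadcast() to fill.
    batch = torch.empty(batch_shape, dtype=dtype, device=DEVICE)
  dist.broadcast(batch, src=0)  # the extra per-batch communication
  return batch
```

The shape agreement is also why helpers like `shard_and_maybe_pad_np` in `algoperf/data_utils.py` (renamed below) pad incomplete batches before they are distributed.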
diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md
index 7d53c35e2..a4b4460a6 100644
--- a/GETTING_STARTED.md
+++ b/GETTING_STARTED.md
@@ -58,7 +58,7 @@ To set up a virtual enviornment and install this repository
    cd algorithmic-efficiency
    ```
 
-3. Run the following pip3 install commands based on your chosen framework to install `algorithmic_efficiency` and its dependencies.
+3. Run the following pip3 install commands based on your chosen framework to install `algoperf` and its dependencies.
 
For **JAX**: diff --git a/algorithmic_efficiency/__init__.py b/algoperf/__init__.py similarity index 100% rename from algorithmic_efficiency/__init__.py rename to algoperf/__init__.py diff --git a/algorithmic_efficiency/checkpoint_utils.py b/algoperf/checkpoint_utils.py similarity index 98% rename from algorithmic_efficiency/checkpoint_utils.py rename to algoperf/checkpoint_utils.py index 29c1a821e..8d3fc5102 100644 --- a/algorithmic_efficiency/checkpoint_utils.py +++ b/algoperf/checkpoint_utils.py @@ -16,8 +16,8 @@ from tensorflow.io import gfile # pytype: disable=import-error import torch -from algorithmic_efficiency import spec -from algorithmic_efficiency.pytorch_utils import pytorch_setup +from algoperf import spec +from algoperf.pytorch_utils import pytorch_setup _, _, DEVICE, _ = pytorch_setup() CheckpointReturn = Tuple[spec.OptimizerState, diff --git a/algorithmic_efficiency/data_utils.py b/algoperf/data_utils.py similarity index 99% rename from algorithmic_efficiency/data_utils.py rename to algoperf/data_utils.py index 901f0b582..b09731fbe 100644 --- a/algorithmic_efficiency/data_utils.py +++ b/algoperf/data_utils.py @@ -11,7 +11,7 @@ from torch.utils.data import DistributedSampler from torch.utils.data import Sampler -from algorithmic_efficiency import spec +from algoperf import spec def shard_and_maybe_pad_np( diff --git a/algorithmic_efficiency/halton.py b/algoperf/halton.py similarity index 100% rename from algorithmic_efficiency/halton.py rename to algoperf/halton.py diff --git a/algorithmic_efficiency/init_utils.py b/algoperf/init_utils.py similarity index 100% rename from algorithmic_efficiency/init_utils.py rename to algoperf/init_utils.py diff --git a/algorithmic_efficiency/interop_utils.py b/algoperf/interop_utils.py similarity index 90% rename from algorithmic_efficiency/interop_utils.py rename to algoperf/interop_utils.py index e307042a9..0c6535d7a 100644 --- a/algorithmic_efficiency/interop_utils.py +++ b/algoperf/interop_utils.py @@ -1,7 +1,7 @@ import jax.dlpack import torch -from algorithmic_efficiency import spec +from algoperf import spec def jax_to_pytorch(x: spec.Tensor, take_ownership: bool = False) -> spec.Tensor: diff --git a/algorithmic_efficiency/logger_utils.py b/algoperf/logger_utils.py similarity index 99% rename from algorithmic_efficiency/logger_utils.py rename to algoperf/logger_utils.py index 609d996e6..37a8ab246 100644 --- a/algorithmic_efficiency/logger_utils.py +++ b/algoperf/logger_utils.py @@ -18,8 +18,8 @@ import psutil import torch.distributed as dist -from algorithmic_efficiency import spec -from algorithmic_efficiency.pytorch_utils import pytorch_setup +from algoperf import spec +from algoperf.pytorch_utils import pytorch_setup USE_PYTORCH_DDP, RANK, DEVICE, _ = pytorch_setup() diff --git a/algorithmic_efficiency/param_utils.py b/algoperf/param_utils.py similarity index 99% rename from algorithmic_efficiency/param_utils.py rename to algoperf/param_utils.py index b430366b1..00fde1cce 100644 --- a/algorithmic_efficiency/param_utils.py +++ b/algoperf/param_utils.py @@ -6,7 +6,7 @@ import jax from torch import nn -from algorithmic_efficiency import spec +from algoperf import spec def pytorch_param_shapes(model: nn.Module) -> Dict[str, spec.ShapeTuple]: diff --git a/algorithmic_efficiency/profiler.py b/algoperf/profiler.py similarity index 100% rename from algorithmic_efficiency/profiler.py rename to algoperf/profiler.py diff --git a/algorithmic_efficiency/pytorch_utils.py b/algoperf/pytorch_utils.py similarity index 89% rename from 
algorithmic_efficiency/pytorch_utils.py rename to algoperf/pytorch_utils.py index 590f500fa..4a674985d 100644 --- a/algorithmic_efficiency/pytorch_utils.py +++ b/algoperf/pytorch_utils.py @@ -7,11 +7,11 @@ import torch import torch.distributed as dist -from algorithmic_efficiency import spec -from algorithmic_efficiency.profiler import Profiler -from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \ +from algoperf import spec +from algoperf.profiler import Profiler +from algoperf.workloads.librispeech_conformer.librispeech_pytorch.models import \ BatchNorm as ConformerBatchNorm -from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \ +from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.models import \ BatchNorm as DeepspeechBatchNorm diff --git a/algorithmic_efficiency/random_utils.py b/algoperf/random_utils.py similarity index 100% rename from algorithmic_efficiency/random_utils.py rename to algoperf/random_utils.py diff --git a/algorithmic_efficiency/spec.py b/algoperf/spec.py similarity index 100% rename from algorithmic_efficiency/spec.py rename to algoperf/spec.py diff --git a/algorithmic_efficiency/workloads/__init__.py b/algoperf/workloads/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/__init__.py rename to algoperf/workloads/__init__.py diff --git a/algorithmic_efficiency/workloads/cifar/__init__.py b/algoperf/workloads/cifar/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/cifar/__init__.py rename to algoperf/workloads/cifar/__init__.py diff --git a/algorithmic_efficiency/workloads/cifar/cifar_jax/__init__.py b/algoperf/workloads/cifar/cifar_jax/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/cifar/cifar_jax/__init__.py rename to algoperf/workloads/cifar/cifar_jax/__init__.py diff --git a/algorithmic_efficiency/workloads/cifar/cifar_jax/input_pipeline.py b/algoperf/workloads/cifar/cifar_jax/input_pipeline.py similarity index 98% rename from algorithmic_efficiency/workloads/cifar/cifar_jax/input_pipeline.py rename to algoperf/workloads/cifar/cifar_jax/input_pipeline.py index 3e6a68844..728d05f29 100644 --- a/algorithmic_efficiency/workloads/cifar/cifar_jax/input_pipeline.py +++ b/algoperf/workloads/cifar/cifar_jax/input_pipeline.py @@ -13,8 +13,8 @@ import tensorflow as tf import tensorflow_datasets as tfds -from algorithmic_efficiency import spec -from algorithmic_efficiency.data_utils import shard_and_maybe_pad_np +from algoperf import spec +from algoperf.data_utils import shard_and_maybe_pad_np def preprocess_for_train(image: spec.Tensor, diff --git a/algorithmic_efficiency/workloads/cifar/cifar_jax/models.py b/algoperf/workloads/cifar/cifar_jax/models.py similarity index 93% rename from algorithmic_efficiency/workloads/cifar/cifar_jax/models.py rename to algoperf/workloads/cifar/cifar_jax/models.py index 059352fb6..4d5df766e 100644 --- a/algorithmic_efficiency/workloads/cifar/cifar_jax/models.py +++ b/algoperf/workloads/cifar/cifar_jax/models.py @@ -10,8 +10,8 @@ from flax import linen as nn import jax.numpy as jnp -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \ +from algoperf import spec +from algoperf.workloads.imagenet_resnet.imagenet_jax.models import \ ResNetBlock ModuleDef = nn.Module diff --git a/algorithmic_efficiency/workloads/cifar/cifar_jax/workload.py b/algoperf/workloads/cifar/cifar_jax/workload.py 
similarity index 95% rename from algorithmic_efficiency/workloads/cifar/cifar_jax/workload.py rename to algoperf/workloads/cifar/cifar_jax/workload.py index 8268c6ca3..f4bcffbc3 100644 --- a/algorithmic_efficiency/workloads/cifar/cifar_jax/workload.py +++ b/algoperf/workloads/cifar/cifar_jax/workload.py @@ -11,12 +11,12 @@ import optax import tensorflow_datasets as tfds -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.cifar.cifar_jax import models -from algorithmic_efficiency.workloads.cifar.cifar_jax.input_pipeline import \ +from algoperf import param_utils +from algoperf import spec +from algoperf.workloads.cifar.cifar_jax import models +from algoperf.workloads.cifar.cifar_jax.input_pipeline import \ create_input_iter -from algorithmic_efficiency.workloads.cifar.workload import BaseCifarWorkload +from algoperf.workloads.cifar.workload import BaseCifarWorkload class CifarWorkload(BaseCifarWorkload): diff --git a/algorithmic_efficiency/workloads/cifar/cifar_pytorch/__init__.py b/algoperf/workloads/cifar/cifar_pytorch/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/cifar/cifar_pytorch/__init__.py rename to algoperf/workloads/cifar/cifar_pytorch/__init__.py diff --git a/algorithmic_efficiency/workloads/cifar/cifar_pytorch/models.py b/algoperf/workloads/cifar/cifar_pytorch/models.py similarity index 92% rename from algorithmic_efficiency/workloads/cifar/cifar_pytorch/models.py rename to algoperf/workloads/cifar/cifar_pytorch/models.py index b592e10ab..393d568b9 100644 --- a/algorithmic_efficiency/workloads/cifar/cifar_pytorch/models.py +++ b/algoperf/workloads/cifar/cifar_pytorch/models.py @@ -10,13 +10,13 @@ import torch from torch import nn -from algorithmic_efficiency import spec -from algorithmic_efficiency.init_utils import pytorch_default_init -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \ +from algoperf import spec +from algoperf.init_utils import pytorch_default_init +from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \ BasicBlock -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \ +from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \ Bottleneck -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \ +from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \ conv1x1 diff --git a/algorithmic_efficiency/workloads/cifar/cifar_pytorch/workload.py b/algoperf/workloads/cifar/cifar_pytorch/workload.py similarity index 96% rename from algorithmic_efficiency/workloads/cifar/cifar_pytorch/workload.py rename to algoperf/workloads/cifar/cifar_pytorch/workload.py index 7abcf4d6c..2ba92f0b9 100644 --- a/algorithmic_efficiency/workloads/cifar/cifar_pytorch/workload.py +++ b/algoperf/workloads/cifar/cifar_pytorch/workload.py @@ -12,13 +12,13 @@ from torchvision import transforms from torchvision.datasets import CIFAR10 -from algorithmic_efficiency import data_utils -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import pytorch_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.cifar.cifar_pytorch.models import \ +from algoperf import data_utils +from algoperf import param_utils +from algoperf import pytorch_utils +from algoperf import spec +from algoperf.workloads.cifar.cifar_pytorch.models import \ resnet18 -from algorithmic_efficiency.workloads.cifar.workload import 
BaseCifarWorkload +from algoperf.workloads.cifar.workload import BaseCifarWorkload USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup() diff --git a/algorithmic_efficiency/workloads/cifar/workload.py b/algoperf/workloads/cifar/workload.py similarity index 97% rename from algorithmic_efficiency/workloads/cifar/workload.py rename to algoperf/workloads/cifar/workload.py index 9e36cb291..c0d565108 100644 --- a/algorithmic_efficiency/workloads/cifar/workload.py +++ b/algoperf/workloads/cifar/workload.py @@ -7,9 +7,9 @@ import jax import torch -from algorithmic_efficiency import spec -from algorithmic_efficiency.pytorch_utils import pytorch_setup -import algorithmic_efficiency.random_utils as prng +from algoperf import spec +from algoperf.pytorch_utils import pytorch_setup +import algoperf.random_utils as prng USE_PYTORCH_DDP, _, _, _ = pytorch_setup() diff --git a/algorithmic_efficiency/workloads/criteo1tb/__init__.py b/algoperf/workloads/criteo1tb/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/criteo1tb/__init__.py rename to algoperf/workloads/criteo1tb/__init__.py diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/__init__.py b/algoperf/workloads/criteo1tb/criteo1tb_jax/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/__init__.py rename to algoperf/workloads/criteo1tb/criteo1tb_jax/__init__.py diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/models.py b/algoperf/workloads/criteo1tb/criteo1tb_jax/models.py similarity index 100% rename from algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/models.py rename to algoperf/workloads/criteo1tb/criteo1tb_jax/models.py diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/workload.py b/algoperf/workloads/criteo1tb/criteo1tb_jax/workload.py similarity index 96% rename from algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/workload.py rename to algoperf/workloads/criteo1tb/criteo1tb_jax/workload.py index 3743dc1ff..91761e458 100644 --- a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_jax/workload.py +++ b/algoperf/workloads/criteo1tb/criteo1tb_jax/workload.py @@ -8,10 +8,10 @@ import jax.numpy as jnp import numpy as np -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax import models -from algorithmic_efficiency.workloads.criteo1tb.workload import \ +from algoperf import param_utils +from algoperf import spec +from algoperf.workloads.criteo1tb.criteo1tb_jax import models +from algoperf.workloads.criteo1tb.workload import \ BaseCriteo1TbDlrmSmallWorkload diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/__init__.py b/algoperf/workloads/criteo1tb/criteo1tb_pytorch/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/__init__.py rename to algoperf/workloads/criteo1tb/criteo1tb_pytorch/__init__.py diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/models.py b/algoperf/workloads/criteo1tb/criteo1tb_pytorch/models.py similarity index 100% rename from algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/models.py rename to algoperf/workloads/criteo1tb/criteo1tb_pytorch/models.py diff --git a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/workload.py b/algoperf/workloads/criteo1tb/criteo1tb_pytorch/workload.py similarity index 97% rename from 
algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/workload.py rename to algoperf/workloads/criteo1tb/criteo1tb_pytorch/workload.py index 446267440..726aa8705 100644 --- a/algorithmic_efficiency/workloads/criteo1tb/criteo1tb_pytorch/workload.py +++ b/algoperf/workloads/criteo1tb/criteo1tb_pytorch/workload.py @@ -7,11 +7,11 @@ import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel as DDP -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.pytorch_utils import pytorch_setup -from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch import models -from algorithmic_efficiency.workloads.criteo1tb.workload import \ +from algoperf import param_utils +from algoperf import spec +from algoperf.pytorch_utils import pytorch_setup +from algoperf.workloads.criteo1tb.criteo1tb_pytorch import models +from algoperf.workloads.criteo1tb.workload import \ BaseCriteo1TbDlrmSmallWorkload USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup() diff --git a/algorithmic_efficiency/workloads/criteo1tb/input_pipeline.py b/algoperf/workloads/criteo1tb/input_pipeline.py similarity index 98% rename from algorithmic_efficiency/workloads/criteo1tb/input_pipeline.py rename to algoperf/workloads/criteo1tb/input_pipeline.py index cb091b3a5..7e254336a 100644 --- a/algorithmic_efficiency/workloads/criteo1tb/input_pipeline.py +++ b/algoperf/workloads/criteo1tb/input_pipeline.py @@ -12,7 +12,7 @@ import tensorflow as tf -from algorithmic_efficiency import data_utils +from algoperf import data_utils _NUM_DAY_23_FILES = 36 diff --git a/algorithmic_efficiency/workloads/criteo1tb/workload.py b/algoperf/workloads/criteo1tb/workload.py similarity index 97% rename from algorithmic_efficiency/workloads/criteo1tb/workload.py rename to algoperf/workloads/criteo1tb/workload.py index f18f2656f..80ec9d67a 100644 --- a/algorithmic_efficiency/workloads/criteo1tb/workload.py +++ b/algoperf/workloads/criteo1tb/workload.py @@ -7,8 +7,8 @@ from absl import flags import torch.distributed as dist -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.criteo1tb import input_pipeline +from algoperf import spec +from algoperf.workloads.criteo1tb import input_pipeline FLAGS = flags.FLAGS diff --git a/algorithmic_efficiency/workloads/fastmri/__init__.py b/algoperf/workloads/fastmri/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/fastmri/__init__.py rename to algoperf/workloads/fastmri/__init__.py diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_jax/__init__.py b/algoperf/workloads/fastmri/fastmri_jax/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/fastmri/fastmri_jax/__init__.py rename to algoperf/workloads/fastmri/fastmri_jax/__init__.py diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_jax/models.py b/algoperf/workloads/fastmri/fastmri_jax/models.py similarity index 100% rename from algorithmic_efficiency/workloads/fastmri/fastmri_jax/models.py rename to algoperf/workloads/fastmri/fastmri_jax/models.py diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_jax/ssim.py b/algoperf/workloads/fastmri/fastmri_jax/ssim.py similarity index 100% rename from algorithmic_efficiency/workloads/fastmri/fastmri_jax/ssim.py rename to algoperf/workloads/fastmri/fastmri_jax/ssim.py diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_jax/workload.py b/algoperf/workloads/fastmri/fastmri_jax/workload.py similarity index 95% rename 
from algorithmic_efficiency/workloads/fastmri/fastmri_jax/workload.py rename to algoperf/workloads/fastmri/fastmri_jax/workload.py index a5dfe8c22..393aa19d7 100644 --- a/algorithmic_efficiency/workloads/fastmri/fastmri_jax/workload.py +++ b/algoperf/workloads/fastmri/fastmri_jax/workload.py @@ -8,12 +8,12 @@ import jax import jax.numpy as jnp -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import spec -import algorithmic_efficiency.random_utils as prng -from algorithmic_efficiency.workloads.fastmri.fastmri_jax.models import UNet -from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import ssim -from algorithmic_efficiency.workloads.fastmri.workload import \ +from algoperf import param_utils +from algoperf import spec +import algoperf.random_utils as prng +from algoperf.workloads.fastmri.fastmri_jax.models import UNet +from algoperf.workloads.fastmri.fastmri_jax.ssim import ssim +from algoperf.workloads.fastmri.workload import \ BaseFastMRIWorkload diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/__init__.py b/algoperf/workloads/fastmri/fastmri_pytorch/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/__init__.py rename to algoperf/workloads/fastmri/fastmri_pytorch/__init__.py diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py b/algoperf/workloads/fastmri/fastmri_pytorch/models.py similarity index 99% rename from algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py rename to algoperf/workloads/fastmri/fastmri_pytorch/models.py index 6c0ab19e2..28f20bf20 100644 --- a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py +++ b/algoperf/workloads/fastmri/fastmri_pytorch/models.py @@ -12,7 +12,7 @@ from torch import Tensor from torch.nn import functional as F -from algorithmic_efficiency import init_utils +from algoperf import init_utils class UNet(nn.Module): diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/ssim.py b/algoperf/workloads/fastmri/fastmri_pytorch/ssim.py similarity index 98% rename from algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/ssim.py rename to algoperf/workloads/fastmri/fastmri_pytorch/ssim.py index eff6fb62f..45b61bea4 100644 --- a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/ssim.py +++ b/algoperf/workloads/fastmri/fastmri_pytorch/ssim.py @@ -6,7 +6,7 @@ import torch.nn.functional as F from torchvision.transforms.functional import pad as pad_fn -from algorithmic_efficiency.pytorch_utils import pytorch_setup +from algoperf.pytorch_utils import pytorch_setup DEVICE = pytorch_setup()[2] diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/workload.py b/algoperf/workloads/fastmri/fastmri_pytorch/workload.py similarity index 96% rename from algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/workload.py rename to algoperf/workloads/fastmri/fastmri_pytorch/workload.py index 74f6aa13d..f40654678 100644 --- a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/workload.py +++ b/algoperf/workloads/fastmri/fastmri_pytorch/workload.py @@ -9,14 +9,14 @@ import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import pytorch_utils -from algorithmic_efficiency import spec -import algorithmic_efficiency.random_utils as prng -from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.models import \ +from algoperf import param_utils +from 
algoperf import pytorch_utils +from algoperf import spec +import algoperf.random_utils as prng +from algoperf.workloads.fastmri.fastmri_pytorch.models import \ UNet -from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import ssim -from algorithmic_efficiency.workloads.fastmri.workload import \ +from algoperf.workloads.fastmri.fastmri_pytorch.ssim import ssim +from algoperf.workloads.fastmri.workload import \ BaseFastMRIWorkload USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup() diff --git a/algorithmic_efficiency/workloads/fastmri/input_pipeline.py b/algoperf/workloads/fastmri/input_pipeline.py similarity index 99% rename from algorithmic_efficiency/workloads/fastmri/input_pipeline.py rename to algoperf/workloads/fastmri/input_pipeline.py index 8f6ddafd1..f20611f43 100644 --- a/algorithmic_efficiency/workloads/fastmri/input_pipeline.py +++ b/algoperf/workloads/fastmri/input_pipeline.py @@ -9,7 +9,7 @@ import jax import tensorflow as tf -from algorithmic_efficiency import data_utils +from algoperf import data_utils _TRAIN_DIR = 'knee_singlecoil_train' _VAL_DIR = 'knee_singlecoil_val' diff --git a/algorithmic_efficiency/workloads/fastmri/workload.py b/algoperf/workloads/fastmri/workload.py similarity index 96% rename from algorithmic_efficiency/workloads/fastmri/workload.py rename to algoperf/workloads/fastmri/workload.py index a8fd1abbb..e9a2a313a 100644 --- a/algorithmic_efficiency/workloads/fastmri/workload.py +++ b/algoperf/workloads/fastmri/workload.py @@ -3,8 +3,8 @@ import math from typing import Optional -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.fastmri import input_pipeline +from algoperf import spec +from algoperf.workloads.fastmri import input_pipeline class BaseFastMRIWorkload(spec.Workload): diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/__init__.py b/algoperf/workloads/imagenet_resnet/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_resnet/__init__.py rename to algoperf/workloads/imagenet_resnet/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/__init__.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/__init__.py rename to algoperf/workloads/imagenet_resnet/imagenet_jax/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py similarity index 98% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py rename to algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py index 422eb9f7a..709a318c2 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py @@ -12,9 +12,9 @@ import tensorflow as tf import tensorflow_datasets as tfds -from algorithmic_efficiency import data_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \ +from algoperf import data_utils +from algoperf import spec +from algoperf.workloads.imagenet_resnet.imagenet_jax import \ randaugment TFDS_SPLIT_NAME = { diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/models.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/models.py similarity index 98% rename from 
algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/models.py rename to algoperf/workloads/imagenet_resnet/imagenet_jax/models.py index 34cd17440..ffa60b260 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/models.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_jax/models.py @@ -10,7 +10,7 @@ from flax import linen as nn import jax.numpy as jnp -from algorithmic_efficiency import spec +from algoperf import spec ModuleDef = nn.Module diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/randaugment.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/randaugment.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/randaugment.py rename to algoperf/workloads/imagenet_resnet/imagenet_jax/randaugment.py diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/workload.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py similarity index 95% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/workload.py rename to algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py index 2747fc2db..b445e9f00 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_jax/workload.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py @@ -17,15 +17,15 @@ import optax import tensorflow_datasets as tfds -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import random_utils as prng -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet import imagenet_v2 -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \ +from algoperf import param_utils +from algoperf import random_utils as prng +from algoperf import spec +from algoperf.workloads.imagenet_resnet import imagenet_v2 +from algoperf.workloads.imagenet_resnet.imagenet_jax import \ input_pipeline -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \ +from algoperf.workloads.imagenet_resnet.imagenet_jax import \ models -from algorithmic_efficiency.workloads.imagenet_resnet.workload import \ +from algoperf.workloads.imagenet_resnet.workload import \ BaseImagenetResNetWorkload diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/__init__.py b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/__init__.py rename to algoperf/workloads/imagenet_resnet/imagenet_pytorch/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/models.py b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/models.py similarity index 98% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/models.py rename to algoperf/workloads/imagenet_resnet/imagenet_pytorch/models.py index 2b9093940..aba9e671f 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/models.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/models.py @@ -11,8 +11,8 @@ from torch import nn from torch import Tensor -from algorithmic_efficiency import spec -from algorithmic_efficiency.init_utils import pytorch_default_init +from algoperf import spec +from algoperf.init_utils import pytorch_default_init def conv3x3(in_planes: int, diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py similarity 
index 99% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py rename to algoperf/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py index 829d82d74..c7a98e77a 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/randaugment.py @@ -14,7 +14,7 @@ from torchvision.transforms import functional as F from torchvision.transforms import InterpolationMode -from algorithmic_efficiency import spec +from algoperf import spec def cutout(img: spec.Tensor, pad_size: int) -> spec.Tensor: diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/workload.py b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py similarity index 95% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/workload.py rename to algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py index 3549911fa..7a08f325e 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_pytorch/workload.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py @@ -16,17 +16,17 @@ from torchvision import transforms from torchvision.datasets.folder import ImageFolder -from algorithmic_efficiency import data_utils -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import pytorch_utils -from algorithmic_efficiency import spec -import algorithmic_efficiency.random_utils as prng -from algorithmic_efficiency.workloads.imagenet_resnet import imagenet_v2 -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch import \ +from algoperf import data_utils +from algoperf import param_utils +from algoperf import pytorch_utils +from algoperf import spec +import algoperf.random_utils as prng +from algoperf.workloads.imagenet_resnet import imagenet_v2 +from algoperf.workloads.imagenet_resnet.imagenet_pytorch import \ randaugment -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \ +from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \ resnet50 -from algorithmic_efficiency.workloads.imagenet_resnet.workload import \ +from algoperf.workloads.imagenet_resnet.workload import \ BaseImagenetResNetWorkload USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup() diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_v2.py b/algoperf/workloads/imagenet_resnet/imagenet_v2.py similarity index 90% rename from algorithmic_efficiency/workloads/imagenet_resnet/imagenet_v2.py rename to algoperf/workloads/imagenet_resnet/imagenet_v2.py index 05ab12eb1..f63ddbc34 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/imagenet_v2.py +++ b/algoperf/workloads/imagenet_resnet/imagenet_v2.py @@ -8,9 +8,9 @@ import tensorflow_datasets as tfds -from algorithmic_efficiency import data_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \ +from algoperf import data_utils +from algoperf import spec +from algoperf.workloads.imagenet_resnet.imagenet_jax import \ input_pipeline diff --git a/algorithmic_efficiency/workloads/imagenet_resnet/workload.py b/algoperf/workloads/imagenet_resnet/workload.py similarity index 99% rename from algorithmic_efficiency/workloads/imagenet_resnet/workload.py rename to algoperf/workloads/imagenet_resnet/workload.py index 2e06805f7..8b3393ded 100644 --- a/algorithmic_efficiency/workloads/imagenet_resnet/workload.py +++ 
b/algoperf/workloads/imagenet_resnet/workload.py @@ -3,7 +3,7 @@ import math from typing import Dict, Iterator, Optional, Tuple -from algorithmic_efficiency import spec +from algoperf import spec class BaseImagenetResNetWorkload(spec.Workload): diff --git a/algorithmic_efficiency/workloads/imagenet_vit/__init__.py b/algoperf/workloads/imagenet_vit/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_vit/__init__.py rename to algoperf/workloads/imagenet_vit/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/__init__.py b/algoperf/workloads/imagenet_vit/imagenet_jax/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/__init__.py rename to algoperf/workloads/imagenet_vit/imagenet_jax/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/models.py b/algoperf/workloads/imagenet_vit/imagenet_jax/models.py similarity index 99% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/models.py rename to algoperf/workloads/imagenet_vit/imagenet_jax/models.py index 639800b44..cfa104b53 100644 --- a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/models.py +++ b/algoperf/workloads/imagenet_vit/imagenet_jax/models.py @@ -10,7 +10,7 @@ from flax import linen as nn import jax.numpy as jnp -from algorithmic_efficiency import spec +from algoperf import spec def posemb_sincos_2d(h: int, diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/workload.py b/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py similarity index 91% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/workload.py rename to algoperf/workloads/imagenet_vit/imagenet_jax/workload.py index 2ad71ffd0..2261aac6d 100644 --- a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_jax/workload.py +++ b/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py @@ -7,14 +7,14 @@ import jax import jax.numpy as jnp -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \ +from algoperf import param_utils +from algoperf import spec +from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \ ImagenetResNetWorkload -from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax import models -from algorithmic_efficiency.workloads.imagenet_vit.workload import \ +from algoperf.workloads.imagenet_vit.imagenet_jax import models +from algoperf.workloads.imagenet_vit.workload import \ BaseImagenetVitWorkload -from algorithmic_efficiency.workloads.imagenet_vit.workload import \ +from algoperf.workloads.imagenet_vit.workload import \ decode_variant diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/__init__.py b/algoperf/workloads/imagenet_vit/imagenet_pytorch/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/__init__.py rename to algoperf/workloads/imagenet_vit/imagenet_pytorch/__init__.py diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/models.py b/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py similarity index 98% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/models.py rename to algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py index 02d708da8..4fac8bd35 100644 --- a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/models.py +++ 
b/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py @@ -12,9 +12,9 @@ from torch import nn import torch.nn.functional as F -from algorithmic_efficiency import init_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.wmt.wmt_pytorch.models import \ +from algoperf import init_utils +from algoperf import spec +from algoperf.workloads.wmt.wmt_pytorch.models import \ MultiheadAttention diff --git a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/workload.py b/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py similarity index 87% rename from algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/workload.py rename to algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py index 703d40b07..20b294b47 100644 --- a/algorithmic_efficiency/workloads/imagenet_vit/imagenet_pytorch/workload.py +++ b/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py @@ -6,16 +6,16 @@ import torch from torch.nn.parallel import DistributedDataParallel as DDP -from algorithmic_efficiency import param_utils -from algorithmic_efficiency import pytorch_utils -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \ +from algoperf import param_utils +from algoperf import pytorch_utils +from algoperf import spec +from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import \ ImagenetResNetWorkload -from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch import \ +from algoperf.workloads.imagenet_vit.imagenet_pytorch import \ models -from algorithmic_efficiency.workloads.imagenet_vit.workload import \ +from algoperf.workloads.imagenet_vit.workload import \ BaseImagenetVitWorkload -from algorithmic_efficiency.workloads.imagenet_vit.workload import \ +from algoperf.workloads.imagenet_vit.workload import \ decode_variant USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup() diff --git a/algorithmic_efficiency/workloads/imagenet_vit/workload.py b/algoperf/workloads/imagenet_vit/workload.py similarity index 96% rename from algorithmic_efficiency/workloads/imagenet_vit/workload.py rename to algoperf/workloads/imagenet_vit/workload.py index ed0118ca0..7f06715a3 100644 --- a/algorithmic_efficiency/workloads/imagenet_vit/workload.py +++ b/algoperf/workloads/imagenet_vit/workload.py @@ -2,8 +2,8 @@ from typing import Dict, Iterator, Optional -from algorithmic_efficiency import spec -from algorithmic_efficiency.workloads.imagenet_resnet.workload import \ +from algoperf import spec +from algoperf.workloads.imagenet_resnet.workload import \ BaseImagenetResNetWorkload diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/__init__.py b/algoperf/workloads/librispeech_conformer/__init__.py similarity index 100% rename from algorithmic_efficiency/workloads/librispeech_conformer/__init__.py rename to algoperf/workloads/librispeech_conformer/__init__.py diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/input_pipeline.py b/algoperf/workloads/librispeech_conformer/input_pipeline.py similarity index 100% rename from algorithmic_efficiency/workloads/librispeech_conformer/input_pipeline.py rename to algoperf/workloads/librispeech_conformer/input_pipeline.py diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/__init__.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/__init__.py similarity index 100% rename from 
algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/__init__.py
rename to algoperf/workloads/librispeech_conformer/librispeech_jax/__init__.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/librispeech_preprocessor.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/librispeech_preprocessor.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/librispeech_preprocessor.py
rename to algoperf/workloads/librispeech_conformer/librispeech_jax/librispeech_preprocessor.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/models.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/models.py
similarity index 99%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/models.py
rename to algoperf/workloads/librispeech_conformer/librispeech_jax/models.py
index cb6287c5e..adb5e803c 100644
--- a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/models.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_jax/models.py
@@ -22,9 +22,9 @@ import jax.numpy as jnp
 import numpy as np

-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax import \
   librispeech_preprocessor as preprocessor
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax import \
   spectrum_augmenter
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/spectrum_augmenter.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/spectrum_augmenter.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/spectrum_augmenter.py
rename to algoperf/workloads/librispeech_conformer/librispeech_jax/spectrum_augmenter.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/workload.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
similarity index 96%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/workload.py
rename to algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
index e362f973b..b4fdb0811 100644
--- a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_jax/workload.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
@@ -11,14 +11,14 @@ import optax
 import torch

-from algorithmic_efficiency import data_utils
-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer import metrics
-from algorithmic_efficiency.workloads.librispeech_conformer import workload
-from algorithmic_efficiency.workloads.librispeech_conformer.input_pipeline import \
+from algoperf import data_utils
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer import metrics
+from algoperf.workloads.librispeech_conformer import workload
+from algoperf.workloads.librispeech_conformer.input_pipeline import \
   LibriSpeechDataset
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax import \
   models
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/__init__.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/__init__.py
rename to algoperf/workloads/librispeech_conformer/librispeech_pytorch/__init__.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/models.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/models.py
similarity index 98%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/models.py
rename to algoperf/workloads/librispeech_conformer/librispeech_pytorch/models.py
index 61400806a..db1e24521 100644
--- a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/models.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/models.py
@@ -12,9 +12,9 @@ from torch.nn import init
 import torch.nn.functional as F

-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch import \
   preprocessor
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
   SpecAug
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/preprocessor.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/preprocessor.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/preprocessor.py
rename to algoperf/workloads/librispeech_conformer/librispeech_pytorch/preprocessor.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/spectrum_augmenter.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/spectrum_augmenter.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/spectrum_augmenter.py
rename to algoperf/workloads/librispeech_conformer/librispeech_pytorch/spectrum_augmenter.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/workload.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
similarity index 95%
rename from algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/workload.py
rename to algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
index 155b30920..592e63989 100644
--- a/algorithmic_efficiency/workloads/librispeech_conformer/librispeech_pytorch/workload.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
@@ -10,16 +10,16 @@ import torch.distributed as dist
 from torch.nn.parallel import DistributedDataParallel as DDP

-from algorithmic_efficiency import data_utils
-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import pytorch_utils
-from algorithmic_efficiency import spec
-import algorithmic_efficiency.random_utils as prng
-from algorithmic_efficiency.workloads.librispeech_conformer import metrics
-from algorithmic_efficiency.workloads.librispeech_conformer import workload
-from algorithmic_efficiency.workloads.librispeech_conformer.input_pipeline import \
+from algoperf import data_utils
+from algoperf import param_utils
+from algoperf import pytorch_utils
+from algoperf import spec
+import algoperf.random_utils as prng
+from algoperf.workloads.librispeech_conformer import metrics
+from algoperf.workloads.librispeech_conformer import workload
+from algoperf.workloads.librispeech_conformer.input_pipeline import \
   LibriSpeechDataset
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch import \
   models

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/metrics.py b/algoperf/workloads/librispeech_conformer/metrics.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_conformer/metrics.py
rename to algoperf/workloads/librispeech_conformer/metrics.py
diff --git a/algorithmic_efficiency/workloads/librispeech_conformer/workload.py b/algoperf/workloads/librispeech_conformer/workload.py
similarity index 98%
rename from algorithmic_efficiency/workloads/librispeech_conformer/workload.py
rename to algoperf/workloads/librispeech_conformer/workload.py
index c2413c076..c9f5a3c59 100644
--- a/algorithmic_efficiency/workloads/librispeech_conformer/workload.py
+++ b/algoperf/workloads/librispeech_conformer/workload.py
@@ -1,7 +1,7 @@
 import math
 from typing import Dict

-from algorithmic_efficiency import spec
+from algoperf import spec


 class BaseLibrispeechWorkload(spec.Workload):
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/__init__.py b/algoperf/workloads/librispeech_deepspeech/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/__init__.py
rename to algoperf/workloads/librispeech_deepspeech/__init__.py
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/__init__.py b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/__init__.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_jax/__init__.py
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/models.py b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/models.py
similarity index 99%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/models.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_jax/models.py
index f9eb732e9..b116f44cd 100644
--- a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/models.py
+++ b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/models.py
@@ -16,9 +16,9 @@ from jax.experimental import rnn
 import jax.numpy as jnp

-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax import \
   librispeech_preprocessor as preprocessor
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax import \
   spectrum_augmenter

 Array = jnp.ndarray
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/workload.py b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
similarity index 95%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/workload.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
index a0db6d607..3e0781deb 100644
--- a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_jax/workload.py
+++ b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
@@ -6,11 +6,11 @@ import jax.numpy as jnp
 import numpy as np

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
   LibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax import \
   models
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/__init__.py b/algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/__init__.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/__init__.py
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/models.py b/algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/models.py
similarity index 98%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/models.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/models.py
index bdf556f1c..84d317326 100644
--- a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/models.py
+++ b/algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/models.py
@@ -11,9 +11,9 @@ import torch.distributed.nn as dist_nn
 import torch.nn.functional as F

-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch import \
   preprocessor
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
   SpecAug

 USE_PYTORCH_DDP = 'LOCAL_RANK' in os.environ
diff --git a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py b/algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py
similarity index 88%
rename from algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py
rename to algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py
index 626bac278..4f8ad1974 100644
--- a/algorithmic_efficiency/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py
+++ b/algoperf/workloads/librispeech_deepspeech/librispeech_pytorch/workload.py
@@ -3,16 +3,16 @@
 import torch
 from torch.nn.parallel import DistributedDataParallel as DDP

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.models import \
   initialize
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import \
   LibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
   DeepspeechConfig
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
   DeepspeechEncoderDecoder

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/mnist/__init__.py b/algoperf/workloads/mnist/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/mnist/__init__.py
rename to algoperf/workloads/mnist/__init__.py
diff --git a/algorithmic_efficiency/workloads/mnist/mnist_jax/__init__.py b/algoperf/workloads/mnist/mnist_jax/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/mnist/mnist_jax/__init__.py
rename to algoperf/workloads/mnist/mnist_jax/__init__.py
diff --git a/algorithmic_efficiency/workloads/mnist/mnist_jax/workload.py b/algoperf/workloads/mnist/mnist_jax/workload.py
similarity index 96%
rename from algorithmic_efficiency/workloads/mnist/mnist_jax/workload.py
rename to algoperf/workloads/mnist/mnist_jax/workload.py
index efbd73e33..8154026d1 100644
--- a/algorithmic_efficiency/workloads/mnist/mnist_jax/workload.py
+++ b/algoperf/workloads/mnist/mnist_jax/workload.py
@@ -10,9 +10,9 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.mnist.workload import BaseMnistWorkload
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.workloads.mnist.workload import BaseMnistWorkload


 class _Model(nn.Module):
diff --git a/algorithmic_efficiency/workloads/mnist/mnist_pytorch/__init__.py b/algoperf/workloads/mnist/mnist_pytorch/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/mnist/mnist_pytorch/__init__.py
rename to algoperf/workloads/mnist/mnist_pytorch/__init__.py
diff --git a/algorithmic_efficiency/workloads/mnist/mnist_pytorch/workload.py b/algoperf/workloads/mnist/mnist_pytorch/workload.py
similarity index 96%
rename from algorithmic_efficiency/workloads/mnist/mnist_pytorch/workload.py
rename to algoperf/workloads/mnist/mnist_pytorch/workload.py
index e638df078..780e1bca0 100644
--- a/algorithmic_efficiency/workloads/mnist/mnist_pytorch/workload.py
+++ b/algoperf/workloads/mnist/mnist_pytorch/workload.py
@@ -10,11 +10,11 @@ import torch.nn.functional as F
 from torch.nn.parallel import DistributedDataParallel as DDP

-from algorithmic_efficiency import init_utils
-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-from algorithmic_efficiency.workloads.mnist.workload import BaseMnistWorkload
+from algoperf import init_utils
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup
+from algoperf.workloads.mnist.workload import BaseMnistWorkload

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/mnist/workload.py b/algoperf/workloads/mnist/workload.py
similarity index 97%
rename from algorithmic_efficiency/workloads/mnist/workload.py
rename to algoperf/workloads/mnist/workload.py
index dcc195170..f53aadd0b 100644
--- a/algorithmic_efficiency/workloads/mnist/workload.py
+++ b/algoperf/workloads/mnist/workload.py
@@ -10,10 +10,10 @@ import tensorflow_datasets as tfds
 import torch

-from algorithmic_efficiency import data_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-import algorithmic_efficiency.random_utils as prng
+from algoperf import data_utils
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup
+import algoperf.random_utils as prng

 USE_PYTORCH_DDP, _, _, _ = pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/ogbg/__init__.py b/algoperf/workloads/ogbg/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/ogbg/__init__.py
rename to algoperf/workloads/ogbg/__init__.py
diff --git a/algorithmic_efficiency/workloads/ogbg/input_pipeline.py b/algoperf/workloads/ogbg/input_pipeline.py
similarity index 100%
rename from algorithmic_efficiency/workloads/ogbg/input_pipeline.py
rename to algoperf/workloads/ogbg/input_pipeline.py
diff --git a/algorithmic_efficiency/workloads/ogbg/metrics.py b/algoperf/workloads/ogbg/metrics.py
similarity index 98%
rename from algorithmic_efficiency/workloads/ogbg/metrics.py
rename to algoperf/workloads/ogbg/metrics.py
index a654eb2ae..55f83d905 100644
--- a/algorithmic_efficiency/workloads/ogbg/metrics.py
+++ b/algoperf/workloads/ogbg/metrics.py
@@ -11,7 +11,7 @@ import torch
 import torch.distributed as dist

-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_jax/__init__.py b/algoperf/workloads/ogbg/ogbg_jax/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_jax/__init__.py
rename to algoperf/workloads/ogbg/ogbg_jax/__init__.py
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_jax/models.py b/algoperf/workloads/ogbg/ogbg_jax/models.py
similarity index 100%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_jax/models.py
rename to algoperf/workloads/ogbg/ogbg_jax/models.py
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_jax/workload.py b/algoperf/workloads/ogbg/ogbg_jax/workload.py
similarity index 95%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_jax/workload.py
rename to algoperf/workloads/ogbg/ogbg_jax/workload.py
index ec0c0658d..e895d15a7 100644
--- a/algorithmic_efficiency/workloads/ogbg/ogbg_jax/workload.py
+++ b/algoperf/workloads/ogbg/ogbg_jax/workload.py
@@ -8,11 +8,11 @@ import jraph
 import optax

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg import metrics
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax import models
-from algorithmic_efficiency.workloads.ogbg.workload import BaseOgbgWorkload
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.workloads.ogbg import metrics
+from algoperf.workloads.ogbg.ogbg_jax import models
+from algoperf.workloads.ogbg.workload import BaseOgbgWorkload


 class OgbgWorkload(BaseOgbgWorkload):
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/__init__.py b/algoperf/workloads/ogbg/ogbg_pytorch/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/__init__.py
rename to algoperf/workloads/ogbg/ogbg_pytorch/__init__.py
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/models.py b/algoperf/workloads/ogbg/ogbg_pytorch/models.py
similarity index 99%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/models.py
rename to algoperf/workloads/ogbg/ogbg_pytorch/models.py
index d93013b87..fe9b29bc1 100644
--- a/algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/models.py
+++ b/algoperf/workloads/ogbg/ogbg_pytorch/models.py
@@ -8,7 +8,7 @@ import torch
 from torch import nn

-from algorithmic_efficiency import init_utils
+from algoperf import init_utils


 def _make_mlp(in_dim, hidden_dims, dropout_rate, activation_fn):
diff --git a/algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/workload.py b/algoperf/workloads/ogbg/ogbg_pytorch/workload.py
similarity index 96%
rename from algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/workload.py
rename to algoperf/workloads/ogbg/ogbg_pytorch/workload.py
index d4817226d..75252a6b9 100644
--- a/algorithmic_efficiency/workloads/ogbg/ogbg_pytorch/workload.py
+++ b/algoperf/workloads/ogbg/ogbg_pytorch/workload.py
@@ -8,12 +8,12 @@ import torch.distributed as dist
 from torch.nn.parallel import DistributedDataParallel as DDP

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import pytorch_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg import metrics
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.models import GNN
-from algorithmic_efficiency.workloads.ogbg.workload import BaseOgbgWorkload
+from algoperf import param_utils
+from algoperf import pytorch_utils
+from algoperf import spec
+from algoperf.workloads.ogbg import metrics
+from algoperf.workloads.ogbg.ogbg_pytorch.models import GNN
+from algoperf.workloads.ogbg.workload import BaseOgbgWorkload

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/ogbg/workload.py b/algoperf/workloads/ogbg/workload.py
similarity index 96%
rename from algorithmic_efficiency/workloads/ogbg/workload.py
rename to algoperf/workloads/ogbg/workload.py
index a32f385cb..c6a2162d7 100644
--- a/algorithmic_efficiency/workloads/ogbg/workload.py
+++ b/algoperf/workloads/ogbg/workload.py
@@ -7,10 +7,10 @@
 import jax

-from algorithmic_efficiency import random_utils as prng
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg import input_pipeline
-from algorithmic_efficiency.workloads.ogbg import metrics
+from algoperf import random_utils as prng
+from algoperf import spec
+from algoperf.workloads.ogbg import input_pipeline
+from algoperf.workloads.ogbg import metrics


 class BaseOgbgWorkload(spec.Workload):
diff --git a/algorithmic_efficiency/workloads/utils.py b/algoperf/workloads/utils.py
similarity index 100%
rename from algorithmic_efficiency/workloads/utils.py
rename to algoperf/workloads/utils.py
diff --git a/algorithmic_efficiency/workloads/wmt/__init__.py b/algoperf/workloads/wmt/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/__init__.py
rename to algoperf/workloads/wmt/__init__.py
diff --git a/algorithmic_efficiency/workloads/wmt/bleu.py b/algoperf/workloads/wmt/bleu.py
similarity index 98%
rename from algorithmic_efficiency/workloads/wmt/bleu.py
rename to algoperf/workloads/wmt/bleu.py
index 1efc87381..10719819c 100644
--- a/algorithmic_efficiency/workloads/wmt/bleu.py
+++ b/algoperf/workloads/wmt/bleu.py
@@ -6,7 +6,7 @@ import torch
 import torch.distributed as dist

-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP, _, DEVICE, N_GPUS = pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/wmt/input_pipeline.py b/algoperf/workloads/wmt/input_pipeline.py
similarity index 98%
rename from algorithmic_efficiency/workloads/wmt/input_pipeline.py
rename to algoperf/workloads/wmt/input_pipeline.py
index af1c54994..d743b43b0 100644
--- a/algorithmic_efficiency/workloads/wmt/input_pipeline.py
+++ b/algoperf/workloads/wmt/input_pipeline.py
@@ -6,9 +6,9 @@ import tensorflow as tf
 import tensorflow_datasets as tfds

-from algorithmic_efficiency import data_utils
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-from algorithmic_efficiency.workloads.wmt import tokenizer
+from algoperf import data_utils
+from algoperf.pytorch_utils import pytorch_setup
+from algoperf.workloads.wmt import tokenizer

 RANK = pytorch_setup()[1]
 # Avoid multithreading in all processes but the first (rank 0).
diff --git a/algorithmic_efficiency/workloads/wmt/tokenizer.py b/algoperf/workloads/wmt/tokenizer.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/tokenizer.py
rename to algoperf/workloads/wmt/tokenizer.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_jax/__init__.py b/algoperf/workloads/wmt/wmt_jax/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/wmt_jax/__init__.py
rename to algoperf/workloads/wmt/wmt_jax/__init__.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_jax/decode.py b/algoperf/workloads/wmt/wmt_jax/decode.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/wmt_jax/decode.py
rename to algoperf/workloads/wmt/wmt_jax/decode.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_jax/models.py b/algoperf/workloads/wmt/wmt_jax/models.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/wmt_jax/models.py
rename to algoperf/workloads/wmt/wmt_jax/models.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_jax/workload.py b/algoperf/workloads/wmt/wmt_jax/workload.py
similarity index 97%
rename from algorithmic_efficiency/workloads/wmt/wmt_jax/workload.py
rename to algoperf/workloads/wmt/wmt_jax/workload.py
index 046d5e469..9f919e7cb 100644
--- a/algorithmic_efficiency/workloads/wmt/wmt_jax/workload.py
+++ b/algoperf/workloads/wmt/wmt_jax/workload.py
@@ -13,12 +13,12 @@ import numpy as np
 import optax

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt import bleu
-from algorithmic_efficiency.workloads.wmt.wmt_jax import decode
-from algorithmic_efficiency.workloads.wmt.wmt_jax import models
-from algorithmic_efficiency.workloads.wmt.workload import BaseWmtWorkload
+from algoperf import param_utils
+from algoperf import spec
+from algoperf.workloads.wmt import bleu
+from algoperf.workloads.wmt.wmt_jax import decode
+from algoperf.workloads.wmt.wmt_jax import models
+from algoperf.workloads.wmt.workload import BaseWmtWorkload


 def _to_host(x: spec.Tensor) -> spec.Tensor:
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/__init__.py b/algoperf/workloads/wmt/wmt_pytorch/__init__.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/wmt_pytorch/__init__.py
rename to algoperf/workloads/wmt/wmt_pytorch/__init__.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/decode.py b/algoperf/workloads/wmt/wmt_pytorch/decode.py
similarity index 99%
rename from algorithmic_efficiency/workloads/wmt/wmt_pytorch/decode.py
rename to algoperf/workloads/wmt/wmt_pytorch/decode.py
index 0488a144f..ebfc64c50 100644
--- a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/decode.py
+++ b/algoperf/workloads/wmt/wmt_pytorch/decode.py
@@ -10,7 +10,7 @@ import torch
 import torch.nn.functional as F

-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf.pytorch_utils import pytorch_setup

 DEVICE = pytorch_setup()[2]
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/models.py b/algoperf/workloads/wmt/wmt_pytorch/models.py
similarity index 100%
rename from algorithmic_efficiency/workloads/wmt/wmt_pytorch/models.py
rename to algoperf/workloads/wmt/wmt_pytorch/models.py
diff --git a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/workload.py b/algoperf/workloads/wmt/wmt_pytorch/workload.py
similarity index 97%
rename from algorithmic_efficiency/workloads/wmt/wmt_pytorch/workload.py
rename to algoperf/workloads/wmt/wmt_pytorch/workload.py
index 0ba49c2f6..9d1248efd 100644
--- a/algorithmic_efficiency/workloads/wmt/wmt_pytorch/workload.py
+++ b/algoperf/workloads/wmt/wmt_pytorch/workload.py
@@ -12,13 +12,13 @@ import torch.nn.functional as F
 from torch.nn.parallel import DistributedDataParallel as DDP

-from algorithmic_efficiency import param_utils
-from algorithmic_efficiency import pytorch_utils
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt import bleu
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch import decode
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.models import Transformer
-from algorithmic_efficiency.workloads.wmt.workload import BaseWmtWorkload
+from algoperf import param_utils
+from algoperf import pytorch_utils
+from algoperf import spec
+from algoperf.workloads.wmt import bleu
+from algoperf.workloads.wmt.wmt_pytorch import decode
+from algoperf.workloads.wmt.wmt_pytorch.models import Transformer
+from algoperf.workloads.wmt.workload import BaseWmtWorkload

 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algorithmic_efficiency/workloads/wmt/workload.py b/algoperf/workloads/wmt/workload.py
similarity index 97%
rename from algorithmic_efficiency/workloads/wmt/workload.py
rename to algoperf/workloads/wmt/workload.py
index 68ebdc94b..e9a07d2b3 100644
--- a/algorithmic_efficiency/workloads/wmt/workload.py
+++ b/algoperf/workloads/wmt/workload.py
@@ -9,9 +9,9 @@ import numpy as np
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt import input_pipeline
-from algorithmic_efficiency.workloads.wmt.wmt_jax import decode
+from algoperf import spec
+from algoperf.workloads.wmt import input_pipeline
+from algoperf.workloads.wmt.wmt_jax import decode

 VOCAB_PATH = './wmt_256/sentencepiece_model'
 WORKDIR = './wmt_256'
diff --git a/algorithmic_efficiency/workloads/workloads.py b/algoperf/workloads/workloads.py
similarity index 98%
rename from algorithmic_efficiency/workloads/workloads.py
rename to algoperf/workloads/workloads.py
index bb57f598e..4712f4e25 100644
--- a/algorithmic_efficiency/workloads/workloads.py
+++ b/algoperf/workloads/workloads.py
@@ -4,9 +4,9 @@ import inspect
 import os

-from algorithmic_efficiency import spec
+from algoperf import spec

-BASE_WORKLOADS_DIR = 'algorithmic_efficiency/workloads/'
+BASE_WORKLOADS_DIR = 'algoperf/workloads/'

 WORKLOADS = {
     'cifar': {
diff --git a/datasets/dataset_setup.py b/datasets/dataset_setup.py
index 5b43a3f87..efe923dbe 100644
--- a/datasets/dataset_setup.py
+++ b/datasets/dataset_setup.py
@@ -71,8 +71,8 @@ import tensorflow_datasets as tfds
 from torchvision.datasets import CIFAR10

-from algorithmic_efficiency.workloads.wmt import tokenizer
-from algorithmic_efficiency.workloads.wmt.input_pipeline import \
+from algoperf.workloads.wmt import tokenizer
+from algoperf.workloads.wmt.input_pipeline import \
   normalize_feature_names
 from datasets import librispeech_preprocess
 from datasets import librispeech_tokenizer
diff --git a/prize_qualification_baselines/external_tuning/jax_nadamw_full_budget.py b/prize_qualification_baselines/external_tuning/jax_nadamw_full_budget.py
index 36e7e5607..445074b69 100644
--- a/prize_qualification_baselines/external_tuning/jax_nadamw_full_budget.py
+++ b/prize_qualification_baselines/external_tuning/jax_nadamw_full_budget.py
@@ -22,7 +22,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/prize_qualification_baselines/external_tuning/jax_nadamw_target_setting.py b/prize_qualification_baselines/external_tuning/jax_nadamw_target_setting.py
index 07281f540..ac21f1327 100644
--- a/prize_qualification_baselines/external_tuning/jax_nadamw_target_setting.py
+++ b/prize_qualification_baselines/external_tuning/jax_nadamw_target_setting.py
@@ -22,7 +22,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/prize_qualification_baselines/external_tuning/pytorch_nadamw_full_budget.py b/prize_qualification_baselines/external_tuning/pytorch_nadamw_full_budget.py
index a12523bde..a2f9fb4c5 100644
--- a/prize_qualification_baselines/external_tuning/pytorch_nadamw_full_budget.py
+++ b/prize_qualification_baselines/external_tuning/pytorch_nadamw_full_budget.py
@@ -11,8 +11,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/prize_qualification_baselines/external_tuning/pytorch_nadamw_target_setting.py b/prize_qualification_baselines/external_tuning/pytorch_nadamw_target_setting.py
index 93b41987e..a37b0d341 100644
--- a/prize_qualification_baselines/external_tuning/pytorch_nadamw_target_setting.py
+++ b/prize_qualification_baselines/external_tuning/pytorch_nadamw_target_setting.py
@@ -11,8 +11,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/prize_qualification_baselines/self_tuning/jax_nadamw_full_budget.py b/prize_qualification_baselines/self_tuning/jax_nadamw_full_budget.py
index 0d194ef7a..3e24e2e89 100644
--- a/prize_qualification_baselines/self_tuning/jax_nadamw_full_budget.py
+++ b/prize_qualification_baselines/self_tuning/jax_nadamw_full_budget.py
@@ -22,7 +22,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/prize_qualification_baselines/self_tuning/jax_nadamw_target_setting.py b/prize_qualification_baselines/self_tuning/jax_nadamw_target_setting.py
index 60fc25ec4..eb6b3ebb3 100644
--- a/prize_qualification_baselines/self_tuning/jax_nadamw_target_setting.py
+++ b/prize_qualification_baselines/self_tuning/jax_nadamw_target_setting.py
@@ -22,7 +22,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/prize_qualification_baselines/self_tuning/pytorch_nadamw_full_budget.py b/prize_qualification_baselines/self_tuning/pytorch_nadamw_full_budget.py
index 2dc29acad..3ef286877 100644
--- a/prize_qualification_baselines/self_tuning/pytorch_nadamw_full_budget.py
+++ b/prize_qualification_baselines/self_tuning/pytorch_nadamw_full_budget.py
@@ -11,8 +11,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/prize_qualification_baselines/self_tuning/pytorch_nadamw_target_setting.py b/prize_qualification_baselines/self_tuning/pytorch_nadamw_target_setting.py
index 6cc44cb12..e9f8810a6 100644
--- a/prize_qualification_baselines/self_tuning/pytorch_nadamw_target_setting.py
+++ b/prize_qualification_baselines/self_tuning/pytorch_nadamw_target_setting.py
@@ -11,8 +11,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/pyproject.toml b/pyproject.toml
index 0788d48a5..c21191adc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@
 ###############################################################################

 [project]
-name = "algorithmic_efficiency"
+name = "algoperf"
 dynamic = ["version"]
 description = "Codebase for the AlgoPerf: Training Algorithms benchmark"
 authors = [
@@ -62,7 +62,7 @@ zip-safe = false
 find = {}  # Scanning implicit namespaces is active by default

 [tool.setuptools_scm]
-version_file = "algorithmic_efficiency/_version.py"
+version_file = "algoperf/_version.py"

 ###############################################################################
 # (Optional) Dependencies                                                     #
@@ -70,10 +70,10 @@ version_file = "algorithmic_efficiency/_version.py"
 [project.optional-dependencies]
 # All workloads
 full = [
-    "algorithmic_efficiency[criteo1tb,fastmri,ogbg,librispeech_conformer,wmt]",
+    "algoperf[criteo1tb,fastmri,ogbg,librispeech_conformer,wmt]",
 ]
 # All workloads plus development dependencies
-full_dev = ["algorithmic_efficiency[full,dev]"]
+full_dev = ["algoperf[full,dev]"]
 # Dependencies for developing the package
 dev = [
     "isort==5.12.0",
@@ -106,12 +106,12 @@ jax_core_deps = [
 jax_cpu = [
     "jax==0.4.10",
     "jaxlib==0.4.10",
-    "algorithmic_efficiency[jax_core_deps]",
+    "algoperf[jax_core_deps]",
 ]
 jax_gpu = [
     "jax==0.4.10",
     "jaxlib==0.4.10+cuda12.cudnn88",
-    "algorithmic_efficiency[jax_core_deps]",
+    "algoperf[jax_core_deps]",
 ]
 pytorch_cpu = ["torch==2.1.0", "torchvision==0.16.0"]
 pytorch_gpu = [
@@ -130,7 +130,7 @@ based_on_style = "yapf"
 each_dict_entry_on_separate_line = false
 split_all_top_level_comma_separated_values = true
 [tool.yapfignore]
-ignore_patterns = ["algorithmic_efficiency/_version.py"]
+ignore_patterns = ["algoperf/_version.py"]

 # isort configuration
 [tool.isort]
diff --git a/reference_algorithms/development_algorithms/cifar/cifar_jax/submission.py b/reference_algorithms/development_algorithms/cifar/cifar_jax/submission.py
index e8e0bf4ac..614d66107 100644
--- a/reference_algorithms/development_algorithms/cifar/cifar_jax/submission.py
+++ b/reference_algorithms/development_algorithms/cifar/cifar_jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec


 def get_batch_size(workload_name):
diff --git a/reference_algorithms/development_algorithms/cifar/cifar_pytorch/submission.py b/reference_algorithms/development_algorithms/cifar/cifar_pytorch/submission.py
index c3e7a546b..d8b91f83a 100644
--- a/reference_algorithms/development_algorithms/cifar/cifar_pytorch/submission.py
+++ b/reference_algorithms/development_algorithms/cifar/cifar_pytorch/submission.py
@@ -7,7 +7,7 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
+from algoperf import spec


 def get_batch_size(workload_name):
diff --git a/reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py b/reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py
index b33c0285b..4148148a0 100644
--- a/reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py
+++ b/reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec


 def get_batch_size(workload_name):
diff --git a/reference_algorithms/development_algorithms/mnist/mnist_pytorch/submission.py b/reference_algorithms/development_algorithms/mnist/mnist_pytorch/submission.py
index b868bc787..dedd96793 100644
--- a/reference_algorithms/development_algorithms/mnist/mnist_pytorch/submission.py
+++ b/reference_algorithms/development_algorithms/mnist/mnist_pytorch/submission.py
@@ -4,7 +4,7 @@

 import torch

-from algorithmic_efficiency import spec
+from algoperf import spec


 def get_batch_size(workload_name):
diff --git a/reference_algorithms/paper_baselines/adafactor/jax/submission.py b/reference_algorithms/paper_baselines/adafactor/jax/submission.py
index 0fcb9da0f..abaf36ea5 100644
--- a/reference_algorithms/paper_baselines/adafactor/jax/submission.py
+++ b/reference_algorithms/paper_baselines/adafactor/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.paper_baselines.adafactor.jax.sharded_adafactor import \
   sharded_adafactor
diff --git a/reference_algorithms/paper_baselines/adafactor/pytorch/submission.py b/reference_algorithms/paper_baselines/adafactor/pytorch/submission.py
index c0eed45ef..7aa457a25 100644
--- a/reference_algorithms/paper_baselines/adafactor/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/adafactor/pytorch/submission.py
@@ -10,8 +10,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/adamw/jax/submission.py b/reference_algorithms/paper_baselines/adamw/jax/submission.py
index e80a29693..da0ccdc12 100644
--- a/reference_algorithms/paper_baselines/adamw/jax/submission.py
+++ b/reference_algorithms/paper_baselines/adamw/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/adamw/pytorch/submission.py b/reference_algorithms/paper_baselines/adamw/pytorch/submission.py
index 8da4e1671..21d9b6b57 100644
--- a/reference_algorithms/paper_baselines/adamw/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/adamw/pytorch/submission.py
@@ -9,8 +9,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/lamb/jax/submission.py b/reference_algorithms/paper_baselines/lamb/jax/submission.py
index ebcdc9914..9623e912a 100644
--- a/reference_algorithms/paper_baselines/lamb/jax/submission.py
+++ b/reference_algorithms/paper_baselines/lamb/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/lamb/pytorch/submission.py b/reference_algorithms/paper_baselines/lamb/pytorch/submission.py
index c0ecee69e..c1c6cec0a 100644
--- a/reference_algorithms/paper_baselines/lamb/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/lamb/pytorch/submission.py
@@ -10,7 +10,7 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
+from algoperf import spec

 # Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py
diff --git a/reference_algorithms/paper_baselines/momentum/jax/submission.py b/reference_algorithms/paper_baselines/momentum/jax/submission.py
index 271ef860b..7af999be8 100644
--- a/reference_algorithms/paper_baselines/momentum/jax/submission.py
+++ b/reference_algorithms/paper_baselines/momentum/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/momentum/pytorch/submission.py b/reference_algorithms/paper_baselines/momentum/pytorch/submission.py
index 272a79b4c..c3760d20e 100644
--- a/reference_algorithms/paper_baselines/momentum/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/momentum/pytorch/submission.py
@@ -8,8 +8,8 @@ import torch.distributed.nn as dist_nn
 from torch.optim.lr_scheduler import LambdaLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/nadamw/jax/submission.py b/reference_algorithms/paper_baselines/nadamw/jax/submission.py
index 36e7e5607..445074b69 100644
--- a/reference_algorithms/paper_baselines/nadamw/jax/submission.py
+++ b/reference_algorithms/paper_baselines/nadamw/jax/submission.py
@@ -22,7 +22,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/nadamw/pytorch/submission.py b/reference_algorithms/paper_baselines/nadamw/pytorch/submission.py
index a12523bde..a2f9fb4c5 100644
--- a/reference_algorithms/paper_baselines/nadamw/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/nadamw/pytorch/submission.py
@@ -11,8 +11,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/nesterov/jax/submission.py b/reference_algorithms/paper_baselines/nesterov/jax/submission.py
index a435643e4..0c9fe48c4 100644
--- a/reference_algorithms/paper_baselines/nesterov/jax/submission.py
+++ b/reference_algorithms/paper_baselines/nesterov/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/nesterov/pytorch/submission.py b/reference_algorithms/paper_baselines/nesterov/pytorch/submission.py
index aac4146a4..b4432fbff 100644
--- a/reference_algorithms/paper_baselines/nesterov/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/nesterov/pytorch/submission.py
@@ -8,8 +8,8 @@ import torch.distributed.nn as dist_nn
 from torch.optim.lr_scheduler import LambdaLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/sam/jax/submission.py b/reference_algorithms/paper_baselines/sam/jax/submission.py
index 5f45901dd..09995d0ef 100644
--- a/reference_algorithms/paper_baselines/sam/jax/submission.py
+++ b/reference_algorithms/paper_baselines/sam/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/paper_baselines/sam/pytorch/submission.py b/reference_algorithms/paper_baselines/sam/pytorch/submission.py
index 243174d34..92603f036 100644
--- a/reference_algorithms/paper_baselines/sam/pytorch/submission.py
+++ b/reference_algorithms/paper_baselines/sam/pytorch/submission.py
@@ -9,8 +9,8 @@ from torch.optim.lr_scheduler import LinearLR
 from torch.optim.lr_scheduler import SequentialLR

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/reference_algorithms/paper_baselines/shampoo/jax/submission.py b/reference_algorithms/paper_baselines/shampoo/jax/submission.py
index 294ad2706..41e223c9e 100644
--- a/reference_algorithms/paper_baselines/shampoo/jax/submission.py
+++ b/reference_algorithms/paper_baselines/shampoo/jax/submission.py
@@ -9,7 +9,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.paper_baselines.shampoo.jax.distributed_shampoo import \
   distributed_shampoo
diff --git a/reference_algorithms/target_setting_algorithms/data_selection.py b/reference_algorithms/target_setting_algorithms/data_selection.py
index ce24482fc..5e70f9f8b 100644
--- a/reference_algorithms/target_setting_algorithms/data_selection.py
+++ b/reference_algorithms/target_setting_algorithms/data_selection.py
@@ -1,6 +1,6 @@
 from typing import Dict, Iterator, Tuple

-from algorithmic_efficiency import spec
+from algoperf import spec


 def data_selection(
diff --git a/reference_algorithms/target_setting_algorithms/jax_adamw.py b/reference_algorithms/target_setting_algorithms/jax_adamw.py
index 6d2cfe245..edf9bae7a 100644
--- a/reference_algorithms/target_setting_algorithms/jax_adamw.py
+++ b/reference_algorithms/target_setting_algorithms/jax_adamw.py
@@ -4,7 +4,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms import cosine_warmup
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
diff --git a/reference_algorithms/target_setting_algorithms/jax_momentum.py b/reference_algorithms/target_setting_algorithms/jax_momentum.py
index 08a0f7e9d..6cdd9a8d6 100644
--- a/reference_algorithms/target_setting_algorithms/jax_momentum.py
+++ b/reference_algorithms/target_setting_algorithms/jax_momentum.py
@@ -7,7 +7,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.get_batch_size import \
diff --git a/reference_algorithms/target_setting_algorithms/jax_nadamw.py b/reference_algorithms/target_setting_algorithms/jax_nadamw.py
index 21f2a7b2b..9e23cf86f 100644
--- a/reference_algorithms/target_setting_algorithms/jax_nadamw.py
+++ b/reference_algorithms/target_setting_algorithms/jax_nadamw.py
@@ -8,7 +8,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms import cosine_warmup
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
diff --git a/reference_algorithms/target_setting_algorithms/jax_nesterov.py b/reference_algorithms/target_setting_algorithms/jax_nesterov.py
index 6b27e0e2a..9ef43fafb 100644
--- a/reference_algorithms/target_setting_algorithms/jax_nesterov.py
+++ b/reference_algorithms/target_setting_algorithms/jax_nesterov.py
@@ -7,7 +7,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.get_batch_size import \
diff --git a/reference_algorithms/target_setting_algorithms/jax_submission_base.py b/reference_algorithms/target_setting_algorithms/jax_submission_base.py
index 7a16c07cb..221cdf411 100644
--- a/reference_algorithms/target_setting_algorithms/jax_submission_base.py
+++ b/reference_algorithms/target_setting_algorithms/jax_submission_base.py
@@ -7,7 +7,7 @@ import jax.numpy as jnp
 import optax

-from algorithmic_efficiency import spec
+from algoperf import spec

 _GRAD_CLIP_EPS = 1e-6
diff --git a/reference_algorithms/target_setting_algorithms/pytorch_adamw.py b/reference_algorithms/target_setting_algorithms/pytorch_adamw.py
index 0dcb5ab14..c87bdfb7d 100644
--- a/reference_algorithms/target_setting_algorithms/pytorch_adamw.py
+++ b/reference_algorithms/target_setting_algorithms/pytorch_adamw.py
@@ -2,7 +2,7 @@

 import torch

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms import cosine_warmup
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
diff --git a/reference_algorithms/target_setting_algorithms/pytorch_momentum.py b/reference_algorithms/target_setting_algorithms/pytorch_momentum.py
index 1a2df449a..584caff39 100644
--- a/reference_algorithms/target_setting_algorithms/pytorch_momentum.py
+++ b/reference_algorithms/target_setting_algorithms/pytorch_momentum.py
@@ -3,7 +3,7 @@ import torch
 from torch.optim.lr_scheduler import LambdaLR

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.get_batch_size import \
diff --git a/reference_algorithms/target_setting_algorithms/pytorch_nadamw.py b/reference_algorithms/target_setting_algorithms/pytorch_nadamw.py
index 71b819e66..a9dee1d79 100644
--- a/reference_algorithms/target_setting_algorithms/pytorch_nadamw.py
+++ b/reference_algorithms/target_setting_algorithms/pytorch_nadamw.py
@@ -6,7 +6,7 @@ import torch
 from torch import Tensor

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms import cosine_warmup
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
diff --git a/reference_algorithms/target_setting_algorithms/pytorch_nesterov.py b/reference_algorithms/target_setting_algorithms/pytorch_nesterov.py
index 830e5eac9..8e10db4ef 100644
--- a/reference_algorithms/target_setting_algorithms/pytorch_nesterov.py
+++ b/reference_algorithms/target_setting_algorithms/pytorch_nesterov.py
@@ -3,7 +3,7 @@ import torch
 from torch.optim.lr_scheduler import LambdaLR

-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
   data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.get_batch_size import \
diff --git a/reference_algorithms/target_setting_algorithms/pytorch_submission_base.py b/reference_algorithms/target_setting_algorithms/pytorch_submission_base.py
index 2e2876555..bbfd8b0f2 100644
--- a/reference_algorithms/target_setting_algorithms/pytorch_submission_base.py
+++ b/reference_algorithms/target_setting_algorithms/pytorch_submission_base.py
@@ -6,8 +6,8 @@ import torch
 import torch.distributed.nn as dist_nn

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
+from algoperf import spec
+from algoperf.pytorch_utils import pytorch_setup

 USE_PYTORCH_DDP = pytorch_setup()[0]
diff --git a/scoring/performance_profile.py b/scoring/performance_profile.py
index f4f2d5679..615ac6fe1 100644
--- a/scoring/performance_profile.py
+++ b/scoring/performance_profile.py
@@ -38,14 +38,14 @@ import pandas as pd
 from tabulate import tabulate

-from algorithmic_efficiency.workloads.workloads import get_base_workload_name
-import algorithmic_efficiency.workloads.workloads as workloads_registry
+from algoperf.workloads.workloads import get_base_workload_name
+import algoperf.workloads.workloads as workloads_registry
 from scoring import scoring_utils

 WORKLOADS = workloads_registry.WORKLOADS
 BASE_WORKLOADS = workloads_registry.BASE_WORKLOADS
 WORKLOAD_NAME_PATTERN = '(.*)(_jax|_pytorch)'
-BASE_WORKLOADS_DIR = 'algorithmic_efficiency/workloads/'
+BASE_WORKLOADS_DIR = 'algoperf/workloads/'
 # Open json file to read heldout workloads
 # TODO: This probably shouldn't be hardcoded but passed as an argument.
 with open("held_out_workloads_algoperf_v05.json", "r") as f:
diff --git a/scoring/run_workloads.py b/scoring/run_workloads.py
index e474b6910..99c6e810e 100644
--- a/scoring/run_workloads.py
+++ b/scoring/run_workloads.py
@@ -20,8 +20,8 @@ from absl import flags
 from absl import logging

-from algorithmic_efficiency import random_utils as prng
-from algorithmic_efficiency.workloads.workloads import get_base_workload_name
+from algoperf import random_utils as prng
+from algoperf.workloads.workloads import get_base_workload_name
 import docker

 flags.DEFINE_string(
diff --git a/scoring/scoring_utils.py b/scoring/scoring_utils.py
index 0dd997ab9..ac513816e 100644
--- a/scoring/scoring_utils.py
+++ b/scoring/scoring_utils.py
@@ -7,7 +7,7 @@ from absl import logging
 import pandas as pd

-import algorithmic_efficiency.workloads.workloads as workloads_registry
+import algoperf.workloads.workloads as workloads_registry

 TRIAL_LINE_REGEX = '(.*) --- Tuning run (\d+)/(\d+) ---'
 METRICS_LINE_REGEX = '(.*) Metrics: ({.*})'
@@ -17,7 +17,7 @@ WORKLOADS = workloads_registry.WORKLOADS
 WORKLOAD_NAME_PATTERN = '(.*)(_jax|_pytorch)'

-BASE_WORKLOADS_DIR = 'algorithmic_efficiency/workloads/'
+BASE_WORKLOADS_DIR = 'algoperf/workloads/'

 #### File IO helper functions ###
diff --git a/submission_runner.py b/submission_runner.py
index 9f9b8ff42..1be56aeab 100644
--- a/submission_runner.py
+++ b/submission_runner.py
@@ -40,17 +40,17 @@
 # it unavailable to JAX.
 tf.config.set_visible_devices([], 'GPU')

-from algorithmic_efficiency import checkpoint_utils
-from algorithmic_efficiency import halton
-from algorithmic_efficiency import logger_utils
-from algorithmic_efficiency import random_utils as prng
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.profiler import PassThroughProfiler
-from algorithmic_efficiency.profiler import Profiler
-from algorithmic_efficiency.pytorch_utils import pytorch_init
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-from algorithmic_efficiency.pytorch_utils import sync_ddp_time
-from algorithmic_efficiency.workloads import workloads
+from algoperf import checkpoint_utils
+from algoperf import halton
+from algoperf import logger_utils
+from algoperf import random_utils as prng
+from algoperf import spec
+from algoperf.profiler import PassThroughProfiler
+from algoperf.profiler import Profiler
+from algoperf.pytorch_utils import pytorch_init
+from algoperf.pytorch_utils import pytorch_setup
+from algoperf.pytorch_utils import sync_ddp_time
+from algoperf.workloads import workloads

 # disable only for deepspeech if it works fine for other workloads.
 os.environ['XLA_FLAGS'] = '--xla_gpu_enable_triton_gemm=false'
diff --git a/submissions/template/submission.py b/submissions/template/submission.py
index 20991ab66..a4fdc62b4 100644
--- a/submissions/template/submission.py
+++ b/submissions/template/submission.py
@@ -6,7 +6,7 @@
 """
 from typing import Any, Dict, Iterator, List, Optional, Tuple

-from algorithmic_efficiency import spec
+from algoperf import spec


 def init_optimizer_state(workload: spec.Workload,
diff --git a/tests/modeldiffs/criteo1tb/compare.py b/tests/modeldiffs/criteo1tb/compare.py
index adbade983..d280803af 100644
--- a/tests/modeldiffs/criteo1tb/compare.py
+++ b/tests/modeldiffs/criteo1tb/compare.py
@@ -7,10 +7,10 @@ import numpy as np
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import \
   Criteo1TbDlrmSmallWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import \
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import \
   Criteo1TbDlrmSmallWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/criteo1tb_embed_init/compare.py b/tests/modeldiffs/criteo1tb_embed_init/compare.py
index 0748e2d71..73744c667 100644
--- a/tests/modeldiffs/criteo1tb_embed_init/compare.py
+++ b/tests/modeldiffs/criteo1tb_embed_init/compare.py
@@ -7,10 +7,10 @@ import numpy as np
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import \
   Criteo1TbDlrmSmallEmbedInitWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import \
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import \
   Criteo1TbDlrmSmallEmbedInitWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/criteo1tb_layernorm/compare.py b/tests/modeldiffs/criteo1tb_layernorm/compare.py
index 0a6e5c5ac..96e3cc5cc 100644
--- a/tests/modeldiffs/criteo1tb_layernorm/compare.py
+++ b/tests/modeldiffs/criteo1tb_layernorm/compare.py
@@ -7,10 +7,10 @@ import numpy as np
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import \
   Criteo1TbDlrmSmallLayerNormWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import \
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import \
   Criteo1TbDlrmSmallLayerNormWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/criteo1tb_resnet/compare.py b/tests/modeldiffs/criteo1tb_resnet/compare.py
index 288442594..188e4cac3 100644
--- a/tests/modeldiffs/criteo1tb_resnet/compare.py
+++ b/tests/modeldiffs/criteo1tb_resnet/compare.py
@@ -8,10 +8,10 @@ import numpy as np
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import \
   Criteo1TbDlrmSmallResNetWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import \
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import \
   Criteo1TbDlrmSmallResNetWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/fastmri/compare.py b/tests/modeldiffs/fastmri/compare.py
index 56b74b32d..da5f0ba0a 100644
--- a/tests/modeldiffs/fastmri/compare.py
+++ b/tests/modeldiffs/fastmri/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.fastmri.fastmri_jax.workload import \
   FastMRIWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import \
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import \
   FastMRIWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/fastmri_layernorm/compare.py b/tests/modeldiffs/fastmri_layernorm/compare.py
index 23ccf26d7..5f1eb1842 100644
--- a/tests/modeldiffs/fastmri_layernorm/compare.py
+++ b/tests/modeldiffs/fastmri_layernorm/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.fastmri.fastmri_jax.workload import \
   FastMRILayerNormWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import \
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import \
   FastMRILayerNormWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/fastmri_model_size/compare.py b/tests/modeldiffs/fastmri_model_size/compare.py
index b61516c29..ebb8669f8 100644
--- a/tests/modeldiffs/fastmri_model_size/compare.py
+++ b/tests/modeldiffs/fastmri_model_size/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.fastmri.fastmri_jax.workload import \
   FastMRIModelSizeWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import \
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import \
   FastMRIModelSizeWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/fastmri_tanh/compare.py b/tests/modeldiffs/fastmri_tanh/compare.py
index 0f455387c..558bc2ba1 100644
--- a/tests/modeldiffs/fastmri_tanh/compare.py
+++ b/tests/modeldiffs/fastmri_tanh/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.fastmri.fastmri_jax.workload import \
   FastMRITanhWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import \
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import \
   FastMRITanhWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/imagenet_resnet/compare.py b/tests/modeldiffs/imagenet_resnet/compare.py
index fb730f1bf..0a6a1b7c5 100644
--- a/tests/modeldiffs/imagenet_resnet/compare.py
+++ b/tests/modeldiffs/imagenet_resnet/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \
   ImagenetResNetWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import \
   ImagenetResNetWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/imagenet_resnet/gelu_compare.py b/tests/modeldiffs/imagenet_resnet/gelu_compare.py
index 6c8adbec2..4f20873b7 100644
--- a/tests/modeldiffs/imagenet_resnet/gelu_compare.py
+++ b/tests/modeldiffs/imagenet_resnet/gelu_compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \
   ImagenetResNetGELUWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import \
   ImagenetResNetGELUWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
 from tests.modeldiffs.imagenet_resnet.compare import key_transform
diff --git a/tests/modeldiffs/imagenet_resnet/silu_compare.py b/tests/modeldiffs/imagenet_resnet/silu_compare.py
index 7668cdbd9..e94fdcd4c 100644
--- a/tests/modeldiffs/imagenet_resnet/silu_compare.py
+++ b/tests/modeldiffs/imagenet_resnet/silu_compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \
   ImagenetResNetSiLUWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import \
   ImagenetResNetSiLUWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
 from tests.modeldiffs.imagenet_resnet.compare import key_transform
diff --git a/tests/modeldiffs/imagenet_vit/compare.py b/tests/modeldiffs/imagenet_vit/compare.py
index ba21e63da..b7b9af794 100644
--- a/tests/modeldiffs/imagenet_vit/compare.py
+++ b/tests/modeldiffs/imagenet_vit/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import \
   ImagenetVitWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import \
   ImagenetVitWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/imagenet_vit_glu/compare.py b/tests/modeldiffs/imagenet_vit_glu/compare.py
index 2c0aa546d..11edcd84e 100644
--- a/tests/modeldiffs/imagenet_vit_glu/compare.py
+++ b/tests/modeldiffs/imagenet_vit_glu/compare.py
@@ -9,10 +9,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import \
   ImagenetVitGluWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import \
   ImagenetVitGluWorkload as PyTorchWorkload

 sd_transform = None
diff --git a/tests/modeldiffs/imagenet_vit_map/compare.py b/tests/modeldiffs/imagenet_vit_map/compare.py
index e7c4c2ee8..70bcd2e04 100644
--- a/tests/modeldiffs/imagenet_vit_map/compare.py
+++ b/tests/modeldiffs/imagenet_vit_map/compare.py
@@ -9,10 +9,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import \
   ImagenetVitMapWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import \
   ImagenetVitMapWorkload as PytWorkload
diff --git a/tests/modeldiffs/imagenet_vit_postln/compare.py b/tests/modeldiffs/imagenet_vit_postln/compare.py
index 8a9063cac..113a65a2a 100644
--- a/tests/modeldiffs/imagenet_vit_postln/compare.py
+++ b/tests/modeldiffs/imagenet_vit_postln/compare.py
@@ -9,10 +9,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import \
   ImagenetVitPostLNWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import \
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import \
   ImagenetViTPostLNWorkload as PyTorchWorkload

 sd_transform = None
diff --git a/tests/modeldiffs/librispeech_conformer/compare.py b/tests/modeldiffs/librispeech_conformer/compare.py
index cfe6c7381..5bfbf915a 100644
--- a/tests/modeldiffs/librispeech_conformer/compare.py
+++ b/tests/modeldiffs/librispeech_conformer/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
   LibriSpeechConformerWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import \
   LibriSpeechConformerWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/librispeech_conformer_attention_temperature/compare.py b/tests/modeldiffs/librispeech_conformer_attention_temperature/compare.py
index 8480fca02..bb9a8fae1 100644
--- a/tests/modeldiffs/librispeech_conformer_attention_temperature/compare.py
+++ b/tests/modeldiffs/librispeech_conformer_attention_temperature/compare.py
@@ -6,10 +6,10 @@ import jax
 import torch

-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
   LibriSpeechConformerAttentionTemperatureWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import \
   LibriSpeechConformerAttentionTemperatureWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/librispeech_conformer_gelu/compare.py b/tests/modeldiffs/librispeech_conformer_gelu/compare.py
index caa9b09b9..629418488 100644
--- a/tests/modeldiffs/librispeech_conformer_gelu/compare.py
+++ b/tests/modeldiffs/librispeech_conformer_gelu/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
     LibriSpeechConformerGeluWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import \
     LibriSpeechConformerGeluWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/librispeech_conformer_layernorm/compare.py b/tests/modeldiffs/librispeech_conformer_layernorm/compare.py
index 1a94d3c77..48fe991f7 100644
--- a/tests/modeldiffs/librispeech_conformer_layernorm/compare.py
+++ b/tests/modeldiffs/librispeech_conformer_layernorm/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
     LibriSpeechConformerLayerNormWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import \
     LibriSpeechConformerLayerNormWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/librispeech_deepspeech/compare.py b/tests/modeldiffs/librispeech_deepspeech/compare.py
index edcc3ba87..81e12b15d 100644
--- a/tests/modeldiffs/librispeech_deepspeech/compare.py
+++ b/tests/modeldiffs/librispeech_deepspeech/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import \
     LibriSpeechDeepSpeechWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
     LibriSpeechDeepSpeechWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/librispeech_deepspeech_noresnet/compare.py b/tests/modeldiffs/librispeech_deepspeech_noresnet/compare.py
index 6c00bdf69..ea106ebe4 100644
--- a/tests/modeldiffs/librispeech_deepspeech_noresnet/compare.py
+++ b/tests/modeldiffs/librispeech_deepspeech_noresnet/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import \
     LibriSpeechDeepSpeechTanhWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
     LibriSpeechDeepSpeechTanhWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
 from tests.modeldiffs.librispeech_deepspeech.compare import key_transform
diff --git a/tests/modeldiffs/librispeech_deepspeech_normaug/compare.py b/tests/modeldiffs/librispeech_deepspeech_normaug/compare.py
index c68d6adf9..ecb6d28af 100644
--- a/tests/modeldiffs/librispeech_deepspeech_normaug/compare.py
+++ b/tests/modeldiffs/librispeech_deepspeech_normaug/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import \
     LibriSpeechDeepSpeechNormAndSpecAugWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
     LibriSpeechDeepSpeechNormAndSpecAugWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
 from tests.modeldiffs.librispeech_deepspeech.compare import key_transform
diff --git a/tests/modeldiffs/librispeech_deepspeech_tanh/compare.py b/tests/modeldiffs/librispeech_deepspeech_tanh/compare.py
index 4cfdf4f21..31d9029b4 100644
--- a/tests/modeldiffs/librispeech_deepspeech_tanh/compare.py
+++ b/tests/modeldiffs/librispeech_deepspeech_tanh/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import \
     LibriSpeechDeepSpeechNoResNetWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
     LibriSpeechDeepSpeechNoResNetWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
 from tests.modeldiffs.librispeech_deepspeech.compare import key_transform
diff --git a/tests/modeldiffs/ogbg/compare.py b/tests/modeldiffs/ogbg/compare.py
index 56316ba12..43ca48764 100644
--- a/tests/modeldiffs/ogbg/compare.py
+++ b/tests/modeldiffs/ogbg/compare.py
@@ -8,10 +8,10 @@
 import numpy as np
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.ogbg.ogbg_jax.workload import \
     OgbgWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
     OgbgWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/ogbg_gelu/compare.py b/tests/modeldiffs/ogbg_gelu/compare.py
index b58bcd461..062588fe2 100644
--- a/tests/modeldiffs/ogbg_gelu/compare.py
+++ b/tests/modeldiffs/ogbg_gelu/compare.py
@@ -8,10 +8,10 @@
 import numpy as np
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.ogbg.ogbg_jax.workload import \
     OgbgGeluWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
     OgbgGeluWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/ogbg_model_size/compare.py b/tests/modeldiffs/ogbg_model_size/compare.py
index 62443bbb5..2eb70d097 100644
--- a/tests/modeldiffs/ogbg_model_size/compare.py
+++ b/tests/modeldiffs/ogbg_model_size/compare.py
@@ -8,10 +8,10 @@
 import numpy as np
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.ogbg.ogbg_jax.workload import \
     OgbgModelSizeWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
     OgbgModelSizeWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/ogbg_silu/compare.py b/tests/modeldiffs/ogbg_silu/compare.py
index 2922b7046..19e446030 100644
--- a/tests/modeldiffs/ogbg_silu/compare.py
+++ b/tests/modeldiffs/ogbg_silu/compare.py
@@ -8,10 +8,10 @@
 import numpy as np
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.ogbg.ogbg_jax.workload import \
     OgbgSiluWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
     OgbgSiluWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/vanilla_sgd_jax.py b/tests/modeldiffs/vanilla_sgd_jax.py
index d45694bcb..62b98bd17 100644
--- a/tests/modeldiffs/vanilla_sgd_jax.py
+++ b/tests/modeldiffs/vanilla_sgd_jax.py
@@ -3,7 +3,7 @@
 import jax.numpy as jnp
 import optax
 
-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
     data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.jax_submission_base import \
diff --git a/tests/modeldiffs/vanilla_sgd_pytorch.py b/tests/modeldiffs/vanilla_sgd_pytorch.py
index 254ef6018..a6a0c5fa6 100644
--- a/tests/modeldiffs/vanilla_sgd_pytorch.py
+++ b/tests/modeldiffs/vanilla_sgd_pytorch.py
@@ -1,6 +1,6 @@
 import torch
 
-from algorithmic_efficiency import spec
+from algoperf import spec
 from reference_algorithms.target_setting_algorithms.data_selection import \
     data_selection  # pylint: disable=unused-import
 from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
diff --git a/tests/modeldiffs/wmt/compare.py b/tests/modeldiffs/wmt/compare.py
index 41fc5ee17..73bc03f78 100644
--- a/tests/modeldiffs/wmt/compare.py
+++ b/tests/modeldiffs/wmt/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.wmt.wmt_jax.workload import \
     WmtWorkload as JaxWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import \
+from algoperf.workloads.wmt.wmt_pytorch.workload import \
     WmtWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/wmt_attention_temp/compare.py b/tests/modeldiffs/wmt_attention_temp/compare.py
index 92ce4eb44..01dc2895c 100644
--- a/tests/modeldiffs/wmt_attention_temp/compare.py
+++ b/tests/modeldiffs/wmt_attention_temp/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.wmt.wmt_jax.workload import \
     WmtWorkloadAttentionTemp as JaxWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import \
+from algoperf.workloads.wmt.wmt_pytorch.workload import \
     WmtWorkloadAttentionTemp as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/wmt_glu_tanh/compare.py b/tests/modeldiffs/wmt_glu_tanh/compare.py
index b8d860479..77e71c826 100644
--- a/tests/modeldiffs/wmt_glu_tanh/compare.py
+++ b/tests/modeldiffs/wmt_glu_tanh/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.wmt.wmt_jax.workload import \
     WmtWorkloadGLUTanH as JaxWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import \
+from algoperf.workloads.wmt.wmt_pytorch.workload import \
     WmtWorkloadGLUTanH as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/modeldiffs/wmt_post_ln/compare.py b/tests/modeldiffs/wmt_post_ln/compare.py
index 3f5469d8d..909fcd672 100644
--- a/tests/modeldiffs/wmt_post_ln/compare.py
+++ b/tests/modeldiffs/wmt_post_ln/compare.py
@@ -6,10 +6,10 @@
 import jax
 import torch
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.wmt.wmt_jax.workload import \
     WmtWorkloadPostLN as JaxWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import \
+from algoperf.workloads.wmt.wmt_pytorch.workload import \
     WmtWorkloadPostLN as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/reference_algorithm_tests.py b/tests/reference_algorithm_tests.py
index f107be8d7..3f279a605 100644
--- a/tests/reference_algorithm_tests.py
+++ b/tests/reference_algorithm_tests.py
@@ -40,14 +40,14 @@
 import torch
 import torch.distributed as dist
 
-from algorithmic_efficiency import halton
-from algorithmic_efficiency import pytorch_utils
-from algorithmic_efficiency import random_utils as prng
-from algorithmic_efficiency.profiler import PassThroughProfiler
-from algorithmic_efficiency.workloads import workloads
-from algorithmic_efficiency.workloads.ogbg import \
+from algoperf import halton
+from algoperf import pytorch_utils
+from algoperf import random_utils as prng
+from algoperf.profiler import PassThroughProfiler
+from algoperf.workloads import workloads
+from algoperf.workloads.ogbg import \
     input_pipeline as ogbg_input_pipeline
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
     _graph_map
 import submission_runner
 from tests.modeldiffs import diff as diff_utils
diff --git a/tests/submission_runner_test.py b/tests/submission_runner_test.py
index cc98e603e..ff724b201 100644
--- a/tests/submission_runner_test.py
+++ b/tests/submission_runner_test.py
@@ -13,7 +13,7 @@
 from absl.testing import absltest
 from absl.testing import parameterized
 
-from algorithmic_efficiency.profiler import PassThroughProfiler
+from algoperf.profiler import PassThroughProfiler
 import submission_runner
 
 FLAGS = flags.FLAGS
diff --git a/tests/test_baselines.py b/tests/test_baselines.py
index f79e629e7..b2be8aa11 100644
--- a/tests/test_baselines.py
+++ b/tests/test_baselines.py
@@ -12,8 +12,8 @@
 from absl.testing import absltest
 from absl.testing import parameterized
 
-from algorithmic_efficiency.profiler import PassThroughProfiler
-from algorithmic_efficiency.workloads import workloads
+from algoperf.profiler import PassThroughProfiler
+from algoperf.workloads import workloads
 import submission_runner
 
 FLAGS = flags.FLAGS
diff --git a/tests/test_num_params.py b/tests/test_num_params.py
index 574fd0aa5..83a23c9a4 100644
--- a/tests/test_num_params.py
+++ b/tests/test_num_params.py
@@ -5,42 +5,42 @@
 import pytest
 import torch
 
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.models import \
+from algoperf.workloads.criteo1tb.criteo1tb_jax.models import \
     DlrmSmall as JaxDlrmSmall
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.models import \
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.models import \
     DlrmSmall as PyTorchDlrmSmall
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \
+from algoperf.workloads.imagenet_resnet.imagenet_jax.models import \
     ResNet18 as JaxResNet_c10
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \
+from algoperf.workloads.imagenet_resnet.imagenet_jax.models import \
     ResNet50 as JaxResNet
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
     resnet18 as PyTorchResNet_c10
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
     resnet50 as PyTorchResNet
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.models import \
+from algoperf.workloads.imagenet_vit.imagenet_jax.models import \
     ViT as JaxViT
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.models import \
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.models import \
     ViT as PyTorchViT
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.models import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax.models import \
     Conformer as JaxConformer
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.models import \
+from algoperf.workloads.librispeech_conformer.librispeech_jax.models import \
     ConformerConfig as JaxConformerConfig
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.models import \
     ConformerConfig as PytorchConformerConfig
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.models import \
     ConformerEncoderDecoder as PytorchConformer
-from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import \
+from algoperf.workloads.mnist.mnist_jax.workload import \
     _Model as JaxMLP
-from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import \
+from algoperf.workloads.mnist.mnist_pytorch.workload import \
     _Model as PyTorchMLP
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.models import GNN as JaxGNN
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.models import \
+from algoperf.workloads.ogbg.ogbg_jax.models import GNN as JaxGNN
+from algoperf.workloads.ogbg.ogbg_pytorch.models import \
     GNN as PyTorchGNN
-from algorithmic_efficiency.workloads.wmt.wmt_jax.models import \
+from algoperf.workloads.wmt.wmt_jax.models import \
     Transformer as JaxTransformer
-from algorithmic_efficiency.workloads.wmt.wmt_jax.models import \
+from algoperf.workloads.wmt.wmt_jax.models import \
     TransformerConfig
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.models import \
+from algoperf.workloads.wmt.wmt_pytorch.models import \
     Transformer as PyTorchTransformer
 
 WORKLOADS = [
diff --git a/tests/test_param_shapes.py b/tests/test_param_shapes.py
index b67625213..96a7bace5 100644
--- a/tests/test_param_shapes.py
+++ b/tests/test_param_shapes.py
@@ -6,26 +6,26 @@
 # isort: skip_file
 # pylint:disable=line-too-long
-from algorithmic_efficiency.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
-from algorithmic_efficiency.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
-from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
-from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
+from algoperf.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
+from algoperf.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
+from algoperf.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
+from algoperf.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
+from algoperf.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
+from algoperf.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
+from algoperf.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
+from algoperf.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
 # pylint:enable=line-too-long
 
 WORKLOADS = [
diff --git a/tests/test_param_types.py b/tests/test_param_types.py
index 7cf8f63c3..d3722ae86 100644
--- a/tests/test_param_types.py
+++ b/tests/test_param_types.py
@@ -2,30 +2,30 @@
 import pytest
 from absl import logging
 
-from algorithmic_efficiency import spec
+from algoperf import spec
 
 # isort: skip_file
 # pylint:disable=line-too-long
-from algorithmic_efficiency.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
-from algorithmic_efficiency.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
-from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
-from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
-from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
-from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
-from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
-from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
-from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
+from algoperf.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
+from algoperf.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
+from algoperf.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
+from algoperf.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
+from algoperf.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
+from algoperf.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
+from algoperf.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
+from algoperf.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
+from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
+from algoperf.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
+from algoperf.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
+from algoperf.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
+from algoperf.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
+from algoperf.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
+from algoperf.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
 # pylint:enable=line-too-long
 
 WORKLOADS = [
diff --git a/tests/test_ssim.py b/tests/test_ssim.py
index fadf41f64..ba0b2ca7f 100644
--- a/tests/test_ssim.py
+++ b/tests/test_ssim.py
@@ -9,14 +9,14 @@
 import numpy as np
 import torch
 
-from algorithmic_efficiency.pytorch_utils import pytorch_setup
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import \
+from algoperf.pytorch_utils import pytorch_setup
+from algoperf.workloads.fastmri.fastmri_jax.ssim import \
     _uniform_filter as _jax_uniform_filter
-from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import \
+from algoperf.workloads.fastmri.fastmri_jax.ssim import \
     ssim as jax_ssim
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import \
+from algoperf.workloads.fastmri.fastmri_pytorch.ssim import \
     _uniform_filter as _pytorch_uniform_filter
-from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import \
+from algoperf.workloads.fastmri.fastmri_pytorch.ssim import \
     ssim as pytorch_ssim
 
 # Make sure no GPU memory is preallocated to Jax.
diff --git a/tests/test_version.py b/tests/test_version.py
index ef01d4f32..d1bfbd18f 100644
--- a/tests/test_version.py
+++ b/tests/test_version.py
@@ -1,13 +1,13 @@
 """Check whether the __version__ attribute is set correctly."""
 
-import algorithmic_efficiency
+import algoperf
 
 
 def test_version_attribute():
   """Check whether __version__ exists and is a valid string."""
-  assert hasattr(algorithmic_efficiency, "__version__")
-  version = algorithmic_efficiency.__version__
+  assert hasattr(algoperf, "__version__")
+  version = algoperf.__version__
   assert isinstance(version, str)
   version_elements = version.split(".")
   print(version_elements)
diff --git a/tests/version_test.py b/tests/version_test.py
new file mode 100644
index 000000000..2205b305f
--- /dev/null
+++ b/tests/version_test.py
@@ -0,0 +1,16 @@
+"""Check whether the __version__ attribute is set correctly."""
+
+import algoperf
+
+
+def test_version_attribute():
+  """Check whether __version__ exists and is a valid string."""
+
+  assert hasattr(algoperf, "__version__")
+  version = algoperf.__version__
+  assert isinstance(version, str)
+  version_elements = version.split(".")
+  print(version_elements)
+  # Only check the first three elements, i.e. major, minor, patch.
+  # The remaining elements contain commit hash and dirty status.
+  assert all(el.isnumeric() for el in version_elements[0:3])
diff --git a/tests/workloads/imagenet_resnet/imagenet_jax/workload_test.py b/tests/workloads/imagenet_resnet/imagenet_jax/workload_test.py
index 6a85c2196..66b1dbc6a 100644
--- a/tests/workloads/imagenet_resnet/imagenet_jax/workload_test.py
+++ b/tests/workloads/imagenet_resnet/imagenet_jax/workload_test.py
@@ -4,8 +4,8 @@
 import jax
 import jax.numpy as jnp
 
-from algorithmic_efficiency import spec
-from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
+from algoperf import spec
+from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \
     ImagenetResNetWorkload

From bc666a7420f01a15bd8f96ab66249e6afa6ced9e Mon Sep 17 00:00:00 2001
From: Frank Schneider
Date: Wed, 15 Jan 2025 16:47:18 +0100
Subject: [PATCH 14/14] Fix linting (due to shorter package name in imports)

---
 algoperf/workloads/cifar/cifar_jax/models.py      |  3 +--
 algoperf/workloads/cifar/cifar_jax/workload.py    |  3 +--
 algoperf/workloads/cifar/cifar_pytorch/models.py  |  3 +--
 .../workloads/cifar/cifar_pytorch/workload.py     |  3 +--
 .../workloads/fastmri/fastmri_jax/workload.py     |  3 +--
 .../fastmri/fastmri_pytorch/workload.py           |  6 ++----
 .../imagenet_jax/input_pipeline.py                |  3 +--
 .../imagenet_resnet/imagenet_jax/workload.py      |  6 ++----
 .../imagenet_resnet/imagenet_pytorch/workload.py  |  6 ++----
 .../workloads/imagenet_resnet/imagenet_v2.py      |  3 +--
 .../imagenet_vit/imagenet_jax/workload.py         |  6 ++----
 .../imagenet_vit/imagenet_pytorch/models.py       |  3 +--
 .../imagenet_vit/imagenet_pytorch/workload.py     |  9 +++------
 .../librispeech_jax/workload.py                   |  3 +--
 .../librispeech_pytorch/workload.py               |  3 +--
 .../librispeech_jax/workload.py                   |  3 +--
 tests/modeldiffs/wmt/compare.py                   |  3 +--
 tests/reference_algorithm_tests.py                |  6 ++----
 tests/test_num_params.py                          | 15 +++++----------
 tests/test_ssim.py                                |  3 +--
 tests/version_test.py                             | 16 ----------------
 21 files changed, 31 insertions(+), 78 deletions(-)
 delete mode 100644 tests/version_test.py

diff --git a/algoperf/workloads/cifar/cifar_jax/models.py b/algoperf/workloads/cifar/cifar_jax/models.py
index 4d5df766e..957079272 100644
--- a/algoperf/workloads/cifar/cifar_jax/models.py
+++ b/algoperf/workloads/cifar/cifar_jax/models.py
@@ -11,8 +11,7 @@
 import jax.numpy as jnp
 
 from algoperf import spec
-from algoperf.workloads.imagenet_resnet.imagenet_jax.models import \
-    ResNetBlock
+from algoperf.workloads.imagenet_resnet.imagenet_jax.models import ResNetBlock
 
 ModuleDef = nn.Module
diff --git a/algoperf/workloads/cifar/cifar_jax/workload.py b/algoperf/workloads/cifar/cifar_jax/workload.py
index f4bcffbc3..952bb977d 100644
--- a/algoperf/workloads/cifar/cifar_jax/workload.py
+++ b/algoperf/workloads/cifar/cifar_jax/workload.py
@@ -14,8 +14,7 @@
 from algoperf import param_utils
 from algoperf import spec
 from algoperf.workloads.cifar.cifar_jax import models
-from algoperf.workloads.cifar.cifar_jax.input_pipeline import \
-    create_input_iter
+from algoperf.workloads.cifar.cifar_jax.input_pipeline import create_input_iter
 from algoperf.workloads.cifar.workload import BaseCifarWorkload
diff --git a/algoperf/workloads/cifar/cifar_pytorch/models.py b/algoperf/workloads/cifar/cifar_pytorch/models.py
index 393d568b9..e6a7a8a81 100644
--- a/algoperf/workloads/cifar/cifar_pytorch/models.py
+++ b/algoperf/workloads/cifar/cifar_pytorch/models.py
@@ -16,8 +16,7 @@
     BasicBlock
 from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
     Bottleneck
-from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
-    conv1x1
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import conv1x1
 
 
 class ResNet(nn.Module):
diff --git a/algoperf/workloads/cifar/cifar_pytorch/workload.py b/algoperf/workloads/cifar/cifar_pytorch/workload.py
index 2ba92f0b9..b16d62204 100644
--- a/algoperf/workloads/cifar/cifar_pytorch/workload.py
+++ b/algoperf/workloads/cifar/cifar_pytorch/workload.py
@@ -16,8 +16,7 @@
 from algoperf import param_utils
 from algoperf import pytorch_utils
 from algoperf import spec
-from algoperf.workloads.cifar.cifar_pytorch.models import \
-    resnet18
+from algoperf.workloads.cifar.cifar_pytorch.models import resnet18
 from algoperf.workloads.cifar.workload import BaseCifarWorkload
 
 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algoperf/workloads/fastmri/fastmri_jax/workload.py b/algoperf/workloads/fastmri/fastmri_jax/workload.py
index 393aa19d7..1156cf30a 100644
--- a/algoperf/workloads/fastmri/fastmri_jax/workload.py
+++ b/algoperf/workloads/fastmri/fastmri_jax/workload.py
@@ -13,8 +13,7 @@
 import algoperf.random_utils as prng
 from algoperf.workloads.fastmri.fastmri_jax.models import UNet
 from algoperf.workloads.fastmri.fastmri_jax.ssim import ssim
-from algoperf.workloads.fastmri.workload import \
-    BaseFastMRIWorkload
+from algoperf.workloads.fastmri.workload import BaseFastMRIWorkload
 
 
 class FastMRIWorkload(BaseFastMRIWorkload):
diff --git a/algoperf/workloads/fastmri/fastmri_pytorch/workload.py b/algoperf/workloads/fastmri/fastmri_pytorch/workload.py
index f40654678..58943de2f 100644
--- a/algoperf/workloads/fastmri/fastmri_pytorch/workload.py
+++ b/algoperf/workloads/fastmri/fastmri_pytorch/workload.py
@@ -13,11 +13,9 @@
 from algoperf import pytorch_utils
 from algoperf import spec
 import algoperf.random_utils as prng
-from algoperf.workloads.fastmri.fastmri_pytorch.models import \
-    UNet
+from algoperf.workloads.fastmri.fastmri_pytorch.models import UNet
 from algoperf.workloads.fastmri.fastmri_pytorch.ssim import ssim
-from algoperf.workloads.fastmri.workload import \
-    BaseFastMRIWorkload
+from algoperf.workloads.fastmri.workload import BaseFastMRIWorkload
 
 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py
index 709a318c2..66105335b 100644
--- a/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py
+++ b/algoperf/workloads/imagenet_resnet/imagenet_jax/input_pipeline.py
@@ -14,8 +14,7 @@
 from algoperf import data_utils
 from algoperf import spec
-from algoperf.workloads.imagenet_resnet.imagenet_jax import \
-    randaugment
+from algoperf.workloads.imagenet_resnet.imagenet_jax import randaugment
 
 TFDS_SPLIT_NAME = {
     'train': 'train', 'eval_train': 'train', 'validation': 'validation'
diff --git a/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py b/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py
index b445e9f00..9494fd63c 100644
--- a/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py
+++ b/algoperf/workloads/imagenet_resnet/imagenet_jax/workload.py
@@ -21,10 +21,8 @@
 from algoperf import random_utils as prng
 from algoperf import spec
 from algoperf.workloads.imagenet_resnet import imagenet_v2
-from algoperf.workloads.imagenet_resnet.imagenet_jax import \
-    input_pipeline
-from algoperf.workloads.imagenet_resnet.imagenet_jax import \
-    models
+from algoperf.workloads.imagenet_resnet.imagenet_jax import input_pipeline
+from algoperf.workloads.imagenet_resnet.imagenet_jax import models
 from algoperf.workloads.imagenet_resnet.workload import \
     BaseImagenetResNetWorkload
diff --git a/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py
index 7a08f325e..92b651ba2 100644
--- a/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py
+++ b/algoperf/workloads/imagenet_resnet/imagenet_pytorch/workload.py
@@ -22,10 +22,8 @@
 from algoperf import spec
 import algoperf.random_utils as prng
 from algoperf.workloads.imagenet_resnet import imagenet_v2
-from algoperf.workloads.imagenet_resnet.imagenet_pytorch import \
-    randaugment
-from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
-    resnet50
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch import randaugment
+from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import resnet50
 from algoperf.workloads.imagenet_resnet.workload import \
     BaseImagenetResNetWorkload
diff --git a/algoperf/workloads/imagenet_resnet/imagenet_v2.py b/algoperf/workloads/imagenet_resnet/imagenet_v2.py
index f63ddbc34..84d364586 100644
--- a/algoperf/workloads/imagenet_resnet/imagenet_v2.py
+++ b/algoperf/workloads/imagenet_resnet/imagenet_v2.py
@@ -10,8 +10,7 @@
 from algoperf import data_utils
 from algoperf import spec
-from algoperf.workloads.imagenet_resnet.imagenet_jax import \
-    input_pipeline
+from algoperf.workloads.imagenet_resnet.imagenet_jax import input_pipeline
 
 
 def get_imagenet_v2_iter(data_dir: str,
diff --git a/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py b/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py
index 2261aac6d..9a6190f5e 100644
--- a/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py
+++ b/algoperf/workloads/imagenet_vit/imagenet_jax/workload.py
@@ -12,10 +12,8 @@
 from algoperf.workloads.imagenet_resnet.imagenet_jax.workload import \
     ImagenetResNetWorkload
 from algoperf.workloads.imagenet_vit.imagenet_jax import models
-from algoperf.workloads.imagenet_vit.workload import \
-    BaseImagenetVitWorkload
-from algoperf.workloads.imagenet_vit.workload import \
-    decode_variant
+from algoperf.workloads.imagenet_vit.workload import BaseImagenetVitWorkload
+from algoperf.workloads.imagenet_vit.workload import decode_variant
 
 
 # Make sure we inherit from the ViT base workload first.
diff --git a/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py b/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py
index 4fac8bd35..fcf0992d3 100644
--- a/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py
+++ b/algoperf/workloads/imagenet_vit/imagenet_pytorch/models.py
@@ -14,8 +14,7 @@
 from algoperf import init_utils
 from algoperf import spec
-from algoperf.workloads.wmt.wmt_pytorch.models import \
-    MultiheadAttention
+from algoperf.workloads.wmt.wmt_pytorch.models import MultiheadAttention
 
 
 def posemb_sincos_2d(patches: spec.Tensor, temperature=10_000.) -> spec.Tensor:
diff --git a/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py b/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py
index 20b294b47..97bb38515 100644
--- a/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py
+++ b/algoperf/workloads/imagenet_vit/imagenet_pytorch/workload.py
@@ -11,12 +11,9 @@
 from algoperf import spec
 from algoperf.workloads.imagenet_resnet.imagenet_pytorch.workload import \
     ImagenetResNetWorkload
-from algoperf.workloads.imagenet_vit.imagenet_pytorch import \
-    models
-from algoperf.workloads.imagenet_vit.workload import \
-    BaseImagenetVitWorkload
-from algoperf.workloads.imagenet_vit.workload import \
-    decode_variant
+from algoperf.workloads.imagenet_vit.imagenet_pytorch import models
+from algoperf.workloads.imagenet_vit.workload import BaseImagenetVitWorkload
+from algoperf.workloads.imagenet_vit.workload import decode_variant
 
 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py b/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
index b4fdb0811..8d9872461 100644
--- a/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_jax/workload.py
@@ -18,8 +18,7 @@
 from algoperf.workloads.librispeech_conformer import workload
 from algoperf.workloads.librispeech_conformer.input_pipeline import \
     LibriSpeechDataset
-from algoperf.workloads.librispeech_conformer.librispeech_jax import \
-    models
+from algoperf.workloads.librispeech_conformer.librispeech_jax import models
 
 
 class LibriSpeechConformerWorkload(workload.BaseLibrispeechWorkload):
diff --git a/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
index 592e63989..974b3bb19 100644
--- a/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
+++ b/algoperf/workloads/librispeech_conformer/librispeech_pytorch/workload.py
@@ -19,8 +19,7 @@
 from algoperf.workloads.librispeech_conformer import workload
 from algoperf.workloads.librispeech_conformer.input_pipeline import \
     LibriSpeechDataset
-from algoperf.workloads.librispeech_conformer.librispeech_pytorch import \
-    models
+from algoperf.workloads.librispeech_conformer.librispeech_pytorch import models
 
 USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
diff --git a/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
index 3e0781deb..9fd0898b4 100644
--- a/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
+++ b/algoperf/workloads/librispeech_deepspeech/librispeech_jax/workload.py
@@ -10,8 +10,7 @@
 from algoperf import spec
 from algoperf.workloads.librispeech_conformer.librispeech_jax.workload import \
     LibriSpeechConformerWorkload
-from algoperf.workloads.librispeech_deepspeech.librispeech_jax import \
-    models
+from algoperf.workloads.librispeech_deepspeech.librispeech_jax import models
 
 
 class LibriSpeechDeepSpeechWorkload(LibriSpeechConformerWorkload):
diff --git a/tests/modeldiffs/wmt/compare.py b/tests/modeldiffs/wmt/compare.py
index 73bc03f78..64401ef7f 100644
--- a/tests/modeldiffs/wmt/compare.py
+++ b/tests/modeldiffs/wmt/compare.py
@@ -7,8 +7,7 @@
 import torch
 
 from algoperf import spec
-from algoperf.workloads.wmt.wmt_jax.workload import \
-    WmtWorkload as JaxWorkload
+from algoperf.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWorkload
 from algoperf.workloads.wmt.wmt_pytorch.workload import \
     WmtWorkload as PyTorchWorkload
 from tests.modeldiffs.diff import out_diff
diff --git a/tests/reference_algorithm_tests.py b/tests/reference_algorithm_tests.py
index 3f279a605..0a17e470c 100644
--- a/tests/reference_algorithm_tests.py
+++ b/tests/reference_algorithm_tests.py
@@ -45,10 +45,8 @@
 from algoperf import random_utils as prng
 from algoperf.profiler import PassThroughProfiler
 from algoperf.workloads import workloads
-from algoperf.workloads.ogbg import \
-    input_pipeline as ogbg_input_pipeline
-from algoperf.workloads.ogbg.ogbg_pytorch.workload import \
-    _graph_map
+from algoperf.workloads.ogbg import input_pipeline as ogbg_input_pipeline
+from algoperf.workloads.ogbg.ogbg_pytorch.workload import _graph_map
 import submission_runner
 from tests.modeldiffs import diff as diff_utils
diff --git a/tests/test_num_params.py b/tests/test_num_params.py
index 83a23c9a4..b0633025e 100644
--- a/tests/test_num_params.py
+++ b/tests/test_num_params.py
@@ -17,8 +17,7 @@
     resnet18 as PyTorchResNet_c10
 from algoperf.workloads.imagenet_resnet.imagenet_pytorch.models import \
     resnet50 as PyTorchResNet
-from algoperf.workloads.imagenet_vit.imagenet_jax.models import \
-    ViT as JaxViT
+from algoperf.workloads.imagenet_vit.imagenet_jax.models import ViT as JaxViT
 from algoperf.workloads.imagenet_vit.imagenet_pytorch.models import \
     ViT as PyTorchViT
 from algoperf.workloads.librispeech_conformer.librispeech_jax.models import \
@@ -29,17 +28,13 @@
     ConformerConfig as PytorchConformerConfig
 from algoperf.workloads.librispeech_conformer.librispeech_pytorch.models import \
     ConformerEncoderDecoder as PytorchConformer
-from algoperf.workloads.mnist.mnist_jax.workload import \
-    _Model as JaxMLP
+from algoperf.workloads.mnist.mnist_jax.workload import _Model as JaxMLP
 from algoperf.workloads.mnist.mnist_pytorch.workload import \
     _Model as PyTorchMLP
 from algoperf.workloads.ogbg.ogbg_jax.models import GNN as JaxGNN
-from algoperf.workloads.ogbg.ogbg_pytorch.models import \
-    GNN as PyTorchGNN
-from algoperf.workloads.wmt.wmt_jax.models import \
-    Transformer as JaxTransformer
-from algoperf.workloads.wmt.wmt_jax.models import \
-    TransformerConfig
+from algoperf.workloads.ogbg.ogbg_pytorch.models import GNN as PyTorchGNN
+from algoperf.workloads.wmt.wmt_jax.models import Transformer as JaxTransformer
+from algoperf.workloads.wmt.wmt_jax.models import TransformerConfig
 from algoperf.workloads.wmt.wmt_pytorch.models import \
     Transformer as PyTorchTransformer
diff --git a/tests/test_ssim.py b/tests/test_ssim.py
index ba0b2ca7f..920556964 100644
--- a/tests/test_ssim.py
+++ b/tests/test_ssim.py
@@ -12,8 +12,7 @@
 from algoperf.pytorch_utils import pytorch_setup
 from algoperf.workloads.fastmri.fastmri_jax.ssim import \
     _uniform_filter as _jax_uniform_filter
-from algoperf.workloads.fastmri.fastmri_jax.ssim import \
-    ssim as jax_ssim
+from algoperf.workloads.fastmri.fastmri_jax.ssim import ssim as jax_ssim
 from algoperf.workloads.fastmri.fastmri_pytorch.ssim import \
     _uniform_filter as _pytorch_uniform_filter
 from algoperf.workloads.fastmri.fastmri_pytorch.ssim import \
diff --git a/tests/version_test.py b/tests/version_test.py
deleted file mode 100644
index 2205b305f..000000000
--- a/tests/version_test.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Check whether the __version__ attribute is set correctly."""
-
-import algoperf
-
-
-def test_version_attribute():
-  """Check whether __version__ exists and is a valid string."""
-
-  assert hasattr(algoperf, "__version__")
-  version = algoperf.__version__
-  assert isinstance(version, str)
-  version_elements = version.split(".")
-  print(version_elements)
-  # Only check the first three elements, i.e. major, minor, patch.
-  # The remaining elements contain commit hash and dirty status.
-  assert all(el.isnumeric() for el in version_elements[0:3])
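
Note on the version check above: both tests/test_version.py and the removed, duplicated tests/version_test.py assume a setuptools_scm-style version string, where only the first three dot-separated elements (major, minor, patch) are numeric and any further elements carry commit and dirty-status metadata. A minimal sketch of what the assertion accepts (the concrete version string below is illustrative only, not taken from the repository):

    import algoperf

    # Hypothetical setuptools_scm-style version, e.g. "0.1.0.dev42+g8327283.d20250115".
    # Splitting on "." yields ["0", "1", "0", "dev42+g8327283", "d20250115"]:
    # only the first three elements are required to be numeric.
    version_elements = algoperf.__version__.split(".")
    assert all(el.isnumeric() for el in version_elements[:3])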