
Commit

fix typo for usort config in pyproject.toml
Summary:
The `usort` config in `pyproject.toml` has no effect due to a typo. Fixing the typo makes `usort` do more work and generates the changes in this PR. Except for `pyproject.toml` itself, all changes were generated by `lintrunner -a --take UFMT --all-files`.
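
For context, a minimal sketch of where `usort` reads its configuration in `pyproject.toml`. The actual upstream file and the exact typo are not part of this mirrored diff, so the table and module names below are illustrative assumptions, not the real PyTorch config:

```toml
# Illustrative only: usort reads settings from the [tool.usort] tables in
# pyproject.toml. A misspelled table or key name is silently ignored, which
# is how a typo can leave the config with no effect.
[tool.usort.known]
# Modules listed here are grouped into the first-party import block when
# usort reorders imports (module names here are hypothetical examples).
first_party = ["torch", "torchgen", "functorch"]
```

Once the config is actually picked up, rerunning the formatter (`lintrunner -a --take UFMT --all-files`) regroups and reorders imports accordingly, which is what produces the diff below.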

X-link: pytorch/pytorch#127126
Approved by: https://github.com/kit1980

Reviewed By: PaliC

Differential Revision: D57894793

fbshipit-source-id: 6af4fcb5eae8fd114024359beb3fae7f3928de2e
XuehaiPan authored and facebook-github-bot committed May 29, 2024
1 parent b9e088a commit d21607d
Showing 2 changed files with 11 additions and 12 deletions.
20 changes: 9 additions & 11 deletions userbenchmark/dynamo/dynamobench/common.py
@@ -36,20 +36,21 @@
Type,
TYPE_CHECKING,
)

from typing_extensions import Self
from unittest.mock import MagicMock

import numpy as np
import pandas as pd
import psutil
from scipy.stats import gmean, ttest_ind
from tqdm.auto import tqdm, trange

import torch
import torch._dynamo
import torch._dynamo.utils
import torch._export
import torch.distributed
import torch.multiprocessing as mp
from scipy.stats import gmean, ttest_ind
from torch._C import _has_cuda as HAS_CUDA, _has_xpu as HAS_XPU
from torch._dynamo.profiler import fx_insert_profiling, Profiler
from torch._dynamo.testing import (
@@ -59,8 +60,6 @@
same,
)

from tqdm.auto import tqdm, trange

try:
from torch._dynamo.utils import (
clone_inputs,
@@ -74,15 +73,14 @@
graph_break_reasons,
maybe_enable_compiled_autograd,
)

import torch._functorch.config
from torch._functorch.aot_autograd import set_model_name
from torch._inductor import config as inductor_config, metrics
from torch._subclasses.fake_tensor import FakeTensorMode

from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map, tree_map_only


try:
import torch_xla
import torch_xla.core.xla_model as xm
@@ -2343,17 +2341,17 @@ def get_benchmark_indices(self, length):

def get_fsdp_auto_wrap_policy(self, model_name: str):
from diffusers.models.transformer_2d import Transformer2DModel

from torch.distributed.fsdp.wrap import (
ModuleWrapPolicy,
size_based_auto_wrap_policy,
)
from torchbenchmark.models.nanogpt.model import Block
from transformers.models.llama.modeling_llama import LlamaDecoderLayer

from transformers.models.t5.modeling_t5 import T5Block
from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer

from torch.distributed.fsdp.wrap import (
ModuleWrapPolicy,
size_based_auto_wrap_policy,
)

# handcrafted wrap policy
MODEL_FSDP_WRAP = {
"stable_diffusion_unet": (Transformer2DModel,),
3 changes: 2 additions & 1 deletion userbenchmark/dynamo/dynamobench/torchbench.py
@@ -10,9 +10,10 @@
from collections import namedtuple
from os.path import abspath, exists

import torch
import yaml

import torch

try:
from .common import BenchmarkRunner, main
except ImportError:
