hysts HF Staff committed on
Commit
371a997
·
1 Parent(s): 2b42301
Files changed (10) hide show
  1. .pre-commit-config.yaml +9 -36
  2. .python-version +1 -0
  3. .vscode/extensions.json +8 -0
  4. .vscode/settings.json +3 -16
  5. README.md +1 -2
  6. app.py +14 -9
  7. model.py +13 -12
  8. pyproject.toml +54 -0
  9. requirements.txt +232 -6
  10. uv.lock +0 -0
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
- rev: v4.5.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
@@ -13,48 +13,21 @@ repos:
13
  args: ["--fix=lf"]
14
  - id: requirements-txt-fixer
15
  - id: trailing-whitespace
16
- - repo: https://github.com/myint/docformatter
17
- rev: v1.7.5
18
  hooks:
19
- - id: docformatter
20
- args: ["--in-place"]
21
- - repo: https://github.com/pycqa/isort
22
- rev: 5.13.2
23
- hooks:
24
- - id: isort
25
- args: ["--profile", "black"]
26
  - repo: https://github.com/pre-commit/mirrors-mypy
27
- rev: v1.8.0
28
  hooks:
29
  - id: mypy
30
  args: ["--ignore-missing-imports"]
31
  additional_dependencies:
32
  [
33
  "types-python-slugify",
34
- "types-requests",
35
- "types-PyYAML",
36
  "types-pytz",
 
 
37
  ]
38
- - repo: https://github.com/psf/black
39
- rev: 24.2.0
40
- hooks:
41
- - id: black
42
- language_version: python3.10
43
- args: ["--line-length", "119"]
44
- - repo: https://github.com/kynan/nbstripout
45
- rev: 0.7.1
46
- hooks:
47
- - id: nbstripout
48
- args:
49
- [
50
- "--extra-keys",
51
- "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
52
- ]
53
- - repo: https://github.com/nbQA-dev/nbQA
54
- rev: 1.7.1
55
- hooks:
56
- - id: nbqa-black
57
- - id: nbqa-pyupgrade
58
- args: ["--py37-plus"]
59
- - id: nbqa-isort
60
- args: ["--float-to-top"]
 
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v6.0.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
 
13
  args: ["--fix=lf"]
14
  - id: requirements-txt-fixer
15
  - id: trailing-whitespace
16
+ - repo: https://github.com/astral-sh/ruff-pre-commit
17
+ rev: v0.12.12
18
  hooks:
19
+ - id: ruff-check
20
+ args: ["--fix"]
21
+ - id: ruff-format
 
 
 
 
22
  - repo: https://github.com/pre-commit/mirrors-mypy
23
+ rev: v1.17.1
24
  hooks:
25
  - id: mypy
26
  args: ["--ignore-missing-imports"]
27
  additional_dependencies:
28
  [
29
  "types-python-slugify",
 
 
30
  "types-pytz",
31
+ "types-PyYAML",
32
+ "types-requests",
33
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "recommendations": [
3
+ "ms-python.python",
4
+ "charliermarsh.ruff",
5
+ "streetsidesoftware.code-spell-checker",
6
+ "tamasfe.even-better-toml"
7
+ ]
8
+ }
.vscode/settings.json CHANGED
@@ -2,29 +2,16 @@
2
  "editor.formatOnSave": true,
3
  "files.insertFinalNewline": false,
4
  "[python]": {
5
- "editor.defaultFormatter": "ms-python.black-formatter",
6
  "editor.formatOnType": true,
7
  "editor.codeActionsOnSave": {
 
8
  "source.organizeImports": "explicit"
9
  }
10
  },
11
  "[jupyter]": {
12
  "files.insertFinalNewline": false
13
  },
14
- "black-formatter.args": [
15
- "--line-length=119"
16
- ],
17
- "isort.args": ["--profile", "black"],
18
- "flake8.args": [
19
- "--max-line-length=119"
20
- ],
21
- "ruff.lint.args": [
22
- "--line-length=119"
23
- ],
24
  "notebook.output.scrolling": true,
25
- "notebook.formatOnCellExecution": true,
26
- "notebook.formatOnSave.enabled": true,
27
- "notebook.codeActionsOnSave": {
28
- "source.organizeImports": "explicit"
29
- }
30
  }
 
2
  "editor.formatOnSave": true,
3
  "files.insertFinalNewline": false,
4
  "[python]": {
5
+ "editor.defaultFormatter": "charliermarsh.ruff",
6
  "editor.formatOnType": true,
7
  "editor.codeActionsOnSave": {
8
+ "source.fixAll.ruff": "explicit",
9
  "source.organizeImports": "explicit"
10
  }
11
  },
12
  "[jupyter]": {
13
  "files.insertFinalNewline": false
14
  },
 
 
 
 
 
 
 
 
 
 
15
  "notebook.output.scrolling": true,
16
+ "notebook.formatOnSave.enabled": true
 
 
 
 
17
  }
README.md CHANGED
@@ -4,10 +4,9 @@ emoji: 👁
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 4.36.1
8
  app_file: app.py
9
  pinned: false
10
- suggested_hardware: t4-small
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 5.44.1
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py CHANGED
@@ -1,7 +1,5 @@
1
  #!/usr/bin/env python
2
 
3
- from __future__ import annotations
4
-
5
  import gradio as gr
6
  import numpy as np
7
 
@@ -17,9 +15,7 @@ def get_sample_image_url(name: str) -> str:
17
 
18
  def get_sample_image_markdown(name: str) -> str:
19
  url = get_sample_image_url(name)
20
- if name == "celeba-hq":
21
- size = 1024
22
- elif name == "ffhq":
23
  size = 1024
24
  elif name == "lsun-church":
25
  size = 256
@@ -34,7 +30,7 @@ def get_sample_image_markdown(name: str) -> str:
34
 
35
  model = Model()
36
 
37
- with gr.Blocks(css="style.css") as demo:
38
  gr.Markdown(DESCRIPTION)
39
 
40
  with gr.Tabs():
@@ -62,8 +58,17 @@ with gr.Blocks(css="style.css") as demo:
62
  text = get_sample_image_markdown(model_name2.value)
63
  sample_images = gr.Markdown(text)
64
 
65
- run_button.click(fn=model.set_model_and_generate_image, inputs=[model_name, seed], outputs=result, api_name="run")
66
- model_name2.change(fn=get_sample_image_markdown, inputs=model_name2, outputs=sample_images)
 
 
 
 
 
 
 
 
 
67
 
68
  if __name__ == "__main__":
69
- demo.queue(max_size=15).launch()
 
1
  #!/usr/bin/env python
2
 
 
 
3
  import gradio as gr
4
  import numpy as np
5
 
 
15
 
16
  def get_sample_image_markdown(name: str) -> str:
17
  url = get_sample_image_url(name)
18
+ if name in {"celeba-hq", "ffhq"}:
 
 
19
  size = 1024
20
  elif name == "lsun-church":
21
  size = 256
 
30
 
31
  model = Model()
32
 
33
+ with gr.Blocks(css_paths="style.css") as demo:
34
  gr.Markdown(DESCRIPTION)
35
 
36
  with gr.Tabs():
 
58
  text = get_sample_image_markdown(model_name2.value)
59
  sample_images = gr.Markdown(text)
60
 
61
+ run_button.click(
62
+ fn=model.set_model_and_generate_image,
63
+ inputs=[model_name, seed],
64
+ outputs=result,
65
+ api_name="run",
66
+ )
67
+ model_name2.change(
68
+ fn=get_sample_image_markdown,
69
+ inputs=model_name2,
70
+ outputs=sample_images,
71
+ )
72
 
73
  if __name__ == "__main__":
74
+ demo.launch()
model.py CHANGED
@@ -1,35 +1,35 @@
1
- from __future__ import annotations
2
-
3
  import os
4
  import pathlib
 
 
5
  import sys
6
 
7
  import huggingface_hub
8
  import numpy as np
9
  import torch
10
- import torch.nn as nn
11
 
12
  if os.getenv("SYSTEM") == "spaces":
13
- os.system("sed -i '14,21d' StyleSwin/op/fused_act.py")
14
- os.system("sed -i '12,19d' StyleSwin/op/upfirdn2d.py")
15
 
16
  current_dir = pathlib.Path(__file__).parent
17
  submodule_dir = current_dir / "StyleSwin"
18
  sys.path.insert(0, submodule_dir.as_posix())
19
 
20
- from models.generator import Generator
21
 
22
 
23
  class Model:
24
- MODEL_NAMES = [
25
  "CelebAHQ_256",
26
  "FFHQ_256",
27
  "LSUNChurch_256",
28
  "CelebAHQ_1024",
29
  "FFHQ_1024",
30
- ]
31
 
32
- def __init__(self):
33
  self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
34
  self._download_all_models()
35
  self.model_name = self.MODEL_NAMES[3]
@@ -40,7 +40,7 @@ class Model:
40
 
41
  def _load_model(self, model_name: str) -> nn.Module:
42
  size = int(model_name.split("_")[1])
43
- channel_multiplier = 1 if size == 1024 else 2
44
  model = Generator(size, style_dim=512, n_mlp=8, channel_multiplier=channel_multiplier)
45
  ckpt_path = huggingface_hub.hf_hub_download("public-data/StyleSwin", f"models/{model_name}.pt")
46
  ckpt = torch.load(ckpt_path)
@@ -55,7 +55,7 @@ class Model:
55
  self.model_name = model_name
56
  self.model = self._load_model(model_name)
57
 
58
- def _download_all_models(self):
59
  for name in self.MODEL_NAMES:
60
  self._load_model(name)
61
 
@@ -65,7 +65,8 @@ class Model:
65
  return torch.from_numpy(z).float().to(self.device)
66
 
67
  def postprocess(self, tensors: torch.Tensor) -> np.ndarray:
68
- assert tensors.dim() == 4
 
69
  tensors = tensors * self.std + self.mean
70
  tensors = (tensors * 255).clamp(0, 255).to(torch.uint8)
71
  return tensors.permute(0, 2, 3, 1).cpu().numpy()
 
 
 
1
  import os
2
  import pathlib
3
+ import shlex
4
+ import subprocess
5
  import sys
6
 
7
  import huggingface_hub
8
  import numpy as np
9
  import torch
10
+ from torch import nn
11
 
12
  if os.getenv("SYSTEM") == "spaces":
13
+ subprocess.run(shlex.split("sed -i '14,21d' StyleSwin/op/fused_act.py"), check=False) # noqa: S603
14
+ subprocess.run(shlex.split("sed -i '12,19d' StyleSwin/op/upfirdn2d.py"), check=False) # noqa: S603
15
 
16
  current_dir = pathlib.Path(__file__).parent
17
  submodule_dir = current_dir / "StyleSwin"
18
  sys.path.insert(0, submodule_dir.as_posix())
19
 
20
+ from models.generator import Generator # noqa: E402 # pyright: ignore[reportMissingImports]
21
 
22
 
23
  class Model:
24
+ MODEL_NAMES = (
25
  "CelebAHQ_256",
26
  "FFHQ_256",
27
  "LSUNChurch_256",
28
  "CelebAHQ_1024",
29
  "FFHQ_1024",
30
+ )
31
 
32
+ def __init__(self) -> None:
33
  self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
34
  self._download_all_models()
35
  self.model_name = self.MODEL_NAMES[3]
 
40
 
41
  def _load_model(self, model_name: str) -> nn.Module:
42
  size = int(model_name.split("_")[1])
43
+ channel_multiplier = 1 if size == 1024 else 2 # noqa: PLR2004
44
  model = Generator(size, style_dim=512, n_mlp=8, channel_multiplier=channel_multiplier)
45
  ckpt_path = huggingface_hub.hf_hub_download("public-data/StyleSwin", f"models/{model_name}.pt")
46
  ckpt = torch.load(ckpt_path)
 
55
  self.model_name = model_name
56
  self.model = self._load_model(model_name)
57
 
58
+ def _download_all_models(self) -> None:
59
  for name in self.MODEL_NAMES:
60
  self._load_model(name)
61
 
 
65
  return torch.from_numpy(z).float().to(self.device)
66
 
67
  def postprocess(self, tensors: torch.Tensor) -> np.ndarray:
68
+ if not tensors.dim() == 4: # noqa: PLR2004
69
+ raise ValueError("tensors must be 4-dimensional")
70
  tensors = tensors * self.std + self.mean
71
  tensors = (tensors * 255).clamp(0, 255).to(torch.uint8)
72
  return tensors.permute(0, 2, 3, 1).cpu().numpy()
pyproject.toml ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "styleswin"
3
+ version = "0.1.0"
4
+ description = ""
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ dependencies = [
8
+ "gradio>=5.44.1",
9
+ "hf-transfer>=0.1.9",
10
+ "timm>=1.0.19",
11
+ "torch==2.8.0",
12
+ "torchvision>=0.23.0",
13
+ ]
14
+
15
+ [tool.ruff]
16
+ line-length = 119
17
+
18
+ [tool.ruff.lint]
19
+ select = ["ALL"]
20
+ ignore = [
21
+ "COM812", # missing-trailing-comma
22
+ "D203", # one-blank-line-before-class
23
+ "D213", # multi-line-summary-second-line
24
+ "E501", # line-too-long
25
+ "SIM117", # multiple-with-statements
26
+ #
27
+ "D100", # undocumented-public-module
28
+ "D101", # undocumented-public-class
29
+ "D102", # undocumented-public-method
30
+ "D103", # undocumented-public-function
31
+ "D104", # undocumented-public-package
32
+ "D105", # undocumented-magic-method
33
+ "D107", # undocumented-public-init
34
+ "EM101", # raw-string-in-exception
35
+ "FBT001", # boolean-type-hint-positional-argument
36
+ "FBT002", # boolean-default-value-positional-argument
37
+ "PD901", # pandas-df-variable-name
38
+ "PGH003", # blanket-type-ignore
39
+ "PLR0913", # too-many-arguments
40
+ "PLR0915", # too-many-statements
41
+ "TRY003", # raise-vanilla-args
42
+ ]
43
+ unfixable = [
44
+ "F401", # unused-import
45
+ ]
46
+
47
+ [tool.ruff.lint.pydocstyle]
48
+ convention = "google"
49
+
50
+ [tool.ruff.lint.per-file-ignores]
51
+ "*.ipynb" = ["T201", "T203"]
52
+
53
+ [tool.ruff.format]
54
+ docstring-code-format = true
requirements.txt CHANGED
@@ -1,6 +1,232 @@
1
- gradio==4.36.1
2
- numpy==1.26.4
3
- Pillow==10.3.0
4
- timm==0.9.16
5
- torch==2.0.1
6
- torchvision==0.15.2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile pyproject.toml -o requirements.txt
3
+ aiofiles==24.1.0
4
+ # via gradio
5
+ annotated-types==0.7.0
6
+ # via pydantic
7
+ anyio==4.10.0
8
+ # via
9
+ # gradio
10
+ # httpx
11
+ # starlette
12
+ brotli==1.1.0
13
+ # via gradio
14
+ certifi==2025.8.3
15
+ # via
16
+ # httpcore
17
+ # httpx
18
+ # requests
19
+ charset-normalizer==3.4.3
20
+ # via requests
21
+ click==8.2.1
22
+ # via
23
+ # typer
24
+ # uvicorn
25
+ exceptiongroup==1.3.0
26
+ # via anyio
27
+ fastapi==0.116.1
28
+ # via gradio
29
+ ffmpy==0.6.1
30
+ # via gradio
31
+ filelock==3.19.1
32
+ # via
33
+ # huggingface-hub
34
+ # torch
35
+ fsspec==2025.9.0
36
+ # via
37
+ # gradio-client
38
+ # huggingface-hub
39
+ # torch
40
+ gradio==5.44.1
41
+ # via styleswin (pyproject.toml)
42
+ gradio-client==1.12.1
43
+ # via gradio
44
+ groovy==0.1.2
45
+ # via gradio
46
+ h11==0.16.0
47
+ # via
48
+ # httpcore
49
+ # uvicorn
50
+ hf-transfer==0.1.9
51
+ # via styleswin (pyproject.toml)
52
+ hf-xet==1.1.9
53
+ # via huggingface-hub
54
+ httpcore==1.0.9
55
+ # via httpx
56
+ httpx==0.28.1
57
+ # via
58
+ # gradio
59
+ # gradio-client
60
+ # safehttpx
61
+ huggingface-hub==0.34.4
62
+ # via
63
+ # gradio
64
+ # gradio-client
65
+ # timm
66
+ idna==3.10
67
+ # via
68
+ # anyio
69
+ # httpx
70
+ # requests
71
+ jinja2==3.1.6
72
+ # via
73
+ # gradio
74
+ # torch
75
+ markdown-it-py==4.0.0
76
+ # via rich
77
+ markupsafe==3.0.2
78
+ # via
79
+ # gradio
80
+ # jinja2
81
+ mdurl==0.1.2
82
+ # via markdown-it-py
83
+ mpmath==1.3.0
84
+ # via sympy
85
+ networkx==3.4.2
86
+ # via torch
87
+ numpy==2.2.6
88
+ # via
89
+ # gradio
90
+ # pandas
91
+ # torchvision
92
+ nvidia-cublas-cu12==12.8.4.1
93
+ # via
94
+ # nvidia-cudnn-cu12
95
+ # nvidia-cusolver-cu12
96
+ # torch
97
+ nvidia-cuda-cupti-cu12==12.8.90
98
+ # via torch
99
+ nvidia-cuda-nvrtc-cu12==12.8.93
100
+ # via torch
101
+ nvidia-cuda-runtime-cu12==12.8.90
102
+ # via torch
103
+ nvidia-cudnn-cu12==9.10.2.21
104
+ # via torch
105
+ nvidia-cufft-cu12==11.3.3.83
106
+ # via torch
107
+ nvidia-cufile-cu12==1.13.1.3
108
+ # via torch
109
+ nvidia-curand-cu12==10.3.9.90
110
+ # via torch
111
+ nvidia-cusolver-cu12==11.7.3.90
112
+ # via torch
113
+ nvidia-cusparse-cu12==12.5.8.93
114
+ # via
115
+ # nvidia-cusolver-cu12
116
+ # torch
117
+ nvidia-cusparselt-cu12==0.7.1
118
+ # via torch
119
+ nvidia-nccl-cu12==2.27.3
120
+ # via torch
121
+ nvidia-nvjitlink-cu12==12.8.93
122
+ # via
123
+ # nvidia-cufft-cu12
124
+ # nvidia-cusolver-cu12
125
+ # nvidia-cusparse-cu12
126
+ # torch
127
+ nvidia-nvtx-cu12==12.8.90
128
+ # via torch
129
+ orjson==3.11.3
130
+ # via gradio
131
+ packaging==25.0
132
+ # via
133
+ # gradio
134
+ # gradio-client
135
+ # huggingface-hub
136
+ pandas==2.3.2
137
+ # via gradio
138
+ pillow==11.3.0
139
+ # via
140
+ # gradio
141
+ # torchvision
142
+ pydantic==2.11.7
143
+ # via
144
+ # fastapi
145
+ # gradio
146
+ pydantic-core==2.33.2
147
+ # via pydantic
148
+ pydub==0.25.1
149
+ # via gradio
150
+ pygments==2.19.2
151
+ # via rich
152
+ python-dateutil==2.9.0.post0
153
+ # via pandas
154
+ python-multipart==0.0.20
155
+ # via gradio
156
+ pytz==2025.2
157
+ # via pandas
158
+ pyyaml==6.0.2
159
+ # via
160
+ # gradio
161
+ # huggingface-hub
162
+ # timm
163
+ requests==2.32.5
164
+ # via huggingface-hub
165
+ rich==14.1.0
166
+ # via typer
167
+ ruff==0.12.12
168
+ # via gradio
169
+ safehttpx==0.1.6
170
+ # via gradio
171
+ safetensors==0.6.2
172
+ # via timm
173
+ semantic-version==2.10.0
174
+ # via gradio
175
+ setuptools==80.9.0
176
+ # via triton
177
+ shellingham==1.5.4
178
+ # via typer
179
+ six==1.17.0
180
+ # via python-dateutil
181
+ sniffio==1.3.1
182
+ # via anyio
183
+ starlette==0.47.3
184
+ # via
185
+ # fastapi
186
+ # gradio
187
+ sympy==1.14.0
188
+ # via torch
189
+ timm==1.0.19
190
+ # via styleswin (pyproject.toml)
191
+ tomlkit==0.13.3
192
+ # via gradio
193
+ torch==2.8.0
194
+ # via
195
+ # styleswin (pyproject.toml)
196
+ # timm
197
+ # torchvision
198
+ torchvision==0.23.0
199
+ # via
200
+ # styleswin (pyproject.toml)
201
+ # timm
202
+ tqdm==4.67.1
203
+ # via huggingface-hub
204
+ triton==3.4.0
205
+ # via torch
206
+ typer==0.17.3
207
+ # via gradio
208
+ typing-extensions==4.15.0
209
+ # via
210
+ # anyio
211
+ # exceptiongroup
212
+ # fastapi
213
+ # gradio
214
+ # gradio-client
215
+ # huggingface-hub
216
+ # pydantic
217
+ # pydantic-core
218
+ # starlette
219
+ # torch
220
+ # typer
221
+ # typing-inspection
222
+ # uvicorn
223
+ typing-inspection==0.4.1
224
+ # via pydantic
225
+ tzdata==2025.2
226
+ # via pandas
227
+ urllib3==2.5.0
228
+ # via requests
229
+ uvicorn==0.35.0
230
+ # via gradio
231
+ websockets==15.0.1
232
+ # via gradio-client
uv.lock ADDED
The diff for this file is too large to render. See raw diff