Update app.py
app.py CHANGED
@@ -79,14 +79,14 @@ NUM_IMAGES_PER_PROMPT = 3
 
 if torch.cuda.is_available():
     pipe = DiffusionPipeline.from_pretrained(
-        "
+        "SG161222/RealVisXL_V4.0",
         torch_dtype=torch.float16,
         use_safetensors=True,
         add_watermarker=False,
         variant="fp16",
     )
     pipe2 = DiffusionPipeline.from_pretrained(
-        "SG161222/
+        "SG161222/RealVisXL_V3.0_Turbo",
         torch_dtype=torch.float16,
         use_safetensors=True,
         add_watermarker=False,
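For reference, a standalone sketch of what the updated loading block resolves to. The two model IDs come from the `+` lines above; the imports and the explicit `.to("cuda")` device move are assumptions added so the snippet runs on its own, not lines from the Space.

```python
# Sketch only: load the two RealVisXL checkpoints named in this hunk.
# The keyword arguments mirror the hunk; fp16 weights are selected via
# torch_dtype=torch.float16 together with variant="fp16".
import torch
from diffusers import DiffusionPipeline

if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
        variant="fp16",
    ).to("cuda")  # assumption: the Space moves the pipelines to GPU later on
    pipe2 = DiffusionPipeline.from_pretrained(
        "SG161222/RealVisXL_V3.0_Turbo",
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
        variant="fp16",
    ).to("cuda")
```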
@@ -101,8 +101,8 @@ if torch.cuda.is_available():
     print("Loaded on Device!")
 
     if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet)
-        pipe2.unet = torch.compile(pipe2.unet)
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+        pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
         print("Model Compiled!")
 
 
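This hunk only adds flags to the existing `torch.compile` calls. A small self-contained illustration of what the two flags mean, using a toy module in place of `pipe.unet` (the module below is purely illustrative, not part of the Space):

```python
import torch
import torch.nn as nn

# Toy stand-in for pipe.unet; only the compile flags matter here.
model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))

# mode="reduce-overhead" targets lower per-call launch overhead (CUDA graphs
# when running on GPU); fullgraph=True makes compilation error out on graph
# breaks instead of silently falling back to eager execution.
compiled = torch.compile(model, mode="reduce-overhead", fullgraph=True)

x = torch.randn(8, 64)
out = compiled(x)  # first call compiles, subsequent calls reuse the result
print(out.shape)
```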
@@ -122,14 +122,14 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 def generate(
     prompt: str,
     negative_prompt: str = "",
-    use_negative_prompt: bool =
+    use_negative_prompt: bool = False,
     style: str = DEFAULT_STYLE,
-    seed: int =
+    seed: int = 0,
     width: int = 896,
     height: int = 1152,
     guidance_scale: float = 3,
     randomize_seed: bool = False,
-    use_resolution_binning: bool =
+    use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
 
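The new defaults (`use_negative_prompt=False`, `seed=0`, `use_resolution_binning=True`) interact with `randomize_seed_fn`, whose body is not part of this diff. The sketch below shows the conventional pattern such Spaces use, which is an assumption here, together with how the seed then feeds a `torch.Generator`:

```python
import random
import torch

MAX_SEED = 2**32 - 1  # assumption: the Space defines a similar upper bound

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Typical behaviour (assumed): replace the incoming seed when randomization is on.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

# With the new defaults (seed=0, randomize_seed=False) generation stays
# deterministic unless the user opts into a random seed.
seed = randomize_seed_fn(seed=0, randomize_seed=False)
generator = torch.Generator().manual_seed(seed)
print(seed, generator.initial_seed())
```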
@@ -143,7 +143,7 @@ def generate(
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
-        "num_inference_steps":
+        "num_inference_steps": 25,
         "generator": generator,
         "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
         "use_resolution_binning": use_resolution_binning,
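These keys are the keyword arguments `generate()` collects for the pipeline call, now with a fixed step count of 25. A hedged sketch of how such a dictionary is assembled and then expanded into the call (the commented call line is an assumption about the surrounding code, not shown in the diff):

```python
import torch

# Sketch only: the keyword set built in this hunk, with the defaults from the
# previous hunk filled in. NUM_IMAGES_PER_PROMPT is 3 per the first hunk header.
options = {
    "width": 896,
    "height": 1152,
    "guidance_scale": 3,
    "num_inference_steps": 25,  # value pinned by this diff
    "generator": torch.Generator().manual_seed(0),
    "num_images_per_prompt": 3,
    "use_resolution_binning": True,
}
# In the Space the dict is expanded into the pipeline, roughly:
#     images = pipe(prompt=prompt, negative_prompt=negative_prompt, **options).images
print(options["num_inference_steps"])
```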
@@ -158,7 +158,9 @@ def generate(
 
 examples = [
     (
-        "
+        "college life of 21 year old, depth of field, bokeh, shallow"
+        " focus, minimalism, fujifilm xh2s with Canon EF lens, cinematic --ar 85:128"
+        " --v 6.0 --style raw"
     ),
 ]
 
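The example prompt is now written as three adjacent string literals inside parentheses; Python joins adjacent literals at compile time, so the list entry is still a single string. A quick check:

```python
# Adjacent string literals are concatenated, so this is one prompt string,
# not a tuple of three fragments.
example = (
    "college life of 21 year old, depth of field, bokeh, shallow"
    " focus, minimalism, fujifilm xh2s with Canon EF lens, cinematic --ar 85:128"
    " --v 6.0 --style raw"
)
print(type(example).__name__, len(example))  # -> str, one concatenated prompt
```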
@@ -284,4 +286,4 @@ with gr.Blocks(css=css, theme="rawrsor1/Everforest") as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch()
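The final hunk re-emits the launch line with the same text; only its line number shifts by two because of the added example lines. For completeness, a minimal sketch of the queue-then-launch pattern the Space uses; the tiny Blocks UI here is a placeholder, not the Space's actual interface:

```python
import gradio as gr

# Placeholder UI: any Blocks app can cap pending requests with
# queue(max_size=...) before launching the server.
with gr.Blocks() as demo:
    gr.Markdown("queue(max_size=20) + launch() pattern from the diff")

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
```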