multimodalart committed
Commit 88f16e0 · verified · 1 Parent(s): 6e5e1d5

Update app.py

Files changed (1): app.py (+5 -5)
app.py CHANGED
@@ -7,7 +7,7 @@ from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/stable-diffusion-3.5-large"
+model_repo_id = "tensorart/stable-diffusion-3.5-large-TurboX"
 
 if torch.cuda.is_available():
     torch_dtype = torch.bfloat16
@@ -28,8 +28,8 @@ def infer(
     randomize_seed=False,
     width=1024,
     height=1024,
-    guidance_scale=4.5,
-    num_inference_steps=40,
+    guidance_scale=1.5,
+    num_inference_steps=8,
     progress=gr.Progress(track_tqdm=True),
 ):
     if randomize_seed:
@@ -119,7 +119,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=7.5,
                     step=0.1,
-                    value=4.5,
+                    value=1.5,
                 )
 
                 num_inference_steps = gr.Slider(
@@ -127,7 +127,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=40,
+                    value=8,
                 )
 
         gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")
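
This commit points the Space at TensorArt's TurboX variant of SD 3.5 Large, a few-step distilled checkpoint, and lowers the matching defaults to 8 inference steps at guidance_scale 1.5. The sketch below is not part of the repo; it is a minimal stand-alone illustration of how the new defaults would be exercised with diffusers, assuming the rest of app.py still follows the stock text-to-image template. The prompt, seed, and output filename are placeholders.

```python
# Minimal sketch of the post-commit defaults, outside Gradio.
# Assumptions: standard diffusers text-to-image flow; prompt/seed/filename are placeholders.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32

model_repo_id = "tensorart/stable-diffusion-3.5-large-TurboX"
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)

image = pipe(
    prompt="An astronaut riding a green horse",  # placeholder prompt
    width=1024,
    height=1024,
    guidance_scale=1.5,        # new default from this commit
    num_inference_steps=8,     # new default from this commit
    generator=torch.Generator(device=device).manual_seed(42),
).images[0]
image.save("output.png")
```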