Alexander Bagus committed · Commit b894cf4 · 1 Parent(s): 8d6757d
Files changed (1)
  1. app.py +21 -20
app.py CHANGED
@@ -108,11 +108,12 @@ def inference(
     mask_image,
     control_context_scale = 0.75,
     seed=42,
-    randomize_seed=True,
-    guidance_scale=1.5,
+    randomize_seed=False,
+    # guidance_scale=1,
     num_inference_steps=8,
     progress=gr.Progress(track_tqdm=True),
 ):
+    guidance_scale=1
     timestamp = time.time()
     print(f"timestamp: {timestamp}")
 
@@ -141,13 +142,13 @@ def inference(
     else:
         mask_image = torch.ones([1, 1, sample_size[0], sample_size[1]]) * 255
 
-    print("DEBUG: control_image_torch")
-    processor = Processor('canny')
-    control_image, w, h = image_utils.rescale_image(input_image, scale_target, 8, max_size=1280)
-    control_image = control_image.resize((1024, 1024))
-    control_image = processor(control_image, to_pil=True)
-    control_image = control_image.resize((width, height))
-    control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
+    # print("DEBUG: control_image_torch")
+    # processor = Processor('canny')
+    # control_image, w, h = image_utils.rescale_image(input_image, scale_target, 8, max_size=1280)
+    # control_image = control_image.resize((1024, 1024))
+    # control_image = processor(control_image, to_pil=True)
+    # control_image = control_image.resize((width, height))
+    # control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
 
     # generation
     if randomize_seed: seed = random.randint(0, MAX_SEED)
@@ -160,9 +161,9 @@
         width=width,
         generator=generator,
         guidance_scale=guidance_scale,
-        image = inpaint_image,
-        mask_image = mask_image,
-        control_image=control_image_torch,
+        image = inpaint_image,
+        mask_image = mask_image,
+        # control_image=control_image_torch,
         num_inference_steps=num_inference_steps,
         control_context_scale=control_context_scale,
     ).images[0]
@@ -235,13 +236,13 @@ with gr.Blocks() as demo:
            step=0.01,
            value=0.40,
        )
-       guidance_scale = gr.Slider(
-           label="Guidance scale",
-           minimum=0.0,
-           maximum=10.0,
-           step=0.1,
-           value=1.0,
-       )
+       # guidance_scale = gr.Slider(
+       #     label="Guidance scale",
+       #     minimum=0.0,
+       #     maximum=10.0,
+       #     step=0.1,
+       #     value=1.0,
+       # )
 
        seed = gr.Slider(
            label="Seed",
@@ -286,7 +287,7 @@ with gr.Blocks() as demo:
            control_context_scale,
            seed,
            randomize_seed,
-           guidance_scale,
+           # guidance_scale,
            num_inference_steps,
        ],
        outputs=[output_image, seed, control_image],
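
Note: the commit hardcodes guidance_scale = 1 and comments out (rather than deletes) the Canny control-image branch and its slider. For reference, below is a minimal standalone sketch of the preprocessing step that was disabled, assuming the Processor class comes from controlnet_aux; the repo-specific helpers (image_utils.rescale_image, get_image_latent) are omitted and replaced with a plain PIL resize, and make_canny_control_image is a hypothetical name, not part of app.py.

from PIL import Image
from controlnet_aux.processor import Processor

def make_canny_control_image(input_path, width=1024, height=1024):
    # Hypothetical helper, not in app.py: rebuilds the disabled Canny branch.
    # Processor("canny") wraps controlnet_aux's Canny edge annotator.
    processor = Processor("canny")
    image = Image.open(input_path).convert("RGB")
    # The original code detected edges at a fixed 1024x1024 working resolution.
    image = image.resize((1024, 1024))
    # to_pil=True returns the edge map as a PIL image.
    edges = processor(image, to_pil=True)
    # Resize the edge map back to the generation resolution (width, height).
    return edges.resize((width, height))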