params = Parameters().from_json('../config.json')
Exploring the Stable Diffusion Pipeline
Load necessary modules
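The import cell is not shown in this export; a minimal set covering the code below might look like the following sketch (Parameters and device_by_name are assumed to be the notebook's own helpers, and the utils module name is only a placeholder):

import torch
from diffusers import StableDiffusionPipeline
# Parameters and device_by_name are local helpers used in this notebook;
# the module name below is a placeholder, adjust it to your project layout.
from utils import Parameters, device_by_name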
Set up the notebook's configuration parameters
params.gpu = device_by_name("Tesla")
params.height = 512               # default height of Stable Diffusion
params.width = 512                # default width of Stable Diffusion
params.num_inference_steps = 25   # Number of denoising steps
params.guidance_scale = 7.5       # Scale for classifier-free guidance
params.seed = 0
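For reference, guidance_scale is the weight used for classifier-free guidance: at each denoising step the U-Net predicts noise twice, once for an empty prompt and once for the real prompt, and the two predictions are blended. A sketch with dummy tensors (illustrative only, not the pipeline's actual code):

import torch
# Latents for a 512x512 image are 4x64x64; the shapes here are only illustrative.
noise_pred_uncond = torch.zeros(1, 4, 64, 64)   # prediction for the empty prompt
noise_pred_text = torch.randn(1, 4, 64, 64)     # prediction for the real prompt
guidance_scale = 7.5
# Push the estimate away from the unconditional prediction, toward the text-conditioned one.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)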
We will use the GPU
device = params.gpu
Load the pretrained model
params.model_name = 'stabilityai/stable-diffusion-2-1-base'
pipe = StableDiffusionPipeline.from_pretrained(params.model_name, torch_dtype=torch.float16, requires_safety_checker=False).to(device)
# pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
# # Workaround for not accepting attention shape using VAE for Flash Attention
# pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
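The pipeline object bundles the sub-models that the rest of this exploration relies on; printing their classes is a quick way to see what was actually loaded (the exact class names depend on the diffusers version):

print(type(pipe.unet).__name__)          # conditional U-Net that predicts the noise
print(type(pipe.vae).__name__)           # autoencoder mapping images to/from latents
print(type(pipe.text_encoder).__name__)  # CLIP text encoder for the prompt
print(type(pipe.tokenizer).__name__)     # tokenizer feeding the text encoder
print(type(pipe.scheduler).__name__)     # noise scheduler driving the denoising loop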
Set up the prompt
= ["a photograph of a cute puppy"]
prompt = torch.Generator(device = device)
generator # Seed generator to create the inital latent noise generator.manual_seed(params.seed)
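Note that the generator is stateful: each pipe() call below advances it, so consecutive runs start from different initial noise. To reproduce a particular image, re-seed right before the call:

generator.manual_seed(params.seed)  # reset the generator to get the same latents again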
Run the standard model
image = pipe(prompt, generator = generator).images[0]
image
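The call above does not forward the values stored in params, so the pipeline falls back to its own defaults; to generate with the configured size, step count, and guidance scale, they can be passed explicitly:

image = pipe(prompt,
             height=params.height, width=params.width,
             num_inference_steps=params.num_inference_steps,
             guidance_scale=params.guidance_scale,
             generator=generator).images[0]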
Let's try another prompt
= ["a graffity of the monalisa on a brick wall, a red gorilla painter"]
prompt
image = pipe(prompt, generator = generator).images[0]
image
Negative prompt
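Under the hood, the negative prompt is encoded and takes the place of the empty string that normally feeds the unconditional branch of classifier-free guidance, so each denoising step is pushed toward the prompt and away from the negative prompt.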
= ["a red gorilla painter painting a graffity of the monalisa on a brick wall"]
prompt = ["a photo"]
negative_prompt
image = pipe(prompt, negative_prompt = negative_prompt, generator = generator).images[0]
image