Path: blob/main/sagemaker/23_stable_diffusion_inference/code/inference.py
import base64
import torch
from io import BytesIO
from diffusers import StableDiffusionPipeline


def model_fn(model_dir):
    # Load stable diffusion and move it to the GPU
    pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    return pipe


def predict_fn(data, pipe):
    # get prompt & parameters
    prompt = data.pop("inputs", data)
    # set valid hyperparameters for stable diffusion
    num_inference_steps = data.pop("num_inference_steps", 50)
    guidance_scale = data.pop("guidance_scale", 7.5)
    num_images_per_prompt = data.pop("num_images_per_prompt", 4)

    # run generation with parameters
    generated_images = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
    )["images"]

    # encode generated images as base64 JPEG strings
    encoded_images = []
    for image in generated_images:
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        encoded_images.append(base64.b64encode(buffered.getvalue()).decode())

    # create response
    return {"generated_images": encoded_images}
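
For reference, a client could invoke the deployed endpoint and decode the base64-encoded JPEGs that predict_fn returns roughly as sketched below. This is a minimal, illustrative sketch and not part of inference.py: the endpoint name "stable-diffusion-endpoint", the prompt, and the output filename are assumptions, and it presumes the endpoint accepts and returns application/json.

# Hypothetical client-side sketch (assumptions: endpoint name, prompt, JSON in/out).
import base64
import json
from io import BytesIO

import boto3
from PIL import Image

runtime = boto3.client("sagemaker-runtime")

payload = {
    "inputs": "A photo of an astronaut riding a horse",  # illustrative prompt
    "num_images_per_prompt": 2,
}

response = runtime.invoke_endpoint(
    EndpointName="stable-diffusion-endpoint",  # assumed endpoint name
    ContentType="application/json",
    Body=json.dumps(payload),
)

# predict_fn returns {"generated_images": [<base64 JPEG>, ...]}
result = json.loads(response["Body"].read())
images = [Image.open(BytesIO(base64.b64decode(b64))) for b64 in result["generated_images"]]
images[0].save("generated-0.jpg")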