CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Sign Up · Sign In
huggingface

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.

GitHub Repository: huggingface/notebooks
Path: blob/main/sagemaker/23_stable_diffusion_inference/code/inference.py
Views: 2555
1
import base64
2
import torch
3
from io import BytesIO
4
from diffusers import StableDiffusionPipeline
5
6
7
def model_fn(model_dir):
    """Load the Stable Diffusion pipeline for SageMaker inference.

    Called once by the SageMaker inference toolkit when the endpoint
    container starts.

    Args:
        model_dir: Directory containing the pretrained pipeline weights
            (the unpacked model artifact).

    Returns:
        A ``StableDiffusionPipeline`` on the GPU, loaded in fp16 to halve
        GPU memory use.
    """
    # fp16 halves GPU memory; the .to("cuda") call assumes a CUDA device
    # is present (true on the SageMaker GPU instances this targets).
    pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    return pipe
13
14
def predict_fn(data, pipe):
    """Run Stable Diffusion generation for one SageMaker request.

    Args:
        data: Request payload dict. Expected keys:
            - "inputs": the text prompt. If absent, the whole payload is
              used as the prompt (mirrors the HF handler convention).
            - Optional hyperparameters: "num_inference_steps" (default 50),
              "guidance_scale" (default 7.5),
              "num_images_per_prompt" (default 4).
            The dict is mutated in place by the ``pop`` calls.
        pipe: The loaded ``StableDiffusionPipeline`` returned by
            ``model_fn``.

    Returns:
        ``{"generated_images": [...]}`` where each element is a
        base64-encoded JPEG, so the response is JSON-serializable.
    """
    # get prompt & parameters
    prompt = data.pop("inputs", data)

    # hyperparameters with standard Stable Diffusion defaults
    num_inference_steps = data.pop("num_inference_steps", 50)
    guidance_scale = data.pop("guidance_scale", 7.5)
    num_images_per_prompt = data.pop("num_images_per_prompt", 4)

    # run generation with the requested parameters
    generated_images = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
    )["images"]

    # base64-encode each image as JPEG so the payload can travel as JSON.
    # NOTE(review): JPEG has no alpha channel; the standard pipeline emits
    # RGB PIL images, so this is safe — confirm if a custom pipeline is used.
    encoded_images = []
    for image in generated_images:
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        encoded_images.append(base64.b64encode(buffered.getvalue()).decode())

    # create response
    return {"generated_images": encoded_images}
35
36