Instructions to use Wan-AI/Wan2.1-T2V-1.3B with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use Wan-AI/Wan2.1-T2V-1.3B with Diffusers:
pip install -U diffusers transformers accelerate
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("Wan-AI/Wan2.1-T2V-1.3B", dtype=torch.bfloat16, device_map="cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
- Inference
- Notebooks
- Google Colab
- Kaggle
Update README.md
Browse files
README.md
CHANGED
|
@@ -171,7 +171,7 @@ from diffusers import AutoencoderKLWan, WanPipeline
|
|
| 171 |
from diffusers.utils import export_to_video
|
| 172 |
|
| 173 |
# Available models: Wan-AI/Wan2.1-T2V-14B, Wan-AI/Wan2.1-T2V-1.3B
|
| 174 |
-
model_id = "Wan-AI/Wan2.1-T2V-14B"
|
| 175 |
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
|
| 176 |
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
|
| 177 |
pipe.to("cuda")
|
|
|
|
| 171 |
from diffusers.utils import export_to_video
|
| 172 |
|
| 173 |
# Available models: Wan-AI/Wan2.1-T2V-14B, Wan-AI/Wan2.1-T2V-1.3B
|
| 174 |
+
model_id = "Wan-AI/Wan2.1-T2V-1.3B"
|
| 175 |
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
|
| 176 |
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
|
| 177 |
pipe.to("cuda")
|