# This sets up the original ComfyUI and Wan2.2 on an A100.
# The Wan2.2 model downloads are the ComfyUI-repackaged versions.
# In the ComfyUI templates, the image-to-video template (a few simple steps) needs all of these models to run.
#!/bin/bash
# Expert Setup: ComfyUI + Wan2.2 Text-to-Video (Repackaged)
# Target: Lambda Labs (A10/A100) | Ubuntu 22.04
# NOTE: ComfyUI and the Lambda stack conflict on numpy (constrained to <2.0 below).
# Strict mode: abort on command failure (-e), unset variables (-u),
# and failures anywhere in a pipeline (-o pipefail).
set -euo pipefail
echo "Initializing Wan2.2 Environment on Lambda Stack..."
# -------------------------------
# System Dependencies
# -------------------------------
# Core tooling plus the image/video codec libraries that the
# Pillow/OpenCV/ffmpeg stack links against. One apt transaction
# installs the identical package set the original two calls did.
sudo apt update
sudo apt install -y \
    git-lfs wget tmux python3-pip python3-venv build-essential \
    ffmpeg \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libimagequant-dev \
    libvips-dev \
    libwebp-dev \
    libjpeg-dev \
    libpng-dev \
    libtiff-dev \
    libgif-dev \
    libopenexr-dev
# Register the Git LFS hooks (required for the model repositories).
git lfs install
# -------------------------------
# ComfyUI Setup
# -------------------------------
# Clone a repo only if its destination directory does not exist yet.
# Without this guard, re-running the script aborts under `set -e`
# because `git clone` refuses to clone into an existing directory.
clone_if_missing() {
  local repo=$1 dest=$2
  if [ ! -d "$dest" ]; then
    git clone "$repo" "$dest"
  fi
}

clone_if_missing https://github.com/Comfy-Org/ComfyUI.git ComfyUI
cd ComfyUI
# 1. The Brain (Manager)
clone_if_missing https://github.com/ltdrdata/ComfyUI-Manager.git custom_nodes/ComfyUI-Manager
# 2. Video Infrastructure (VHS)
# Critical for saving MP4s and loading video inputs
clone_if_missing https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite.git custom_nodes/ComfyUI-VideoHelperSuite
# 3. Native Wan Support & Model Loading
# Kijai's wrapper is great, but ensure you also have GGUF support
# for the MoE models to save VRAM on the A100s.
clone_if_missing https://github.com/city96/ComfyUI-GGUF.git custom_nodes/ComfyUI-GGUF
clone_if_missing https://github.com/kijai/ComfyUI-WanVideoWrapper.git custom_nodes/ComfyUI-WanVideoWrapper
# 4. The "Swiss Army Knife" (Essential for UI and workflow cleaning)
# Provides nodes like "Set/Get" which prevent spaghetti in video workflows
clone_if_missing https://github.com/kijai/ComfyUI-KJNodes.git custom_nodes/ComfyUI-KJNodes
clone_if_missing https://github.com/rgthree/rgthree-comfy.git custom_nodes/rgthree-comfy
# -------------------------------
# Python Virtual Environment
# -------------------------------
echo "Setting up Python environment..."
# Only create the venv on the first run; re-creating it is wasteful
# and `python3 -m venv` over an active env can be surprising.
if [ ! -d venv ]; then
  python3 -m venv venv
fi
source venv/bin/activate
pip install --upgrade pip
# The 'Future-Proof' Numpy Guard ---
# We create a global constraint file and tell PIP to use it for EVERYTHING.
# We write the constraint into the venv's activate script so it's always active.
echo "numpy>=1.26,<2.0" > .numpy_guard.txt
CONSTRAINT_PATH=$(realpath .numpy_guard.txt)
# Append the export only once — re-running the script previously stacked
# duplicate lines into venv/bin/activate.
if ! grep -q "PIP_CONSTRAINT" venv/bin/activate; then
  echo "export PIP_CONSTRAINT=\"$CONSTRAINT_PATH\"" >> venv/bin/activate
fi
# Refresh the current session with the new variable
export PIP_CONSTRAINT="$CONSTRAINT_PATH"
# Install ComfyUI + Wan2.2 requirements
# The PIP_CONSTRAINT variable ensures no sub-dependency can sneak in Numpy 2.0.
# Strip the torch family from requirements.txt so pip cannot replace the GPU
# build installed below. One anchored regex: the previous /torch/d pattern
# already deleted the torchvision/torchaudio lines (making the follow-up seds
# dead code) and would also delete any unrelated requirement whose name merely
# contains "torch".
sed -i -E '/^torch(vision|audio)?([=<>~! ].*)?$/d' requirements.txt
pip install -r requirements.txt
# Pick the PyTorch wheel index matching the driver's reported CUDA version.
# '|| true' keeps set -e / pipefail from killing the script on hosts where
# nvidia-smi is missing; an empty version falls through to the cu121 default.
CUDA_VERSION=$(nvidia-smi | grep -oP "CUDA Version: \K[0-9]+\.[0-9]+" || true)
echo "Detected CUDA: $CUDA_VERSION"
if [[ "$CUDA_VERSION" == 13* ]]; then
    TORCH_INDEX="https://download.pytorch.org/whl/cu130"
elif [[ "$CUDA_VERSION" == 12* ]]; then
    TORCH_INDEX="https://download.pytorch.org/whl/cu124"
else
    TORCH_INDEX="https://download.pytorch.org/whl/cu121"
fi
pip install --upgrade torch torchvision torchaudio --index-url "$TORCH_INDEX"
# Without xformers, Wan models can be 30-40% slower.
pip install xformers --index-url "$TORCH_INDEX"
pip install torchsde
# ---------------------------------------------------------
# STABILITY-FIRST INSTALLATION (Lambda Stack 22.04 Optimized)
# ---------------------------------------------------------
# 1. AI Frameworks (The foundation for Wan 2.2)
pip install --no-cache-dir \
    "accelerate>=1.2.1" "diffusers>=0.33.0" "transformers" \
    "peft>=0.17.0" "einops" "gguf>=0.17.1"
# 2. Text & Data Processing (For UMT5-XXL)
# NOTE: the pillow pin MUST be quoted — unquoted, the shell parses
# `pillow>=10.3.0` as `pillow` plus a stdout redirect to a file named
# "=10.3.0", silently dropping the version floor.
pip install --no-cache-dir \
    sentencepiece protobuf ftfy scipy chardet typing-extensions \
    "pillow>=10.3.0" color-matcher matplotlib mss
# 3. Video & UI Helpers (For VHS and Manager)
pip install --no-cache-dir \
    opencv-python-headless imageio-ffmpeg GitPython PyGithub \
    matrix-nio typer rich toml uv pyloudnorm \
    huggingface_hub hf_transfer
# Accelerated downloads via the Rust hf_transfer backend.
export HF_HUB_ENABLE_HF_TRANSFER=1
# Fetch only the repackaged files the i2v template actually needs.
WAN_REPO="Comfy-Org/Wan_2.2_ComfyUI_Repackaged"
WAN_FILES=(
  "split_files/vae/wan_2.1_vae.safetensors"
  "split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors"
  "split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors"
  "split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors"
  "split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors"
  "split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors"
)
hf download "$WAN_REPO" "${WAN_FILES[@]}" --local-dir download
# Create only the model subdirectories ComfyUI will read from.
mkdir -p models/vae models/text_encoders models/diffusion_models models/loras
# Relocate the downloads into the corresponding ComfyUI folders.
echo "Moving files to designated model folders..."
mv download/split_files/vae/wan_2.1_vae.safetensors models/vae/
mv download/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors models/text_encoders/
mv download/split_files/diffusion_models/wan2.2_i2v_*.safetensors models/diffusion_models/
mv download/split_files/loras/wan2.2_i2v_*.safetensors models/loras/
# 5. Review & Launch
# Same banner text as before, emitted in a single printf call.
printf '%s\n' \
  "Wan2.2 Installation Finished." \
  "--------------------------------------------------------" \
  "SECURITY NOTE: Binding to 127.0.0.1 (Localhost Only)" \
  "--------------------------------------------------------"
# Pick flags for your Lambda GPU tier:
#python3 main.py --listen 127.0.0.1 --port 8188 #for A10
python3 main.py --listen 127.0.0.1 --port 8188 --preview-method auto --use-pytorch-cross-attention #for A100 or above
#### HOW TO RUN ####
# Use the cat-and-paste trick inside your existing PuTTY session; it reuses
# your already-authenticated login, so no extra key handling is needed.
# In PuTTY, type:
#   cat << 'EOF' > setup.sh
# Paste this script (right-click), press Enter to get a new line,
# then type EOF on that line and press Enter.
# Run it:
#   bash setup.sh