This commit is contained in:
parent 0a0dc79d44
commit f341f7e351

@@ -1,49 +1,94 @@
import subprocess
import sys
import os
import platform


def install_dependencies():
    """Install the required dependencies for Stable Diffusion."""
    print("Installing Stable Diffusion dependencies...")

    # List of required packages
    packages = [
        "torch",
        "diffusers",
        "transformers",
        "accelerate",
        "tqdm",
        "safetensors"
    ]

    # Check if CUDA is available
    try:
        import torch
        cuda_available = torch.cuda.is_available()
        if cuda_available:
            cuda_version = torch.version.cuda
            print(f"✅ CUDA is available (version {cuda_version})")
            print(f"GPU: {torch.cuda.get_device_name(0)}")
        else:
            print("⚠️ CUDA is not available. Stable Diffusion will run on CPU (very slow).")
    except ImportError:
        print("PyTorch not installed yet. Will install with CUDA support.")
        cuda_available = False

    # Install PyTorch with CUDA support if not already installed
    if "torch" not in sys.modules:
        print("Installing PyTorch with CUDA support...")
        if platform.system() == "Windows":
            # For Windows, use the PyTorch website command
            try:
                subprocess.check_call([
                    sys.executable, "-m", "pip", "install",
                    "torch", "torchvision", "torchaudio",
                    "--index-url", "https://download.pytorch.org/whl/cu118"
                ])
                print("✅ Successfully installed PyTorch with CUDA support")
            except subprocess.CalledProcessError as e:
                print(f"❌ Error installing PyTorch: {e}")
                print("Continuing with other dependencies...")
        else:
            # For Linux/Mac, use pip
            try:
                subprocess.check_call([sys.executable, "-m", "pip", "install", "torch"])
                print("✅ Successfully installed PyTorch")
            except subprocess.CalledProcessError as e:
                print(f"❌ Error installing PyTorch: {e}")
                print("Continuing with other dependencies...")

    # Install xformers for memory efficiency if on Windows with CUDA
    if platform.system() == "Windows" and cuda_available:
        try:
            print("Installing xformers for memory efficiency...")
            subprocess.check_call([
                sys.executable, "-m", "pip", "install",
                "xformers", "--index-url", "https://download.pytorch.org/whl/cu118"
            ])
            print("✅ Successfully installed xformers")
            packages.append("xformers")  # Add to the list of installed packages
        except subprocess.CalledProcessError as e:
            print(f"⚠️ Error installing xformers: {e}")
            print("Continuing without xformers (memory usage may be higher)...")

    # Install other packages
    for package in packages:
        if package == "torch":  # Skip torch as we've already handled it
            continue

        print(f"Installing {package}...")
        try:
            subprocess.check_call([sys.executable, "-m", "pip", "install", package])
            print(f"✅ Successfully installed {package}")
        except subprocess.CalledProcessError as e:
            print(f"❌ Error installing {package}: {e}")
            print(f"You may need to install {package} manually.")

    print("\n✅ All dependencies installed successfully!")
    print("\nNext steps:")
    print("1. Download the Illustrious XL model by running: python download_illustrious.py")
    print("2. Restart your bot")
    print("3. Use the /generate command with a text prompt")
    print("4. Wait for the image to be generated (this may take some time)")

    return True


if __name__ == "__main__":

@@ -1,6 +1,6 @@
# Stable Diffusion Discord Bot Command

This feature adds a Stable Diffusion image generation command to your Discord bot, running locally on your GPU. It includes support for the Illustrious XL model from Civitai.

## Installation

@@ -9,9 +9,14 @@ This feature adds a Stable Diffusion image generation command to your Discord bot
   python install_stable_diffusion.py
   ```

2. Download the Illustrious XL model from Civitai:
   ```
   python download_illustrious.py
   ```

3. Make sure you have a compatible GPU with CUDA support. The command will work on CPU but will be extremely slow.

4. Restart your bot after installing the dependencies and downloading the model.

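Before starting the bot, you can quickly confirm that PyTorch actually sees your GPU. This is just a convenience check; it uses only standard PyTorch calls and mirrors what the installer prints:

```python
import torch

# Reports whether a CUDA device is visible to PyTorch and which GPU will be used.
if torch.cuda.is_available():
    print(f"CUDA {torch.version.cuda} available on {torch.cuda.get_device_name(0)}")
else:
    print("CUDA not available; generation will fall back to the CPU (very slow).")
```
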
## Commands

@@ -23,8 +28,8 @@ Generate an image using Stable Diffusion running locally on your GPU.
- `negative_prompt` (optional): Things to avoid in the generated image
- `steps` (optional, default: 30): Number of inference steps (higher = better quality but slower)
- `guidance_scale` (optional, default: 7.5): How closely to follow the prompt (higher = more faithful but less creative)
- `width` (optional, default: 1024): Image width (must be a multiple of 8)
- `height` (optional, default: 1024): Image height (must be a multiple of 8)
- `seed` (optional): Random seed for reproducible results (leave empty for random)
- `hidden` (optional, default: false): Whether to make the response visible only to you

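These options map more or less directly onto a diffusers text-to-image call. The sketch below is illustrative only: the bot's actual command handler is not part of this commit, and the base SDXL checkpoint is used here simply because it is one of the models listed below. The `hidden` option is handled on the Discord side (ephemeral reply), not by the pipeline.

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Load an SDXL pipeline in half precision on the GPU
# (any of the Hugging Face models listed below loads similarly).
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# Illustrative values; in the bot these come from the /generate options.
seed = 1234  # leave as None for a random seed
generator = torch.Generator(device="cuda").manual_seed(seed) if seed is not None else None

image = pipe(
    prompt="a castle on a cliff at sunset, detailed illustration",
    negative_prompt="blurry, low quality",
    num_inference_steps=30,   # steps
    guidance_scale=7.5,       # guidance_scale
    width=1024,               # must be a multiple of 8
    height=1024,              # must be a multiple of 8
    generator=generator,
).images[0]
image.save("output.png")
```
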
@@ -36,21 +41,44 @@ List available Stable Diffusion models or change the current model (owner only).

## Available Models

- **Illustrious XL** (Local) - A high-quality SDXL model from Civitai
- Stable Diffusion 1.5 (`runwayml/stable-diffusion-v1-5`)
- Stable Diffusion 2.1 (`stabilityai/stable-diffusion-2-1`)
- Stable Diffusion XL (`stabilityai/stable-diffusion-xl-base-1.0`)

## About Illustrious XL

Illustrious XL is a high-quality SDXL model from Civitai that produces excellent results for a wide range of prompts. It's particularly good at:

- Detailed illustrations
- Realistic images
- Fantasy and sci-fi scenes
- Character portraits
- Landscapes and environments

The model is automatically downloaded and set up by the `download_illustrious.py` script. You can find more information about the model at [Civitai](https://civitai.com/models/795765/illustrious-xl).

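The troubleshooting section below gives the path where the downloaded weights are expected to end up. A small check like the following (purely illustrative; the path is taken from that section) confirms the download worked:

```python
from pathlib import Path

# Expected location of the downloaded weights (see Troubleshooting below).
model_file = Path("discordbot/models/illustrious_xl/unet/diffusion_pytorch_model.safetensors")

if model_file.exists():
    print(f"Found Illustrious XL weights ({model_file.stat().st_size / 1024**3:.1f} GB)")
else:
    print("Weights not found; run download_illustrious.py or download them manually from Civitai.")
```
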
## Requirements

- CUDA-compatible GPU with at least 8GB VRAM (12GB+ recommended for SDXL models at 1024x1024)
- Python 3.8+
- PyTorch with CUDA support
- diffusers library
- transformers library
- accelerate library
- safetensors library
- xformers library (optional, for memory efficiency)

## Troubleshooting

- If you encounter CUDA out-of-memory errors, try the following (see the sketch after this list):
  - Reducing the image dimensions (e.g., 768x768 instead of 1024x1024)
  - Switching to a smaller model (SD 1.5 instead of SDXL)
  - Closing other applications that use GPU memory
  - Using the `--enable_attention_slicing` option when loading the model
- The first generation might take longer as the model needs to be downloaded and loaded into memory.
- If you're getting "CUDA not available" errors, make sure your GPU drivers are up to date and PyTorch is installed with CUDA support.
- If the Illustrious XL model fails to download, you can try downloading it manually from [Civitai](https://civitai.com/models/795765/illustrious-xl) and placing it in the `discordbot/models/illustrious_xl/unet` directory as `diffusion_pytorch_model.safetensors`.

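For reference, the memory-saving switches that diffusers itself provides look roughly like this (a minimal sketch on an already-loaded pipeline `pipe`, as in the earlier example; whether the bot exposes these as options is an assumption):

```python
# Optional memory savers on a loaded diffusers pipeline; pick the ones you need,
# each trades some speed for lower VRAM use.
pipe.enable_attention_slicing()    # compute attention in slices
pipe.enable_vae_slicing()          # decode the VAE in slices
pipe.enable_model_cpu_offload()    # keep idle submodules in system RAM (requires accelerate)
```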