From 45e65c6de77cc172d889f25990f0b43bafede914 Mon Sep 17 00:00:00 2001 From: Slipstream Date: Sun, 4 May 2025 16:42:45 -0600 Subject: [PATCH] aa --- download_illustrious.py | 313 ++++++++++++++++++++------------ manual_download_instructions.md | 111 +++++++++++ requirements.txt | 1 + 3 files changed, 307 insertions(+), 118 deletions(-) create mode 100644 manual_download_instructions.md diff --git a/download_illustrious.py b/download_illustrious.py index c792a2e..68b4177 100644 --- a/download_illustrious.py +++ b/download_illustrious.py @@ -7,6 +7,8 @@ import shutil import argparse from tqdm import tqdm import time +import subprocess +import huggingface_hub # Illustrious XL model information MODEL_ID = 795765 @@ -15,37 +17,170 @@ MODEL_VERSION = 1 # Version 1.0 MODEL_URL = "https://civitai.com/api/download/models/795765" MODEL_INFO_URL = f"https://civitai.com/api/v1/models/{MODEL_ID}" +# Base SDXL model from HuggingFace (we'll use this as a base and replace the unet) +SDXL_BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0" + def download_file(url, destination, filename=None): """Download a file with progress bar""" if filename is None: local_filename = os.path.join(destination, url.split('/')[-1]) else: local_filename = os.path.join(destination, filename) - + with requests.get(url, stream=True) as r: r.raise_for_status() total_size = int(r.headers.get('content-length', 0)) - + # Create directory if it doesn't exist os.makedirs(os.path.dirname(local_filename), exist_ok=True) - + with open(local_filename, 'wb') as f: with tqdm(total=total_size, unit='B', unit_scale=True, desc=f"Downloading {os.path.basename(local_filename)}") as pbar: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) pbar.update(len(chunk)) - + return local_filename -def create_model_index(model_dir): - """Create a model_index.json file for the diffusers library""" - model_index = { - "_class_name": "StableDiffusionXLPipeline", - "_diffusers_version": "0.21.4", - "force_zeros_for_empty_prompt": True, - "scheduler": [ - { +def download_from_huggingface(repo_id, local_dir, component=None): + """Download a model from HuggingFace""" + try: + # Use huggingface_hub to download the model + if component: + print(f"Downloading {component} from {repo_id}...") + huggingface_hub.snapshot_download( + repo_id=repo_id, + local_dir=local_dir, + local_dir_use_symlinks=False, + allow_patterns=f"{component}/*" + ) + else: + print(f"Downloading full model from {repo_id}...") + huggingface_hub.snapshot_download( + repo_id=repo_id, + local_dir=local_dir, + local_dir_use_symlinks=False + ) + return True + except Exception as e: + print(f"Error downloading from HuggingFace: {e}") + return False + +def download_illustrious_xl(): + """Download and set up the Illustrious XL model""" + # Set up directories + script_dir = os.path.dirname(os.path.abspath(__file__)) + models_dir = os.path.join(script_dir, "models") + illustrious_dir = os.path.join(models_dir, "illustrious_xl") + temp_dir = os.path.join(models_dir, "temp") + + # Create directories if they don't exist + os.makedirs(models_dir, exist_ok=True) + os.makedirs(temp_dir, exist_ok=True) + + # Check if model already exists + if os.path.exists(os.path.join(illustrious_dir, "unet", "diffusion_pytorch_model.safetensors")) and \ + os.path.getsize(os.path.join(illustrious_dir, "unet", "diffusion_pytorch_model.safetensors")) > 100000000: # Check if file is larger than 100MB + print(f"⚠️ {MODEL_NAME} model already exists at {illustrious_dir}") + choice = input("Do you want to 
re-download and reinstall the model? (y/n): ") + if choice.lower() != 'y': + print("Download cancelled.") + return + + # Remove existing model + print(f"Removing existing {MODEL_NAME} model...") + shutil.rmtree(illustrious_dir, ignore_errors=True) + + # Create illustrious directory + os.makedirs(illustrious_dir, exist_ok=True) + + # Get model info from Civitai API + print(f"Fetching information about {MODEL_NAME} from Civitai...") + try: + response = requests.get(MODEL_INFO_URL) + response.raise_for_status() + model_info = response.json() + + # Save model info for reference + with open(os.path.join(illustrious_dir, "model_info.json"), "w") as f: + json.dump(model_info, f, indent=2) + + print(f"Model: {model_info['name']} by {model_info['creator']['username']}") + if 'description' in model_info: + print(f"Description: {model_info['description'][:100]}...") + + except Exception as e: + print(f"⚠️ Failed to fetch model info: {e}") + print("Continuing with download anyway...") + + # First, download the base SDXL model from HuggingFace + print(f"Step 1: Downloading base SDXL model from HuggingFace...") + print("This will download the VAE, text encoders, and tokenizers needed for the model.") + print("This may take a while (several GB of data)...") + + # Download each component separately to avoid downloading the full model + components = ["vae", "text_encoder", "text_encoder_2", "tokenizer", "tokenizer_2", "scheduler"] + for component in components: + success = download_from_huggingface(SDXL_BASE_MODEL, illustrious_dir, component) + if not success: + print(f"Failed to download {component} from HuggingFace.") + print("Trying alternative method...") + + # Try using diffusers to download the model + try: + print(f"Installing diffusers if not already installed...") + subprocess.check_call([sys.executable, "-m", "pip", "install", "diffusers"]) + + # Use Python to download the model components + from diffusers import StableDiffusionXLPipeline + print(f"Downloading {component} using diffusers...") + + # Create a temporary directory for the download + temp_model_dir = os.path.join(temp_dir, "sdxl_base") + os.makedirs(temp_model_dir, exist_ok=True) + + # Download only the specified components + StableDiffusionXLPipeline.from_pretrained( + SDXL_BASE_MODEL, + torch_dtype="float16", + variant="fp16", + use_safetensors=True, + cache_dir=temp_model_dir + ) + + # Copy the component to the illustrious directory + component_dir = os.path.join(temp_model_dir, component) + if os.path.exists(component_dir): + shutil.copytree(component_dir, os.path.join(illustrious_dir, component), dirs_exist_ok=True) + print(f"Successfully copied {component} to {illustrious_dir}") + else: + print(f"Could not find {component} in downloaded model.") + + except Exception as e: + print(f"Error using diffusers to download {component}: {e}") + print("You may need to manually download the SDXL base model and copy the components.") + + # Now download the Illustrious XL model from Civitai + print(f"\nStep 2: Downloading {MODEL_NAME} from Civitai...") + try: + # Download to temp directory + model_file = download_file(MODEL_URL, temp_dir, "illustrious_xl.safetensors") + + # Create the unet directory if it doesn't exist + os.makedirs(os.path.join(illustrious_dir, "unet"), exist_ok=True) + + # Move the model file to the unet directory + print(f"Moving {MODEL_NAME} model to the unet directory...") + shutil.move(model_file, os.path.join(illustrious_dir, "unet", "diffusion_pytorch_model.safetensors")) + + # Create a model_index.json file + 
print("Creating model_index.json file...") + model_index = { + "_class_name": "StableDiffusionXLPipeline", + "_diffusers_version": "0.21.4", + "force_zeros_for_empty_prompt": True, + "scheduler": { "_class_name": "DPMSolverMultistepScheduler", "_diffusers_version": "0.21.4", "beta_end": 0.012, @@ -59,143 +194,85 @@ def create_model_index(model_dir): "timestep_spacing": "leading", "trained_betas": None, "use_karras_sigmas": True - } - ], - "text_encoder": [ - { - "_class_name": "CLIPTextModel", + }, + "text_encoder": [ + { + "_class_name": "CLIPTextModel", + "_diffusers_version": "0.21.4" + }, + { + "_class_name": "CLIPTextModelWithProjection", + "_diffusers_version": "0.21.4" + } + ], + "tokenizer": [ + { + "_class_name": "CLIPTokenizer", + "_diffusers_version": "0.21.4" + }, + { + "_class_name": "CLIPTokenizer", + "_diffusers_version": "0.21.4" + } + ], + "unet": { + "_class_name": "UNet2DConditionModel", "_diffusers_version": "0.21.4" }, - { - "_class_name": "CLIPTextModelWithProjection", + "vae": { + "_class_name": "AutoencoderKL", "_diffusers_version": "0.21.4" } - ], - "tokenizer": [ - { - "_class_name": "CLIPTokenizer", - "_diffusers_version": "0.21.4" - }, - { - "_class_name": "CLIPTokenizer", - "_diffusers_version": "0.21.4" - } - ], - "unet": { - "_class_name": "UNet2DConditionModel", - "_diffusers_version": "0.21.4" - }, - "vae": { - "_class_name": "AutoencoderKL", - "_diffusers_version": "0.21.4" } - } - - with open(os.path.join(model_dir, "model_index.json"), "w") as f: - json.dump(model_index, f, indent=2) -def download_illustrious_xl(): - """Download and set up the Illustrious XL model""" - # Set up directories - script_dir = os.path.dirname(os.path.abspath(__file__)) - models_dir = os.path.join(script_dir, "models") - illustrious_dir = os.path.join(models_dir, "illustrious_xl") - temp_dir = os.path.join(models_dir, "temp") - - # Create directories if they don't exist - os.makedirs(models_dir, exist_ok=True) - os.makedirs(temp_dir, exist_ok=True) - - # Check if model already exists - if os.path.exists(os.path.join(illustrious_dir, "model_index.json")): - print(f"⚠️ {MODEL_NAME} model already exists at {illustrious_dir}") - choice = input("Do you want to re-download and reinstall the model? 
(y/n): ") - if choice.lower() != 'y': - print("Download cancelled.") - return - - # Remove existing model - print(f"Removing existing {MODEL_NAME} model...") - shutil.rmtree(illustrious_dir, ignore_errors=True) - - # Create illustrious directory - os.makedirs(illustrious_dir, exist_ok=True) - - # Get model info from Civitai API - print(f"Fetching information about {MODEL_NAME} from Civitai...") - try: - response = requests.get(MODEL_INFO_URL) - response.raise_for_status() - model_info = response.json() - - # Save model info for reference - with open(os.path.join(illustrious_dir, "model_info.json"), "w") as f: - json.dump(model_info, f, indent=2) - - print(f"Model: {model_info['name']} by {model_info['creator']['username']}") - print(f"Description: {model_info['description'][:100]}...") - - except Exception as e: - print(f"⚠️ Failed to fetch model info: {e}") - print("Continuing with download anyway...") - - # Download the model - print(f"Downloading {MODEL_NAME} from Civitai...") - try: - # Download to temp directory - model_file = download_file(MODEL_URL, temp_dir, "illustrious_xl.safetensors") - - # Move the file to the model directory - print(f"Setting up {MODEL_NAME} model...") - - # Create the necessary directory structure for diffusers - os.makedirs(os.path.join(illustrious_dir, "unet"), exist_ok=True) - os.makedirs(os.path.join(illustrious_dir, "vae"), exist_ok=True) - os.makedirs(os.path.join(illustrious_dir, "text_encoder"), exist_ok=True) - os.makedirs(os.path.join(illustrious_dir, "text_encoder_2"), exist_ok=True) - os.makedirs(os.path.join(illustrious_dir, "tokenizer"), exist_ok=True) - os.makedirs(os.path.join(illustrious_dir, "tokenizer_2"), exist_ok=True) - - # Move the model file to the unet directory - shutil.move(model_file, os.path.join(illustrious_dir, "unet", "diffusion_pytorch_model.safetensors")) - - # Create a model_index.json file - create_model_index(illustrious_dir) - + with open(os.path.join(illustrious_dir, "model_index.json"), "w") as f: + json.dump(model_index, f, indent=2) + # Create a README.md file with information about the model with open(os.path.join(illustrious_dir, "README.md"), "w") as f: f.write(f"# {MODEL_NAME}\n\n") f.write(f"Downloaded from Civitai: https://civitai.com/models/{MODEL_ID}\n\n") f.write("This model requires the diffusers library to use.\n") f.write("Use the /generate command in the Discord bot to generate images with this model.\n") - - print(f"✅ {MODEL_NAME} model has been downloaded and set up successfully!") + + # Check if the model file is large enough (should be several GB) + unet_file = os.path.join(illustrious_dir, "unet", "diffusion_pytorch_model.safetensors") + if os.path.exists(unet_file): + file_size_gb = os.path.getsize(unet_file) / (1024 * 1024 * 1024) + print(f"Model file size: {file_size_gb:.2f} GB") + + if file_size_gb < 1.0: + print(f"⚠️ Warning: Model file seems too small ({file_size_gb:.2f} GB). 
It may not be complete.") + print("The download might have been interrupted or the model might not be the full version.") + print("You may want to try downloading again with the --force flag.") + + print(f"\n✅ {MODEL_NAME} model has been downloaded and set up successfully!") print(f"Model location: {illustrious_dir}") print("You can now use the model with the /generate command in the Discord bot.") - + except Exception as e: print(f"❌ Error downloading or setting up the model: {e}") import traceback traceback.print_exc() - + # Clean up print("Cleaning up...") shutil.rmtree(illustrious_dir, ignore_errors=True) shutil.rmtree(temp_dir, ignore_errors=True) - + print("Download failed. Please try again later.") return False - + # Clean up temp directory shutil.rmtree(temp_dir, ignore_errors=True) - + return True if __name__ == "__main__": parser = argparse.ArgumentParser(description=f"Download and set up the {MODEL_NAME} model from Civitai") parser.add_argument("--force", action="store_true", help="Force download even if the model already exists") args = parser.parse_args() - + if args.force: # Remove existing model if it exists script_dir = os.path.dirname(os.path.abspath(__file__)) @@ -203,5 +280,5 @@ if __name__ == "__main__": if os.path.exists(illustrious_dir): print(f"Removing existing {MODEL_NAME} model...") shutil.rmtree(illustrious_dir, ignore_errors=True) - + download_illustrious_xl() diff --git a/manual_download_instructions.md b/manual_download_instructions.md new file mode 100644 index 0000000..5f7c020 --- /dev/null +++ b/manual_download_instructions.md @@ -0,0 +1,111 @@ +# Manual Download Instructions for Illustrious XL + +If the automatic download script fails, you can manually download and set up the Illustrious XL model by following these steps: + +## Step 1: Download the Model Files + +1. Download the Illustrious XL model from Civitai: + - Go to: https://civitai.com/models/795765/illustrious-xl + - Click the "Download" button to download the model file + - Save the file as `illustrious_xl.safetensors` + +2. Download the base SDXL model components from Hugging Face: + - Go to: https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0 + - Download the following files/folders: + - `vae` folder + - `text_encoder` folder + - `text_encoder_2` folder + - `tokenizer` folder + - `tokenizer_2` folder + - `scheduler` folder + +## Step 2: Set Up the Directory Structure + +1. Create the following directory structure in your Discord bot folder: + ``` + discordbot/ + └── models/ + └── illustrious_xl/ + ├── unet/ + ├── vae/ + ├── text_encoder/ + ├── text_encoder_2/ + ├── tokenizer/ + └── tokenizer_2/ + ``` + +2. 
Place the downloaded files in the appropriate directories: + - Move `illustrious_xl.safetensors` to `discordbot/models/illustrious_xl/unet/` and rename it to `diffusion_pytorch_model.safetensors` + - Copy the contents of the downloaded `vae` folder to `discordbot/models/illustrious_xl/vae/` + - Copy the contents of the downloaded `text_encoder` folder to `discordbot/models/illustrious_xl/text_encoder/` + - Copy the contents of the downloaded `text_encoder_2` folder to `discordbot/models/illustrious_xl/text_encoder_2/` + - Copy the contents of the downloaded `tokenizer` folder to `discordbot/models/illustrious_xl/tokenizer/` + - Copy the contents of the downloaded `tokenizer_2` folder to `discordbot/models/illustrious_xl/tokenizer_2/` + +## Step 3: Create the Model Index File + +Create a file named `model_index.json` in the `discordbot/models/illustrious_xl/` directory with the following content: + +```json +{ + "_class_name": "StableDiffusionXLPipeline", + "_diffusers_version": "0.21.4", + "force_zeros_for_empty_prompt": true, + "scheduler": { + "_class_name": "DPMSolverMultistepScheduler", + "_diffusers_version": "0.21.4", + "beta_end": 0.012, + "beta_schedule": "scaled_linear", + "beta_start": 0.00085, + "num_train_timesteps": 1000, + "prediction_type": "epsilon", + "solver_order": 2, + "solver_type": "midpoint", + "thresholding": false, + "timestep_spacing": "leading", + "trained_betas": null, + "use_karras_sigmas": true + }, + "text_encoder": [ + { + "_class_name": "CLIPTextModel", + "_diffusers_version": "0.21.4" + }, + { + "_class_name": "CLIPTextModelWithProjection", + "_diffusers_version": "0.21.4" + } + ], + "tokenizer": [ + { + "_class_name": "CLIPTokenizer", + "_diffusers_version": "0.21.4" + }, + { + "_class_name": "CLIPTokenizer", + "_diffusers_version": "0.21.4" + } + ], + "unet": { + "_class_name": "UNet2DConditionModel", + "_diffusers_version": "0.21.4" + }, + "vae": { + "_class_name": "AutoencoderKL", + "_diffusers_version": "0.21.4" + } +} +``` + +## Step 4: Verify the Installation + +1. Check that the directory structure is correct and all files are in place +2. Make sure the `diffusion_pytorch_model.safetensors` file in the `unet` directory is large (should be several GB) +3. Restart your Discord bot +4. Use the `/generate` command to test if the model works correctly + +## Troubleshooting + +- If you get errors about missing files, make sure all the required components are downloaded and placed in the correct directories +- If you get CUDA out-of-memory errors, try reducing the image dimensions (e.g., 768x768 instead of 1024x1024) +- If the model is not showing up in the `/sd_models` command, make sure the directory structure and file names are exactly as specified above diff --git a/requirements.txt b/requirements.txt index 9a52ec1..5b8fe54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,3 +36,4 @@ accelerate tqdm safetensors xformers +huggingface_hub
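If you prefer to script Step 1 of the manual instructions rather than clicking through the Hugging Face site, the sketch below fetches only the component folders the bot needs using `huggingface_hub` (already listed in `requirements.txt`). The script name and `target_dir` path are placeholders; point it at the `models/illustrious_xl/` folder described in Step 2.

```python
# fetch_sdxl_components.py — illustrative helper, not part of the download script.
import os
from huggingface_hub import snapshot_download

target_dir = os.path.join("models", "illustrious_xl")  # adjust to your bot folder
components = ["vae", "text_encoder", "text_encoder_2",
              "tokenizer", "tokenizer_2", "scheduler"]

# Fetch only the listed folders from the SDXL base repo; the multi-GB unet is
# skipped because the Illustrious XL checkpoint replaces it anyway.
snapshot_download(
    repo_id="stabilityai/stable-diffusion-xl-base-1.0",
    local_dir=target_dir,
    allow_patterns=[f"{name}/*" for name in components],
)
print(f"SDXL components downloaded to {target_dir}")
```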
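To sanity-check the assembled directory before restarting the bot (Step 4), a minimal load-and-generate test with `diffusers` is sketched below. It assumes a CUDA-capable GPU, that every component folder (including its config files) is in place, and uses a placeholder prompt and output path.

```python
# verify_illustrious.py — minimal sketch; loading fails loudly if a component is missing.
import os
import torch
from diffusers import StableDiffusionXLPipeline

model_dir = os.path.join("models", "illustrious_xl")  # adjust to your bot folder

pipe = StableDiffusionXLPipeline.from_pretrained(
    model_dir,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.to("cuda")

# 768x768 keeps memory use below a full 1024x1024 run, per the troubleshooting note.
image = pipe(
    "a lighthouse on a cliff at sunset",
    num_inference_steps=25,
    width=768,
    height=768,
).images[0]
image.save("illustrious_test.png")
print("Saved illustrious_test.png")
```

If this script runs end to end, the `/generate` command in the Discord bot should be able to use the model as well.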