Setting Up Your AI Development Environment
A proper AI dev environment uses conda or venv for package isolation, PyTorch with CUDA for GPU acceleration, and Jupyter or VS Code for interactive development. Getting this right from the start saves hours of debugging dependency conflicts.
Full AI Environment Setup
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# AI Development Environment Setup
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# OPTION 1: Conda (recommended — handles CUDA + Python together)
# conda create -n ai-course python=3.11 -y
# conda activate ai-course
# conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
# OPTION 2: pip + venv
# python -m venv venv
# source venv/bin/activate (Linux/Mac) or venv\Scripts\activate (Windows)
# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
# Core AI packages
# pip install transformers datasets accelerate peft # HuggingFace stack
# pip install openai langchain langchain-community # LLM apps
# pip install diffusers # Stable Diffusion
# pip install scikit-learn matplotlib seaborn # ML + visualization
# pip install jupyter jupyterlab ipywidgets # notebooks
# pip install fastapi uvicorn # model serving
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# VERIFY YOUR SETUP — run this script to confirm everything works
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
import sys
import torch
def check_environment():
    """Print a health report of the local AI dev environment and return the best device.

    Reports the Python and PyTorch versions, detects the best available
    compute device (CUDA GPU, Apple Silicon MPS, or CPU), runs a small
    matrix multiply on that device as a sanity check, and reports whether
    the optional HuggingFace Transformers and OpenAI SDK packages are
    importable.

    Returns:
        str: the selected device name — "cuda", "mps", or "cpu".
    """
    print("=" * 50)
    print("AI Environment Health Check")
    print("=" * 50)
    # Python version
    print(f"Python: {sys.version.split()[0]}")
    # PyTorch
    print(f"PyTorch: {torch.__version__}")
    # GPU / CUDA — detect the device once and reuse it for both the report
    # and the test computation (avoids duplicating the availability checks).
    if torch.cuda.is_available():
        device = "cuda"
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"CUDA: {torch.version.cuda}")
        print(f"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        device = "mps"
        print("GPU: Apple Silicon MPS (Metal Performance Shaders)")
    else:
        device = "cpu"
        print("GPU: Not available — using CPU (slower but works)")
    print(f"\nActive compute device: {device.upper()}")
    # Quick GPU test: a 1000x1000 matmul on the selected device.
    a = torch.randn(1000, 1000, device=device)
    b = torch.randn(1000, 1000, device=device)
    c = a @ b  # matrix multiply — GPU should be 100x faster
    print(f"Test matmul (1000x1000): OK — result shape {tuple(c.shape)}")
    # Optional packages: report the version if installed, otherwise print
    # the install hint instead of crashing.
    try:
        import transformers
        print(f"Transformers: {transformers.__version__}")
    except ImportError:
        print("Transformers: NOT installed — run: pip install transformers")
    try:
        import openai
        print(f"OpenAI SDK: {openai.__version__}")
    except ImportError:
        print("OpenAI: NOT installed — run: pip install openai")
    print("\n✅ Environment check complete!")
    return device
# Run the health check at script/import time and keep the selected compute
# device ("cuda" / "mps" / "cpu") for later use.
device = check_environment()
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Google Colab — free GPU (T4, A100 with Colab Pro)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Open: colab.research.google.com
# Runtime → Change runtime type → GPU (T4)
# Free tier: ~12h sessions, resets state between sessions
# Colab Pro ($10/mo): A100 access, longer sessions, more RAM
# GPU VRAM requirements for different tasks:
# Maps a representative training/inference task to the approximate VRAM
# needed to run it. Used below to print a quick reference table.
gpu_requirements = {
    "Train small CNN (CIFAR-10)": "4 GB VRAM",
    "Fine-tune DistilBERT": "4-8 GB VRAM",
    "Fine-tune Llama 3 8B (4-bit QLoRA)": "6-8 GB VRAM",
    "Fine-tune Llama 3 70B (4-bit QLoRA)": "40 GB VRAM (A100)",
    "Run Stable Diffusion XL inference": "8-12 GB VRAM",
    "Train GPT-2 from scratch": "8 GB VRAM",
}
print("\nGPU VRAM requirements:")
# Left-pad task names to 45 chars so the arrows line up in a column.
for task, vram in gpu_requirements.items():
    print(f" {task:45s} → {vram}")
Tip
Practice Setting Up Your AI Development Environment in small, isolated examples before integrating into larger projects. Breaking concepts into small experiments builds genuine understanding faster than reading alone.
Modern NLP is Transformer-based: models are pre-trained on large text corpora, then fine-tuned for specific downstream tasks.
Practice Task
Note
Practice Task — (1) Write a working example of Setting Up Your AI Development Environment from scratch without looking at notes. (2) Modify it to handle an edge case (empty input, null value, or error state). (3) Share your solution in the Priygop community for feedback.
Quick Quiz
Common Mistake
Warning
A common mistake with Setting Up Your AI Development Environment is skipping edge case testing — empty inputs, null values, and unexpected data types. Always validate boundary conditions to write robust, production-ready AI code.