# locustfile.py - Minimal example
from locust import HttpUser, task, between


class WebsiteUser(HttpUser):
    """Simulated visitor that browses the index page and an item page."""

    wait_time = between(1, 5)  # Wait 1-5 seconds between tasks

    @task
    def index_page(self):
        self.client.get("/")

    @task(3)  # 3x more likely than other tasks
    def view_item(self):
        self.client.get("/item/123")
from locust import HttpUser, task, between
import random


class AuthenticatedUser(HttpUser):
    """User that logs in once on start and then hits protected endpoints
    with the bearer token obtained from /login."""

    wait_time = between(1, 3)

    def on_start(self):
        """Called when user starts - login here."""
        response = self.client.post("/login", json={
            "username": "testuser",
            "password": "password123",
        })
        # NOTE(review): assumes the login response body contains a "token"
        # key; a failed login would raise KeyError here — confirm API shape.
        self.token = response.json()["token"]

    @task
    def protected_endpoint(self):
        headers = {"Authorization": f"Bearer {self.token}"}
        self.client.get("/api/protected", headers=headers)

    @task(2)
    def create_resource(self):
        headers = {"Authorization": f"Bearer {self.token}"}
        self.client.post(
            "/api/items",
            json={"name": "Test", "value": random.randint(1, 100)},
            headers=headers,
        )
from locust import LoadTestShape


class StagesLoadShape(LoadTestShape):
    """
    Custom load pattern with stages:
    - Ramp to 100 users over 60s
    - Hold at 100 for 120s
    - Ramp to 500 over 60s
    - Hold at 500 for 180s
    """

    # Each stage's "duration" is the cumulative run time (seconds) at which
    # the stage ends, not the length of the stage itself.
    stages = [
        {"duration": 60, "users": 100, "spawn_rate": 10},
        {"duration": 180, "users": 100, "spawn_rate": 10},
        {"duration": 240, "users": 500, "spawn_rate": 50},
        {"duration": 420, "users": 500, "spawn_rate": 50},
    ]

    def tick(self):
        """Return (user_count, spawn_rate) for the current stage,
        or None once all stages have elapsed (stops the test)."""
        run_time = self.get_run_time()
        for stage in self.stages:
            if run_time < stage["duration"]:
                return (stage["users"], stage["spawn_rate"])
        return None
# Set environment variables for Locust
export LOCUST_LOCUSTFILE=locustfile.py
export LOCUST_HOST=https://api.example.com
export LOCUST_USERS=1000
export LOCUST_SPAWN_RATE=100
export LOCUST_RUN_TIME=30m
export LOCUST_HEADLESS=true

# Run with environment variables
locust
# Create locustfile for API testing
cat > api_test.py << 'EOF'
from locust import HttpUser, task, between

class APIUser(HttpUser):
    wait_time = between(1, 2)

    def on_start(self):
        # Authenticate once per user
        response = self.client.post("/api/auth/login", json={
            "username": "testuser",
            "password": "testpass"
        })
        self.token = response.json()["access_token"]

    @task(3)
    def get_users(self):
        self.client.get("/api/users",
                        headers={"Authorization": f"Bearer {self.token}"})

    @task(1)
    def create_user(self):
        self.client.post("/api/users",
                         json={"name": "New User", "email": "test@example.com"},
                         headers={"Authorization": f"Bearer {self.token}"})
EOF

# Run the test
locust -f api_test.py --headless --host=https://api.example.com \
    -u 500 -r 50 -t 10m --html=api_report.html
Use Case 2: Distributed Load Testing Across Multiple Machines
# On master machine (192.168.1.100)
locust -f locustfile.py --master --master-bind-host=0.0.0.0 \
    --expect-workers=3 --web-host=0.0.0.0

# On worker machine 1
locust -f locustfile.py --worker --master-host=192.168.1.100

# On worker machine 2
locust -f locustfile.py --worker --master-host=192.168.1.100

# On worker machine 3
locust -f locustfile.py --worker --master-host=192.168.1.100

# Access web UI from any machine
# http://192.168.1.100:8089
Use Case 3: CI/CD Integration with Automated Testing
# Create test script for CI/CD pipeline
cat > run_load_test.sh << 'EOF'
#!/bin/bash
# Run load test and capture exit code
locust -f locustfile.py --headless \
    --host=https://staging.example.com \
    -u 1000 -r 100 -t 5m \
    --html=report.html \
    --csv=results \
    --exit-code-on-error 1

# Check if test passed
if [ $? -eq 0 ]; then
    echo "Load test passed"
    exit 0
else
    echo "Load test failed"
    exit 1
fi
EOF
chmod +x run_load_test.sh

./run_load_test.sh
Use realistic wait times: Set wait_time = between(1, 5) to simulate real user behavior with pauses between actions, avoiding unrealistic constant hammering
Implement proper authentication: Use on_start() method to authenticate once per user rather than on every request, reducing overhead and mimicking real sessions
Tag your tasks: Use @tag('critical', 'api') decorators to organize tests and run specific subsets during development or targeted testing
Monitor resource usage: Watch CPU and memory on both Locust machines and target servers; Locust workers should use <80% CPU for accurate results
Start with small loads: Begin tests with 10-50 users to verify test logic works correctly before scaling to thousands of concurrent users
Use distributed mode for scale: Single machine limited to ~5000-10000 users; use master-worker setup to simulate larger loads across multiple machines
Implement proper error handling: Use response.failure() to mark failed requests and catch exceptions to prevent test crashes from stopping load generation
Version control your tests: Store locustfiles in Git alongside application code, treating performance tests as first-class citizens in your testing strategy
Set realistic spawn rates: Don't spawn all users instantly; use gradual ramp-up (10-100 users/sec) to avoid overwhelming systems and getting false failures
Generate reports for analysis: Always use --html and --csv flags to capture results for post-test analysis and historical comparison