# AFL++ — fuzzing quick reference (installation, compilation, running, triage)

# Clone AFL++ repository
git clone https://github.com/AFLplusplus/AFLplusplus.git
cd AFLplusplus

# Install dependencies (Ubuntu/Debian)
sudo apt-get install build-essential python3-dev automake git flex bison libglib2.0-dev pkg-config

# Build AFL++ ('distrib' builds the fuzzer plus companion tools such as QEMU mode)
make distrib
sudo make install

# Verify installation (prints usage if afl-fuzz is on PATH)
afl-fuzz -h
# Using official AFL++ Docker image (avoids building from source)
docker pull aflplusplus/aflplusplus:latest

# Run interactive container
docker run -it aflplusplus/aflplusplus:latest /bin/bash

# Mount local directories for fuzzing (host target dir appears inside as /work)
docker run -it -v /path/to/target:/work aflplusplus/aflplusplus:latest
cd AFLplusplus
# Build QEMU mode for x86_64 targets (enables binary-only fuzzing with -Q)
cd qemu_mode
./build_qemu_support.sh

# For ARM/other architectures (CPU_TARGET selects the QEMU guest arch)
CPU_TARGET=arm ./build_qemu_support.sh
# Compile C program with AFL++ (afl-cc auto-selects the best available backend)
afl-cc -o target target.c

# Compile C++ program
afl-c++ -o target target.cpp

# With optimizations (recommended)
afl-cc -O3 -o target target.c

# With debugging symbols (useful for crash triage in gdb)
afl-cc -g -O1 -o target target.c

# With ASAN (AddressSanitizer) for memory bugs
afl-cc -fsanitize=address -o target target.c
# Compile and link multiple files
afl-cc -o target main.c utils.c helper.c -lm

# Using make with AFL++ (build systems pick the compiler up from CC/CXX)
export CC=afl-cc
export CXX=afl-c++
make clean && make
# LTO mode: collision-free instrumentation via afl-clang-lto (faster, better
# coverage). NOTE: '-flto=full' is a plain compiler flag and does NOT enable
# AFL++ LTO mode — the dedicated afl-clang-lto wrapper does.
# Requires LLVM built with the LLD linker.
afl-clang-lto -o target target.c

# Check if LTO mode is available (wrapper only exists when built with LLD)
command -v afl-clang-lto && afl-clang-lto --version
# Create input directory with seed samples
mkdir -p corpus/seeds
cd corpus/seeds

# Add minimal valid inputs (small seeds keep per-execution cost low)
echo "test" > test1.txt
echo "hello" > test2.txt
# Extract seeds from real files
mkdir corpus
cp /path/to/example/files/*.txt corpus/

# For binary formats, capture network traffic or export samples
# From binaries: strings binary | head -1000 > corpus/sample
# AFL++ can start with an empty corpus and synthesize inputs itself
mkdir empty_corpus

# Run fuzzer, which will generate initial tests
afl-fuzz -i empty_corpus -o findings ./target
# Simple fuzzing run (target reads from stdin)
afl-fuzz -i corpus -o findings ./target

# Non-interactive mode (useful for scripts/CI): disable the status UI.
# NOTE: '@@' does not make the run non-interactive — it switches to
# file-based input delivery (see input modes below).
AFL_NO_UI=1 afl-fuzz -i corpus -o findings ./target

# Stop after an approximate number of executions.
# afl-fuzz has no -N option; use -E <execs> (or -V <seconds> for wall time).
afl-fuzz -i corpus -o findings -E 10000 ./target
# Target reads from stdin (default when no @@ placeholder is given)
afl-fuzz -i corpus -o findings ./target

# Target reads from file (@@ replaced with input filename)
afl-fuzz -i corpus -o findings ./target @@

# Fixed input file path: -f is an afl-fuzz option and must come BEFORE the
# target, otherwise it is passed to the target as its own argument.
afl-fuzz -i corpus -o findings -f /tmp/input ./target
# Main fuzzer (worker 1)
afl-fuzz -i corpus -o findings -M main ./target

# Secondary fuzzer (worker 2)
afl-fuzz -i corpus -o findings -S worker2 ./target

# Multiple workers (4 total), all sharing the same -o sync directory
afl-fuzz -i corpus -o findings -M main ./target &
afl-fuzz -i corpus -o findings -S worker2 ./target &
afl-fuzz -i corpus -o findings -S worker3 ./target &
afl-fuzz -i corpus -o findings -S worker4 ./target &
# Memory limit in MB (AFL++ default is no limit, unlike classic AFL's 50MB)
afl-fuzz -i corpus -o findings -m 100 ./target

# Timeout per input in ms (AFL++ auto-calculates a default)
afl-fuzz -i corpus -o findings -t 5000 ./target

# CPU affinity: bind this instance to one core with -b.
# NOTE: -c is NOT affinity — it specifies a CMPLOG binary.
afl-fuzz -i corpus -o findings -b 0 ./target

# Deterministic mutations are OFF by default in AFL++; -D enables them
# (classic AFL's -d "skip deterministic" is the AFL++ default behavior)
afl-fuzz -i corpus -o findings -D ./target

# Fuzzing dictionary: -x mixes the listed tokens into mutations
# (it is not a "memory map" option)
afl-fuzz -i corpus -o findings -x corpus/dictionary.txt ./target
# Build QEMU support first (see Installation)
# Then fuzz binary without source code (-Q enables QEMU binary-only mode)

afl-fuzz -i corpus -o findings -Q ./binary_target

# For ARM binaries (QEMU must have been built with CPU_TARGET=arm)
afl-fuzz -i corpus -o findings -Q -m 100 ./arm_binary
# QEMU mode is ~2-5x slower than native instrumentation
# Increase timeout for QEMU targets
afl-fuzz -i corpus -o findings -Q -t 5000 ./binary

# Reduce memory overhead
afl-fuzz -i corpus -o findings -Q -m 50 ./binary
// target.c - Persistent mode example.
// __AFL_LOOP is a macro injected by afl-cc at compile time; the process is
// re-forked only every 1000 iterations instead of once per input, which is
// dramatically faster than plain fork-server mode.
#include <unistd.h>

int main() {
    while (__AFL_LOOP(1000)) {
        unsigned char buf[1024];
        // One testcase per iteration, delivered on stdin (fd 0).
        ssize_t n = read(0, buf, sizeof(buf));
        if (n <= 0) break;
        
        // Process input — process_input() is assumed defined elsewhere
        // in the real target; this snippet does not compile standalone.
        process_input(buf, n);
    }
    return 0;
}
# Compile with AFL++ (afl-cc provides the __AFL_LOOP macro used by the target)
afl-cc -o persistent_target target.c

# Fuzz persistent target (no timeout needed per iteration)
afl-fuzz -i corpus -o findings ./persistent_target
# Enable shared memory mode (automatic in most cases)
afl-fuzz -i corpus -o findings ./target

# Check /dev/shm for AFL++ memory maps
ls -la /dev/shm/afl-*

# Increase shared memory map limit if needed (takes effect via sysctl -p)
echo "vm.max_map_count=262144" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
// custom_mutator.c - minimal AFL++ custom mutator example.
// Built as a standalone shared object; AFL++ dlopen()s it and resolves the
// afl_custom_* symbols, so no AFL++ header is needed for this simple case
// (the original '#include "afl-cc.h"' does not exist and broke the build).
#include <stdlib.h>
#include <string.h>   /* memcpy — was missing in the original */

// Called once when the mutator is loaded. 'data' is AFL++'s opaque state
// handle (unused here); 'seed' seeds this mutator's RNG.
// Per the custom-mutator API this must return a void* to mutator-private
// state (NULL when none is kept) — not size_t as the original declared.
void *afl_custom_init(void *data, unsigned int seed) {
    srand(seed);
    return NULL;
}

// Produce one mutated testcase: copy 'data' (size bytes) into 'out'
// (capacity max_size) and flip a single random bit. 'add_buf' is an
// optional splice donor, unused here. Returns bytes written to 'out'.
// NOTE(review): current AFL++ versions name the entry point afl_custom_fuzz
// with an out-buffer-pointer signature — check docs/custom_mutators.md for
// the exact API of the AFL++ release you target.
size_t afl_custom_mutate(unsigned char *data, size_t size,
                         unsigned char *out, size_t max_size,
                         unsigned char *add_buf, size_t add_size) {
    // Copy the input, truncated to the output capacity.
    size_t out_size = size < max_size ? size : max_size;
    memcpy(out, data, out_size);

    // Flip one random bit somewhere in the output.
    if (out_size > 0) {
        out[rand() % out_size] ^= (unsigned char)(1u << (rand() % 8));
    }

    return out_size;
}
# Compile mutator library as a shared object
gcc -shared -fPIC -o custom_mutator.so custom_mutator.c

# Use with AFL++: custom mutators are loaded via an environment variable.
# NOTE: afl-fuzz's -l flag sets the CMPLOG level, it does NOT load mutators.
AFL_CUSTOM_MUTATOR_LIBRARY=./custom_mutator.so afl-fuzz -i corpus -o findings ./target
# Crashes stored in findings/crashes directory
ls -la findings/crashes/

# Copy crash for analysis
cp findings/crashes/id:000000,sig:* /tmp/crash_input

# Reproduce crash manually (stdin-reading target, then argv-reading target)
./target < /tmp/crash_input
./target /tmp/crash_input
# Minimize the crashing input with afl-tmin (shrinks it while keeping the crash)
afl-tmin -i findings/crashes/id:000000* -o /tmp/minimized_crash -- ./target

# Run with GDB for detailed analysis
gdb --args ./target /tmp/crash_input
(gdb) run
(gdb) bt  # Backtrace
(gdb) info locals
# Reduce crash input to minimal size
afl-tmin -i crash_input -o minimized_input -- ./target

# For targets that take the input as a file argument (@@ placeholder)
afl-tmin -i crash_input -o minimized_input -- ./target @@

# Crash minimization with a larger per-run timeout (ms)
afl-tmin -i crash_input -o minimized_input -t 5000 -- ./target
# Create coverage-instrumented build (the 'CC=afl-cc' env prefix in the
# original was redundant — afl-cc is already invoked directly)
afl-cc -fprofile-arcs -ftest-coverage -o target_cov target.c

# Run fuzzer (generates .gcda coverage data as inputs execute)
afl-fuzz -i corpus -o findings ./target_cov

# Generate coverage report with gcov
gcov target.c
cat target.c.gcov
# Install lcov
sudo apt-get install lcov

# Create baseline coverage (zero-execution state, before any fuzzing)
lcov --capture --initial --directory . --output-file coverage_base.info

# Run fuzzer, then capture coverage
lcov --capture --directory . --output-file coverage_test.info

# Combine baseline and test data, then render an HTML report
lcov --add-tracefile coverage_base.info --add-tracefile coverage_test.info \
     --output-file coverage_total.info
genhtml coverage_total.info --output-directory coverage_report
# Create dictionary file (one quoted token per line)
cat > mydict.txt << 'EOF'
"GET "
"HTTP/1.1"
"Content-Length:"
"\r\n"
"400"
"500"
EOF
# Use dictionary during fuzzing
afl-fuzz -i corpus -o findings -x mydict.txt ./target

# AFL++ can also build a dictionary automatically during compilation
# (LTO autodictionary) or at runtime with CMPLOG — not from crashing inputs
afl-fuzz -i corpus -o findings ./target  # Builds tokens automatically
# Default schedule is 'explore' (used implicitly when -p is omitted)
afl-fuzz -i corpus -o findings ./target

# Coverage-guided exploration of new paths.
# NOTE: 'cov' is not a valid -p value; the schedule is called 'explore'.
afl-fuzz -i corpus -o findings -p explore ./target

# Fast schedule (speed-optimized, a good general-purpose choice)
afl-fuzz -i corpus -o findings -p fast ./target

# Rare schedule (focus energy on rarely-exercised edges)
afl-fuzz -i corpus -o findings -p rare ./target

# Exploit schedule (aggressive focus on known-interesting seeds).
# NOTE: 'exp' is not a valid -p value; valid schedules include
# explore, fast, coe, lin, quad, exploit, mmopt, rare, seek.
afl-fuzz -i corpus -o findings -p exploit ./target
# AFL_SKIP_CRASHES: tolerate crashing seed inputs at startup instead of
# aborting — it does NOT disable crash reporting.
# NOTE(review): deprecated in recent AFL++ where this is the default; verify
# against your version's docs/env_variables.md.
export AFL_SKIP_CRASHES=1
afl-fuzz -i corpus -o findings ./target

# Pin CPUs to max frequency to avoid scaling-induced throughput jitter
echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor

# Increase file descriptor limit
ulimit -n 65536

# AFL_DISABLE_TRIM: skip testcase trimming (less overhead, larger queue
# entries) — it has nothing to do with memory protection.
export AFL_DISABLE_TRIM=1
afl-fuzz -i corpus -o findings ./target
# Compile with aggressive optimization
afl-cc -O3 -march=native -o target target.c

# LTO mode (faster, collision-free coverage): use the dedicated
# afl-clang-lto wrapper — '-flto=full' alone does not enable AFL++ LTO mode.
afl-clang-lto -O3 -o target target.c

# Minimize target size (strip symbols in release builds)
afl-cc -O3 -s -o target target.c
# Minimize corpus before fuzzing (keeps the smallest coverage-equivalent set)
afl-cmin -i corpus -o corpus_min -- ./target

# Deduplicate a queue by coverage
afl-cmin -i findings/queue -o reduced_queue -- ./target

# CMPLOG (Redqueen): -c takes a separate CMPLOG-instrumented binary to help
# solve magic-value comparisons — it does NOT "collect coverage".
afl-fuzz -i corpus -o findings -c ./target.cmplog ./target
# Interactive status display (during fuzzing)
# Shown automatically during run, displays:
# - Cycles completed
# - Execs/sec (executions per second)
# - Corpus size
# - Crashes found
# - Hangs detected

# After fuzzing, check results (queue/, crashes/, hangs/ live under the -o dir)
ls -la findings/
find findings/crashes -type f | wc -l  # Count crashes (NB: includes README.txt)
find findings/hangs -type f | wc -l    # Count hangs
# Summary of findings. Use find instead of parsing ls, and exclude the
# README.txt that AFL++ drops into crashes/ (the original counted it as a crash).
echo "=== Fuzzing Results ==="
echo "Crashes: $(find findings/crashes -type f ! -name 'README.txt' 2>/dev/null | wc -l)"
echo "Hangs: $(find findings/hangs -type f ! -name 'README.txt' 2>/dev/null | wc -l)"
echo "Corpus size: $(find findings/queue -type f 2>/dev/null | wc -l)"

# Reproduce all crashes (match only id:* entries so README.txt is skipped)
for crash in findings/crashes/id:*; do
    [ -e "$crash" ] || continue   # handle the no-match glob case
    ./target < "$crash" 2>&1 | head -5
done