#!/usr/bin/env python3
"""
Economy Optimizer for Village Simulation

This script runs multiple simulations with different configurations to find
optimal parameters for a balanced, active economy with:
- Active trading
- Diverse resource production (including hunting)
- Good survival rates
- Wealth accumulation and circulation

Usage:
    python tools/optimize_economy.py [--iterations 20] [--steps 500] [--population 6] [--quick-test]
"""

import argparse
import json
import random
import re
import sys
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Optional

# Add parent directory for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

from backend.config import get_config, reload_config
from backend.core.engine import GameEngine
from backend.core.logger import reset_simulation_logger
from backend.domain.action import reset_action_config_cache
from backend.domain.resources import reset_resource_cache


@dataclass
class SimulationMetrics:
    """Metrics collected from a simulation run."""
    total_turns: int = 0
    completed_trades: int = 0
    market_listings: int = 0
    total_deaths: int = 0
    final_population: int = 0

    # Action diversity
    hunt_actions: int = 0
    gather_actions: int = 0
    chop_wood_actions: int = 0
    get_water_actions: int = 0
    trade_actions: int = 0
    trade_success: int = 0

    # Resource diversity
    meat_produced: int = 0
    berries_produced: int = 0
    wood_produced: int = 0
    water_produced: int = 0

    # Trade diversity (actual completed trades)
    trades_meat: int = 0
    trades_berries: int = 0
    trades_wood: int = 0
    trades_water: int = 0

    # Economy
    total_trade_value: int = 0

    @property
    def hunt_ratio(self) -> float:
        """Ratio of hunting to gathering."""
        total_food = self.hunt_actions + self.gather_actions
        return self.hunt_actions / total_food if total_food > 0 else 0

    @property
    def trade_success_rate(self) -> float:
        """Success rate of trade actions."""
        return self.trade_success / self.trade_actions if self.trade_actions > 0 else 0

    @property
    def survival_rate(self) -> float:
        """Fraction of agents surviving."""
        initial = 10  # Assumes the default of 10 initial agents
        return self.final_population / initial if initial > 0 else 0

    @property
    def trades_per_turn(self) -> float:
        """Average trades per turn."""
        return self.completed_trades / self.total_turns if self.total_turns > 0 else 0

    @property
    def trade_diversity(self) -> float:
        """How diverse are traded items (0-1 score)."""
        trades = [self.trades_meat, self.trades_berries, self.trades_wood, self.trades_water]
        total = sum(trades)
        if total == 0:
            return 0
        # Simple diversity: fraction of the 4 resource types traded at least once
        return len([t for t in trades if t > 0]) / 4

    @property
    def production_diversity(self) -> float:
        """How diverse is resource production."""
        prod = [self.meat_produced, self.berries_produced, self.wood_produced, self.water_produced]
        total = sum(prod)
        if total == 0:
            return 0
        return len([p for p in prod if p > 0]) / 4
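
    # Worked example of the rubric in score() below (illustrative numbers only):
    # 0.5 trades/turn -> 25 pts, all four resources traded -> 20 pts, 20% hunt ratio -> 15 pts,
    # 50% survival -> 20 pts, 50% trade success -> 10 pts, full production diversity -> 10 pts,
    # for a total of 100.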
    def score(self) -> float:
        """Calculate overall economy health score (0-100)."""
        score = 0

        # Trading activity (0-25 points)
        # Target: at least 0.5 trades per turn
        trade_score = min(25, self.trades_per_turn * 50)
        score += trade_score

        # Trade diversity (0-20 points)
        # Target: all 4 resource types being traded
        score += self.trade_diversity * 20

        # Hunt ratio (0-15 points)
        # Target: at least 20% hunting
        hunt_score = min(15, self.hunt_ratio * 75)
        score += hunt_score

        # Survival rate (0-20 points)
        # Target: at least 50% survival
        survival_score = min(20, self.survival_rate * 40)
        score += survival_score

        # Trade success rate (0-10 points)
        # Target: at least 50% success
        trade_success_score = min(10, self.trade_success_rate * 20)
        score += trade_success_score

        # Production diversity (0-10 points)
        score += self.production_diversity * 10

        return score
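

# The override dicts passed to run_quick_simulation() mirror the top-level sections of
# config.json and are merged section by section, e.g. (illustrative values only):
#   {"actions": {"hunt_energy": -5}, "economy": {"min_wealth_target": 30}}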
def run_quick_simulation(config_overrides: dict, num_steps: int = 500, num_agents: int = 10) -> SimulationMetrics:
    """Run a simulation with custom config and return metrics."""
    # Apply config overrides
    config_path = Path("config.json")
    with open(config_path) as f:
        config = json.load(f)

    # Merge overrides section by section
    for section, values in config_overrides.items():
        if section in config:
            config[section].update(values)
        else:
            config[section] = values

    # Save temp config
    temp_config = Path("config_temp.json")
    with open(temp_config, 'w') as f:
        json.dump(config, f, indent=2)

    # Reload config and clear cached config-derived data
    reload_config(str(temp_config))
    reset_action_config_cache()
    reset_resource_cache()

    # Initialize engine (reset the singleton so each run starts from a fresh instance)
    GameEngine._instance = None
    engine = GameEngine()
    engine.reset()
    engine.world.config.initial_agents = num_agents
    engine.world.initialize()

    # Suppress logging
    import logging
    logging.getLogger("simulation").setLevel(logging.ERROR)

    metrics = SimulationMetrics()

    # Run simulation
    for step in range(num_steps):
        if not engine.is_running:
            break

        turn_log = engine.next_step()
        metrics.total_turns += 1

        # Process actions
        for action_data in turn_log.agent_actions:
            decision = action_data.get("decision", {})
            result = action_data.get("result", {})
            action_type = decision.get("action", "")

            # Count actions
            if action_type == "hunt":
                metrics.hunt_actions += 1
            elif action_type == "gather":
                metrics.gather_actions += 1
            elif action_type == "chop_wood":
                metrics.chop_wood_actions += 1
            elif action_type == "get_water":
                metrics.get_water_actions += 1
            elif action_type == "trade":
                metrics.trade_actions += 1
                if result and result.get("success"):
                    metrics.trade_success += 1

                # Parse trade message (expected forms: "Listed ..." / "Bought 3 meat for 12c")
                message = result.get("message", "")
                if "Listed" in message:
                    metrics.market_listings += 1
                elif "Bought" in message:
                    match = re.search(r"Bought (\d+) (\w+) for (\d+)c", message)
                    if match:
                        qty = int(match.group(1))
                        res = match.group(2)
                        value = int(match.group(3))
                        metrics.completed_trades += 1
                        metrics.total_trade_value += value

                        if res == "meat":
                            metrics.trades_meat += 1
                        elif res == "berries":
                            metrics.trades_berries += 1
                        elif res == "wood":
                            metrics.trades_wood += 1
                        elif res == "water":
                            metrics.trades_water += 1

            # Track production
            if result and result.get("success"):
                for res in result.get("resources_gained", []):
                    res_type = res.get("type", "")
                    qty = res.get("quantity", 0)
                    if res_type == "meat":
                        metrics.meat_produced += qty
                    elif res_type == "berries":
                        metrics.berries_produced += qty
                    elif res_type == "wood":
                        metrics.wood_produced += qty
                    elif res_type == "water":
                        metrics.water_produced += qty

        # Process deaths
        metrics.total_deaths += len(turn_log.deaths)

    metrics.final_population = len(engine.world.get_living_agents())

    # Cleanup
    engine.logger.close()
    temp_config.unlink(missing_ok=True)

    return metrics


def generate_random_config() -> dict:
    """Generate a random configuration to test."""
    return {
        "agent_stats": {
            "start_hunger": random.randint(70, 90),
            "start_thirst": random.randint(60, 80),
            "hunger_decay": random.randint(1, 3),
            "thirst_decay": random.randint(2, 4),
            "heat_decay": random.randint(1, 3),
        },
        "resources": {
            "meat_hunger": random.randint(30, 50),
            "berries_hunger": random.randint(8, 15),
            "water_thirst": random.randint(40, 60),
        },
        "actions": {
            "hunt_energy": random.randint(-9, -5),
            "gather_energy": random.randint(-5, -3),
            "hunt_success": round(random.uniform(0.6, 0.9), 2),
            "hunt_meat_min": random.randint(2, 3),
            "hunt_meat_max": random.randint(4, 6),
        },
        "economy": {
            "energy_to_money_ratio": round(random.uniform(1.0, 2.0), 2),
            "buy_efficiency_threshold": round(random.uniform(0.6, 0.9), 2),
            "wealth_desire": round(random.uniform(0.2, 0.5), 2),
            "min_wealth_target": random.randint(30, 80),
        },
    }


def mutate_config(config: dict, mutation_rate: float = 0.3) -> dict:
    """Mutate a configuration slightly."""
    new_config = json.loads(json.dumps(config))  # Deep copy

    for section, values in new_config.items():
        for key, value in values.items():
            if random.random() < mutation_rate:
                if isinstance(value, int):
                    # Mutate by up to ±20%
                    delta = max(1, abs(value) // 5)
                    new_config[section][key] = value + random.randint(-delta, delta)
                elif isinstance(value, float):
                    # Mutate by up to ±10%
                    delta = abs(value) * 0.1
                    new_config[section][key] = round(value + random.uniform(-delta, delta), 2)

    return new_config
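

# Example of the mutation step above (illustrative): with mutation_rate=0.3, roughly 30% of
# keys are touched; an int value of 40 moves within ±8 (about 20%), and a float of 0.8
# moves within ±0.08 (about 10%).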
def crossover_configs(config1: dict, config2: dict) -> dict:
    """Crossover two configurations (uniform, per-key)."""
    new_config = {}
    for section in set(config1.keys()) | set(config2.keys()):
        if section in config1 and section in config2:
            new_config[section] = {}
            for key in set(config1[section].keys()) | set(config2[section].keys()):
                if key in config1[section] and key in config2[section]:
                    # Pick either parent's value with equal probability
                    source = config1 if random.random() < 0.5 else config2
                    new_config[section][key] = source[section][key]
                elif key in config1[section]:
                    new_config[section][key] = config1[section][key]
                else:
                    new_config[section][key] = config2[section][key]
        elif section in config1:
            new_config[section] = config1[section].copy()
        else:
            new_config[section] = config2[section].copy()
    return new_config


def print_metrics(metrics: SimulationMetrics, config: Optional[dict] = None):
    """Print metrics in a nice format."""
    print(f"\n 📊 Score: {metrics.score():.1f}/100")
    print(f" ├─ Trades: {metrics.completed_trades} ({metrics.trades_per_turn:.2f}/turn)")
    print(f" ├─ Trade diversity: {metrics.trade_diversity*100:.0f}%")
    print(f" │ └─ meat:{metrics.trades_meat} berries:{metrics.trades_berries} wood:{metrics.trades_wood} water:{metrics.trades_water}")
    print(f" ├─ Hunt ratio: {metrics.hunt_ratio*100:.1f}% ({metrics.hunt_actions}/{metrics.hunt_actions + metrics.gather_actions})")
    print(f" ├─ Survival: {metrics.survival_rate*100:.0f}% ({metrics.final_population} alive)")
    print(f" └─ Trade success: {metrics.trade_success_rate*100:.0f}%")
def optimize_economy(iterations: int = 20, steps_per_sim: int = 500, population_size: int = 6):
    """Run genetic optimization to find best config."""
    print("\n" + "=" * 70)
    print("🧬 ECONOMY OPTIMIZER - Genetic Algorithm")
    print("=" * 70)
    print(f" Iterations: {iterations}")
    print(f" Steps per simulation: {steps_per_sim}")
    print(f" Population size: {population_size}")
    print("=" * 70)

    # Initialize population with random configs
    population = []

    # Include a "sane defaults" config as baseline
    baseline_config = {
        "agent_stats": {
            "start_hunger": 80,
            "start_thirst": 70,
            "hunger_decay": 2,
            "thirst_decay": 3,
            "heat_decay": 2,
        },
        "resources": {
            "meat_hunger": 40,
            "berries_hunger": 10,
            "water_thirst": 50,
        },
        "actions": {
            "hunt_energy": -6,  # Reduced from -7
            "gather_energy": -4,
            "hunt_success": 0.80,  # Increased from 0.75
            "hunt_meat_min": 3,  # Increased from 2
            "hunt_meat_max": 5,  # Increased from 4
        },
        "economy": {
            "energy_to_money_ratio": 1.2,
            "buy_efficiency_threshold": 0.85,  # More willing to buy
            "wealth_desire": 0.25,
            "min_wealth_target": 40,
        },
    }
    population.append(baseline_config)

    # Add a hunting-focused config
    hunt_focused = {
        "agent_stats": {
            "start_hunger": 75,
            "start_thirst": 65,
            "hunger_decay": 2,
            "thirst_decay": 3,
            "heat_decay": 2,
        },
        "resources": {
            "meat_hunger": 50,  # More valuable meat
            "meat_energy": 15,  # More energy from meat
            "berries_hunger": 8,  # Less valuable berries
            "water_thirst": 50,
        },
        "actions": {
            "hunt_energy": -5,  # Cheaper hunting
            "gather_energy": -5,  # Same as hunting
            "hunt_success": 0.85,  # Higher success
            "hunt_meat_min": 3,
            "hunt_meat_max": 5,
            "hunt_hide_min": 1,  # Always get hide
            "hunt_hide_max": 2,
        },
        "economy": {
            "energy_to_money_ratio": 1.5,
            "buy_efficiency_threshold": 0.9,  # Very willing to buy
            "wealth_desire": 0.3,
            "min_wealth_target": 30,
        },
    }
    population.append(hunt_focused)

    # Fill the rest with random configs
    while len(population) < population_size:
        population.append(generate_random_config())

    best_config = None
    best_score = 0
    best_metrics = None

    for gen in range(iterations):
        print(f"\n📍 Generation {gen + 1}/{iterations}")
        print("-" * 40)

        # Evaluate all configs
        scored_population = []
        for i, config in enumerate(population):
            sys.stdout.write(f"\r Evaluating config {i + 1}/{len(population)}...")
            sys.stdout.flush()

            metrics = run_quick_simulation(config, steps_per_sim)
            score = metrics.score()
            scored_population.append((config, metrics, score))

        # Sort by score
        scored_population.sort(key=lambda x: x[2], reverse=True)

        # Print top results
        print("\r Top configs this generation:")
        for i, (config, metrics, score) in enumerate(scored_population[:3]):
            print(f"\n #{i + 1}: Score {score:.1f}")
            print_metrics(metrics)

        # Track best overall
        if scored_population[0][2] > best_score:
            best_config = scored_population[0][0]
            best_score = scored_population[0][2]
            best_metrics = scored_population[0][1]
            print(f"\n ⭐ New best score: {best_score:.1f}")

        # Create next generation
        new_population = []

        # Keep top 2 (elitism)
        new_population.append(scored_population[0][0])
        new_population.append(scored_population[1][0])

        # Crossover and mutate
        while len(new_population) < population_size:
            # Select parents from the top performers
            parent1 = random.choice(scored_population[:3])[0]
            parent2 = random.choice(scored_population[:4])[0]

            # Crossover
            child = crossover_configs(parent1, parent2)

            # Mutate
            child = mutate_config(child, mutation_rate=0.25)

            new_population.append(child)

        population = new_population

    print("\n" + "=" * 70)
    print("🏆 OPTIMIZATION COMPLETE")
    print("=" * 70)

    print(f"\n Best Score: {best_score:.1f}/100")
    print_metrics(best_metrics)

    print("\n 📝 Best Configuration:")
    print("-" * 40)
    print(json.dumps(best_config, indent=2))

    # Save best config
    output_path = Path("config_optimized.json")

    # Merge with original config
    with open("config.json") as f:
        full_config = json.load(f)

    for section, values in best_config.items():
        if section in full_config:
            full_config[section].update(values)
        else:
            full_config[section] = values

    with open(output_path, 'w') as f:
        json.dump(full_config, f, indent=2)

    print(f"\n ✅ Saved to: {output_path}")
    print("\n To apply: cp config_optimized.json config.json")

    return best_config, best_metrics


def quick_test_config(config_overrides: dict, steps: int = 300):
    """Quickly test a specific config."""
    print("\n🧪 Testing configuration...")
    print("-" * 40)
    print(json.dumps(config_overrides, indent=2))
    print("-" * 40)

    metrics = run_quick_simulation(config_overrides, steps)
    print_metrics(metrics)
    return metrics


def main():
    parser = argparse.ArgumentParser(description="Optimize Village Simulation economy")
    parser.add_argument("--iterations", "-i", type=int, default=15, help="Optimization iterations")
    parser.add_argument("--steps", "-s", type=int, default=400, help="Steps per simulation")
    parser.add_argument("--population", "-p", type=int, default=6, help="Population size for GA")
    parser.add_argument("--quick-test", "-q", action="store_true", help="Quick test of preset config")

    args = parser.parse_args()

    if args.quick_test:
        # Test a specific configuration
        test_config = {
            "agent_stats": {
                "start_hunger": 75,
                "hunger_decay": 2,
                "thirst_decay": 3,
            },
            "resources": {
                "meat_hunger": 45,
                "meat_energy": 12,
                "berries_hunger": 8,
            },
            "actions": {
                "hunt_energy": -5,
                "gather_energy": -5,
                "hunt_success": 0.85,
                "hunt_meat_min": 3,
                "hunt_meat_max": 5,
            },
            "economy": {
                "buy_efficiency_threshold": 0.9,
                "min_wealth_target": 30,
            },
        }
        quick_test_config(test_config, args.steps)
    else:
        optimize_economy(args.iterations, args.steps, args.population)


if __name__ == "__main__":
    main()