[upd] Enhance performance configuration and logging optimizations in simulation engine
This commit is contained in:
parent d190e3efe5
commit b1b256e520
@@ -250,9 +250,28 @@ class SinksConfig:
     fire_wood_cost_per_night: int = 1  # Wood consumed to stay warm at night


+@dataclass
+class PerformanceConfig:
+    """Configuration for performance optimization.
+
+    Controls logging and memory usage to keep simulation fast at high turn counts.
+    """
+    # Logging control
+    logging_enabled: bool = False  # Enable file logging (disable for speed)
+    detailed_logging: bool = False  # Enable verbose per-agent logging
+    log_flush_interval: int = 50  # Flush logs every N turns (not every turn)
+
+    # Memory management
+    max_turn_logs: int = 100  # Keep only last N turn logs in memory
+
+    # Statistics calculation frequency
+    stats_update_interval: int = 10  # Update expensive stats every N turns
+
+
 @dataclass
 class SimulationConfig:
     """Master configuration containing all sub-configs."""
+    performance: PerformanceConfig = field(default_factory=PerformanceConfig)
     agent_stats: AgentStatsConfig = field(default_factory=AgentStatsConfig)
     resources: ResourceConfig = field(default_factory=ResourceConfig)
     actions: ActionConfig = field(default_factory=ActionConfig)
@@ -270,6 +289,7 @@ class SimulationConfig:
     def to_dict(self) -> dict:
         """Convert to dictionary."""
         return {
+            "performance": asdict(self.performance),
             "ai": asdict(self.ai),
             "agent_stats": asdict(self.agent_stats),
             "resources": asdict(self.resources),
@@ -287,6 +307,7 @@ class SimulationConfig:
     def from_dict(cls, data: dict) -> "SimulationConfig":
         """Create from dictionary."""
         return cls(
             performance=PerformanceConfig(**data.get("performance", {})),
+            ai=AIConfig(**data.get("ai", {})),
             agent_stats=AgentStatsConfig(**data.get("agent_stats", {})),
             resources=ResourceConfig(**data.get("resources", {})),
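The new `performance` block round-trips through `to_dict`/`from_dict` like the other sub-configs: missing keys fall back to the dataclass defaults. A minimal, self-contained sketch of that override pattern, using stripped-down classes defined locally for illustration rather than importing `backend.config`:

    from dataclasses import dataclass, field, asdict

    @dataclass
    class PerformanceConfig:
        logging_enabled: bool = False
        log_flush_interval: int = 50
        stats_update_interval: int = 10

    @dataclass
    class SimulationConfig:
        performance: PerformanceConfig = field(default_factory=PerformanceConfig)

        @classmethod
        def from_dict(cls, data: dict) -> "SimulationConfig":
            # Keys absent from the dict keep their dataclass defaults
            return cls(performance=PerformanceConfig(**data.get("performance", {})))

    cfg = SimulationConfig.from_dict({"performance": {"stats_update_interval": 25}})
    assert cfg.performance.stats_update_interval == 25   # overridden
    assert cfg.performance.log_flush_interval == 50      # default kept
    print(asdict(cfg))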
@@ -387,3 +408,9 @@ def _reset_all_caches() -> None:
     except ImportError:
         pass

+    try:
+        from backend.core.ai import reset_ai_config_cache
+        reset_ai_config_cache()
+    except ImportError:
+        pass
@@ -101,18 +101,34 @@ def get_energy_cost(resource_type: ResourceType) -> int:
     return int(energy_cost / avg_output)


+# Cached config values to avoid repeated lookups
+_cached_ai_config = None
+_cached_economy_config = None
+
+
 def _get_ai_config():
-    """Get AI-relevant configuration values."""
-    from backend.config import get_config
-    config = get_config()
-    return config.agent_stats
+    """Get AI-relevant configuration values (cached)."""
+    global _cached_ai_config
+    if _cached_ai_config is None:
+        from backend.config import get_config
+        _cached_ai_config = get_config().agent_stats
+    return _cached_ai_config


 def _get_economy_config():
-    """Get economy/market configuration values."""
-    from backend.config import get_config
-    config = get_config()
-    return getattr(config, 'economy', None)
+    """Get economy/market configuration values (cached)."""
+    global _cached_economy_config
+    if _cached_economy_config is None:
+        from backend.config import get_config
+        _cached_economy_config = getattr(get_config(), 'economy', None)
+    return _cached_economy_config


+def reset_ai_config_cache():
+    """Reset the cached config values (call after config reload)."""
+    global _cached_ai_config, _cached_economy_config
+    _cached_ai_config = None
+    _cached_economy_config = None
+
+
 class AgentAI:
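The pattern above is plain module-level memoization: compute the value on first use, reset it explicitly when the config is reloaded (which `_reset_all_caches` does via `reset_ai_config_cache`). A generic, self-contained sketch of the same idea; the names here are illustrative, not from the codebase:

    from typing import Optional

    _cached_settings: Optional[dict] = None

    def load_settings() -> dict:
        """Stand-in for an expensive lookup (e.g. parsing a config file)."""
        return {"stats_update_interval": 10}

    def get_settings() -> dict:
        global _cached_settings
        if _cached_settings is None:       # populated only on the first call
            _cached_settings = load_settings()
        return _cached_settings

    def reset_settings_cache() -> None:
        """Call after the underlying config changes so the next read is fresh."""
        global _cached_settings
        _cached_settings = None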
@@ -299,7 +299,12 @@ class GameEngine:
             self.is_running = False
             self.logger.close()

+        # Keep turn_logs bounded to prevent memory growth
+        max_logs = get_config().performance.max_turn_logs
         self.turn_logs.append(turn_log)
+        if len(self.turn_logs) > max_logs:
+            # Remove oldest logs, keep only recent ones
+            self.turn_logs = self.turn_logs[-max_logs:]
         return turn_log

     def _mark_dead_agents(self, current_turn: int) -> list[Agent]:
@@ -342,6 +347,8 @@ class GameEngine:

         for agent in to_remove:
             self.world.agents.remove(agent)
+            # Remove from index as well
+            self.world._agent_index.pop(agent.id, None)
             # Note: death was already recorded in _mark_dead_agents

         return to_remove
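Bounding `turn_logs` by slicing works; the same retention policy can also be expressed with `collections.deque(maxlen=...)`, which evicts the oldest entry automatically. A small sketch of that alternative (a design note, not what the commit does):

    from collections import deque

    MAX_TURN_LOGS = 100  # would come from PerformanceConfig.max_turn_logs

    turn_logs: deque = deque(maxlen=MAX_TURN_LOGS)

    for turn in range(250):
        turn_logs.append({"turn": turn})  # oldest entries are dropped automatically

    assert len(turn_logs) == MAX_TURN_LOGS
    assert turn_logs[0]["turn"] == 150    # only the most recent 100 remain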
@@ -1,4 +1,7 @@
-"""Simulation logger for detailed step-by-step logging."""
+"""Simulation logger for detailed step-by-step logging.
+
+Performance-optimized: logging can be disabled or reduced via config.
+"""

 import json
 import logging
@@ -58,61 +61,77 @@ class TurnLogEntry:


 class SimulationLogger:
-    """Logger that dumps detailed simulation data to files."""
+    """Logger that dumps detailed simulation data to files.
+
+    Performance optimized:
+    - Logging can be disabled entirely via config
+    - File flushing is batched (not every turn)
+    - Agent lookups use O(1) dict instead of O(n) list search
+    - No in-memory accumulation of all entries
+    """

     def __init__(self, log_dir: str = "logs"):
         self.log_dir = Path(log_dir)

+        # Load performance config
+        from backend.config import get_config
+        perf_config = get_config().performance
+        self.logging_enabled = perf_config.logging_enabled
+        self.detailed_logging = perf_config.detailed_logging
+        self.flush_interval = perf_config.log_flush_interval
+
+        # File handles (only created if logging enabled)
+        self._json_file: Optional[TextIO] = None
+        self._summary_file: Optional[TextIO] = None
+
+        # Standard Python logging (minimal overhead even when enabled)
+        self.logger = logging.getLogger("simulation")
+        self.logger.setLevel(logging.WARNING)  # Only warnings by default
+
+        # Current turn tracking
+        self._current_entry: Optional[TurnLogEntry] = None
+        # O(1) lookup for agent entries by ID
+        self._agent_entry_map: dict[str, AgentLogEntry] = {}
+
+        # Turn counter for flush batching
+        self._turns_since_flush = 0
+
+    def start_session(self, config: dict) -> None:
+        """Start a new logging session."""
+        if not self.logging_enabled:
+            return
+
         self.log_dir.mkdir(exist_ok=True)

         # Create session-specific log file
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        self.session_file = self.log_dir / f"sim_{timestamp}.jsonl"
-        self.summary_file = self.log_dir / f"sim_{timestamp}_summary.txt"
+        session_file = self.log_dir / f"sim_{timestamp}.jsonl"
+        summary_file = self.log_dir / f"sim_{timestamp}_summary.txt"

-        # File handles
-        self._json_file: Optional[TextIO] = None
-        self._summary_file: Optional[TextIO] = None
-
-        # Also set up standard Python logging
-        self.logger = logging.getLogger("simulation")
-        self.logger.setLevel(logging.DEBUG)
-
-        # File handler for detailed logs
-        file_handler = logging.FileHandler(self.log_dir / f"sim_{timestamp}.log")
-        file_handler.setLevel(logging.DEBUG)
-        file_handler.setFormatter(logging.Formatter(
-            "%(asctime)s | %(levelname)s | %(message)s"
-        ))
-        self.logger.addHandler(file_handler)
-
-        # Console handler for important events
-        console_handler = logging.StreamHandler()
-        console_handler.setLevel(logging.INFO)
-        console_handler.setFormatter(logging.Formatter(
-            "%(asctime)s | %(message)s", datefmt="%H:%M:%S"
-        ))
-        self.logger.addHandler(console_handler)
-
-        self._entries: list[TurnLogEntry] = []
-        self._current_entry: Optional[TurnLogEntry] = None
-
-    def start_session(self, config: dict) -> None:
-        """Start a new logging session."""
-        self._json_file = open(self.session_file, "w")
-        self._summary_file = open(self.summary_file, "w")
+        self._json_file = open(session_file, "w")
+        self._summary_file = open(summary_file, "w")

         # Write config as first line
         self._json_file.write(json.dumps({"type": "config", "data": config}) + "\n")
         self._json_file.flush()

         self._summary_file.write(f"Simulation Session Started: {datetime.now()}\n")
         self._summary_file.write("=" * 60 + "\n\n")
         self._summary_file.flush()

-        self.logger.info(f"Logging session started: {self.session_file}")
+        if self.detailed_logging:
+            # Set up file handler for detailed logs
+            file_handler = logging.FileHandler(self.log_dir / f"sim_{timestamp}.log")
+            file_handler.setLevel(logging.DEBUG)
+            file_handler.setFormatter(logging.Formatter(
+                "%(asctime)s | %(levelname)s | %(message)s"
+            ))
+            self.logger.addHandler(file_handler)
+            self.logger.setLevel(logging.DEBUG)
     def start_turn(self, turn: int, day: int, step_in_day: int, time_of_day: str) -> None:
         """Start logging a new turn."""
+        if not self.logging_enabled:
+            return
+
         self._current_entry = TurnLogEntry(
             turn=turn,
             day=day,
@@ -120,7 +139,10 @@ class SimulationLogger:
             time_of_day=time_of_day,
             timestamp=datetime.now().isoformat(),
         )
-        self.logger.debug(f"Turn {turn} started (Day {day}, Step {step_in_day}, {time_of_day})")
+        self._agent_entry_map.clear()
+
+        if self.detailed_logging:
+            self.logger.debug(f"Turn {turn} started (Day {day}, Step {step_in_day}, {time_of_day})")
     def log_agent_before(
         self,
@@ -133,39 +155,41 @@ class SimulationLogger:
         money: int,
     ) -> None:
         """Log agent state before action."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return

-        # Create placeholder entry
+        # Create entry and add to both list and map
         entry = AgentLogEntry(
             agent_id=agent_id,
             agent_name=agent_name,
             profession=profession,
-            position=position.copy(),
-            stats_before=stats.copy(),
+            position=position,
+            stats_before=stats,
             stats_after={},
             decision={},
             action_result={},
-            inventory_before=inventory.copy(),
+            inventory_before=inventory,
             inventory_after=[],
             money_before=money,
             money_after=money,
         )
         self._current_entry.agent_entries.append(entry)
+        self._agent_entry_map[agent_id] = entry

     def log_agent_decision(self, agent_id: str, decision: dict) -> None:
         """Log agent's AI decision."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return

-        for entry in self._current_entry.agent_entries:
-            if entry.agent_id == agent_id:
-                entry.decision = decision.copy()
+        # O(1) lookup instead of O(n) search
+        entry = self._agent_entry_map.get(agent_id)
+        if entry:
+            entry.decision = decision
+            if self.detailed_logging:
                 self.logger.debug(
                     f"  {entry.agent_name}: decided to {decision.get('action', '?')} "
                     f"- {decision.get('reason', '')}"
                 )
-                break
     def log_agent_after(
         self,
@@ -177,102 +201,115 @@ class SimulationLogger:
         action_result: dict,
     ) -> None:
         """Log agent state after action."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return

-        for entry in self._current_entry.agent_entries:
-            if entry.agent_id == agent_id:
-                entry.stats_after = stats.copy()
-                entry.inventory_after = inventory.copy()
-                entry.money_after = money
-                entry.position = position.copy()
-                entry.action_result = action_result.copy()
-                break
+        # O(1) lookup instead of O(n) search
+        entry = self._agent_entry_map.get(agent_id)
+        if entry:
+            entry.stats_after = stats
+            entry.inventory_after = inventory
+            entry.money_after = money
+            entry.position = position
+            entry.action_result = action_result

     def log_market_state(self, orders_before: list, orders_after: list) -> None:
         """Log market state."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return
         self._current_entry.market_orders_before = orders_before
         self._current_entry.market_orders_after = orders_after

     def log_trade(self, trade: dict) -> None:
         """Log a trade transaction."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return
         self._current_entry.trades.append(trade)
-        self.logger.debug(f"  Trade: {trade.get('message', 'Unknown trade')}")
+        if self.detailed_logging:
+            self.logger.debug(f"  Trade: {trade.get('message', 'Unknown trade')}")
     def log_death(self, agent_name: str, cause: str) -> None:
         """Log an agent death."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return
         self._current_entry.deaths.append({"name": agent_name, "cause": cause})
+        # Always log deaths even without detailed logging
         self.logger.info(f"  DEATH: {agent_name} died from {cause}")

     def log_event(self, event_type: str, event_data: dict) -> None:
         """Log a general event (births, random events, etc.)."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return

         if event_type == "birth":
             self.logger.info(
                 f"  BIRTH: {event_data.get('child_name', '?')} born to {event_data.get('parent_name', '?')}"
             )
-        elif event_type == "random_event":
+        elif event_type == "random_event" and self.detailed_logging:
             self.logger.info(
                 f"  EVENT: {event_data.get('type', '?')} affecting {event_data.get('affected', [])}"
             )
-        else:
+        elif self.detailed_logging:
             self.logger.debug(f"  Event [{event_type}]: {event_data}")

     def log_statistics(self, stats: dict) -> None:
         """Log end-of-turn statistics."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return
-        self._current_entry.statistics = stats.copy()
+        self._current_entry.statistics = stats
     def end_turn(self) -> None:
         """Finish logging the current turn and write to file."""
-        if self._current_entry is None:
+        if not self.logging_enabled or self._current_entry is None:
             return

-        self._entries.append(self._current_entry)
-
-        # Write to JSON lines file
+        # Write to JSON lines file (without flush every time)
         if self._json_file:
             self._json_file.write(
                 json.dumps({"type": "turn", "data": self._current_entry.to_dict()}) + "\n"
             )
-            self._json_file.flush()

-        # Write summary
+        # Write summary (without flush every time)
         if self._summary_file:
             entry = self._current_entry
             self._summary_file.write(
                 f"Turn {entry.turn} | Day {entry.day} Step {entry.step_in_day} ({entry.time_of_day})\n"
             )

-            for agent in entry.agent_entries:
-                action = agent.decision.get("action", "?")
-                result = "✓" if agent.action_result.get("success", False) else "✗"
-                self._summary_file.write(
-                    f"  [{agent.agent_name}] {action} {result} | "
-                    f"E:{agent.stats_after.get('energy', '?')} "
-                    f"H:{agent.stats_after.get('hunger', '?')} "
-                    f"T:{agent.stats_after.get('thirst', '?')} "
-                    f"${agent.money_after}\n"
-                )
+            if self.detailed_logging:
+                for agent in entry.agent_entries:
+                    action = agent.decision.get("action", "?")
+                    result = "✓" if agent.action_result.get("success", False) else "✗"
+                    self._summary_file.write(
+                        f"  [{agent.agent_name}] {action} {result} | "
+                        f"E:{agent.stats_after.get('energy', '?')} "
+                        f"H:{agent.stats_after.get('hunger', '?')} "
+                        f"T:{agent.stats_after.get('thirst', '?')} "
+                        f"${agent.money_after}\n"
+                    )

             if entry.deaths:
                 for death in entry.deaths:
                     self._summary_file.write(f"  💀 {death['name']} died: {death['cause']}\n")

             self._summary_file.write("\n")
-            self._summary_file.flush()

-        self.logger.debug(f"Turn {self._current_entry.turn} completed")
+        # Batched flush - only flush every N turns
+        self._turns_since_flush += 1
+        if self._turns_since_flush >= self.flush_interval:
+            self._flush_files()
+            self._turns_since_flush = 0
+
+        # Clear current entry (don't accumulate in memory)
+        self._current_entry = None
+        self._agent_entry_map.clear()
+
+    def _flush_files(self) -> None:
+        """Flush file buffers to disk."""
+        if self._json_file:
+            self._json_file.flush()
+        if self._summary_file:
+            self._summary_file.flush()
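Batching flushes trades durability for speed: at most `log_flush_interval` turns of log data can be lost on a crash, but the per-turn flush overhead disappears. A standalone sketch of the counter pattern, using an in-memory buffer as a stand-in for the real file handle:

    import io

    FLUSH_INTERVAL = 50          # mirrors PerformanceConfig.log_flush_interval
    buffer = io.StringIO()       # stand-in for the real log file
    turns_since_flush = 0
    flush_count = 0

    for turn in range(200):
        buffer.write(f"turn {turn}\n")
        turns_since_flush += 1
        if turns_since_flush >= FLUSH_INTERVAL:
            flush_count += 1     # a real logger would call file.flush() here
            turns_since_flush = 0

    print(flush_count)  # 4 flushes for 200 turns instead of 200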
     def close(self) -> None:
         """Close log files."""
@@ -283,11 +320,13 @@ class SimulationLogger:
             self._summary_file.write(f"\nSession ended: {datetime.now()}\n")
             self._summary_file.close()
             self._summary_file = None
-        self.logger.info("Logging session closed")

     def get_entries(self) -> list[TurnLogEntry]:
-        """Get all logged entries."""
-        return self._entries.copy()
+        """Get all logged entries.
+
+        Note: Returns empty list when logging optimized (entries not kept in memory).
+        """
+        return []


 # Global logger instance
@@ -309,4 +348,3 @@ def reset_simulation_logger() -> SimulationLogger:
         _logger.close()
     _logger = SimulationLogger()
     return _logger
-
@@ -294,6 +294,9 @@ class OrderBook:
         # Record sale for price history (we need current_turn but don't have it here)
         # The turn will be passed via the _record_sale call from engine
         self.trade_history.append(result)
+        # Keep trade history bounded to prevent memory growth
+        if len(self.trade_history) > 1000:
+            self.trade_history = self.trade_history[-500:]
         return result

     def execute_multi_buy(
@@ -68,6 +68,9 @@ class World:
     step_in_day: int = 0
     time_of_day: TimeOfDay = TimeOfDay.DAY

+    # Agent index for O(1) lookups by ID
+    _agent_index: dict = field(default_factory=dict)
+
     # Statistics
     total_agents_spawned: int = 0
     total_agents_died: int = 0
@@ -87,6 +90,10 @@ class World:
         "clothes": 0,
     })

+    # Cached statistics (updated periodically for performance)
+    _cached_stats: Optional[dict] = field(default=None)
+    _stats_cache_turn: int = field(default=-1)
+
     def spawn_agent(
         self,
         name: Optional[str] = None,
@@ -154,6 +161,7 @@ class World:
         )

         self.agents.append(agent)
+        self._agent_index[agent.id] = agent  # Maintain index for O(1) lookups
         self.total_agents_spawned += 1
         return agent

@@ -327,11 +335,8 @@ class World:
         return inheritance_info

     def get_agent(self, agent_id: str) -> Optional[Agent]:
-        """Get an agent by ID."""
-        for agent in self.agents:
-            if agent.id == agent_id:
-                return agent
-        return None
+        """Get an agent by ID (O(1) lookup via index)."""
+        return self._agent_index.get(agent_id)

     def remove_dead_agents(self) -> list[Agent]:
         """Remove all dead agents from the world. Returns list of removed agents.
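Keeping `_agent_index` consistent with the `agents` list is manual in this commit: spawning adds to both, and the engine pops from both when removing dead agents. A small, self-contained sketch of that pattern; the helper names `add_agent`/`remove_agent` are hypothetical and not part of the codebase:

    from dataclasses import dataclass, field
    from typing import Optional

    @dataclass
    class Agent:
        id: str

    @dataclass
    class World:
        agents: list = field(default_factory=list)
        _agent_index: dict = field(default_factory=dict)

        def add_agent(self, agent: Agent) -> None:
            self.agents.append(agent)
            self._agent_index[agent.id] = agent     # keep index in step with the list

        def remove_agent(self, agent: Agent) -> None:
            self.agents.remove(agent)               # O(n), but removal is rare
            self._agent_index.pop(agent.id, None)

        def get_agent(self, agent_id: str) -> Optional[Agent]:
            return self._agent_index.get(agent_id)  # O(1) lookup

    w = World()
    a = Agent(id="a1")
    w.add_agent(a)
    assert w.get_agent("a1") is a
    w.remove_agent(a)
    assert w.get_agent("a1") is None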
@@ -457,7 +462,23 @@ class World:
         return [a for a in self.agents if a.is_alive() and not a.is_corpse()]

     def get_statistics(self) -> dict:
-        """Get current world statistics including wealth distribution and demographics."""
+        """Get current world statistics including wealth distribution and demographics.
+
+        Uses caching based on performance config to avoid recalculating every turn.
+        """
+        from backend.config import get_config
+        perf_config = get_config().performance
+
+        # Check if we can use cached stats
+        if (self._cached_stats is not None and
+                self.current_turn - self._stats_cache_turn < perf_config.stats_update_interval):
+            # Update just the essential changing values
+            self._cached_stats["current_turn"] = self.current_turn
+            self._cached_stats["current_day"] = self.current_day
+            self._cached_stats["step_in_day"] = self.step_in_day
+            self._cached_stats["time_of_day"] = self.time_of_day.value
+            return self._cached_stats
+
         living = self.get_living_agents()
         total_money = sum(a.money for a in living)

@@ -491,11 +512,13 @@ class World:
         richest = moneys[-1] if moneys else 0
         poorest = moneys[0] if moneys else 0

-        # Gini coefficient for inequality (0 = perfect equality, 1 = max inequality)
+        # Gini coefficient - O(n) algorithm instead of O(n²)
+        # Uses sorted list: Gini = (2 * sum(i * x_i)) / (n * sum(x_i)) - (n + 1) / n
         n = len(moneys)
         if n > 1 and total_money > 0:
-            sum_of_diffs = sum(abs(m1 - m2) for m1 in moneys for m2 in moneys)
-            gini = sum_of_diffs / (2 * n * total_money)
+            weighted_sum = sum((i + 1) * m for i, m in enumerate(moneys))
+            gini = (2 * weighted_sum) / (n * total_money) - (n + 1) / n
+            gini = max(0.0, min(1.0, gini))  # Clamp to [0, 1]
         else:
             gini = 0
         else:
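The replacement formula only holds when `moneys` is sorted ascending, which the surrounding code implies by taking `moneys[0]` as poorest and `moneys[-1]` as richest. A quick standalone check that the O(n) form matches the O(n²) pairwise definition, on hypothetical sample values:

    moneys = sorted([10, 20, 20, 50, 400])   # must be sorted ascending
    n = len(moneys)
    total = sum(moneys)

    # O(n^2) pairwise definition
    gini_pairwise = sum(abs(a - b) for a in moneys for b in moneys) / (2 * n * total)

    # O(n) formula on the sorted list
    weighted_sum = sum((i + 1) * m for i, m in enumerate(moneys))
    gini_sorted = (2 * weighted_sum) / (n * total) - (n + 1) / n

    assert abs(gini_pairwise - gini_sorted) < 1e-12
    print(round(gini_sorted, 4))  # 0.648 for this sample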
@@ -538,6 +561,11 @@ class World:
             "village_storage": self.village_storage.copy(),
         }

+        # Cache the computed stats
+        self._cached_stats = stats
+        self._stats_cache_turn = self.current_turn
+
         return stats

     def get_state_snapshot(self) -> dict:
         """Get a full snapshot of the world state for API."""
         return {
config.json: 13 changed lines
@@ -1,8 +1,15 @@
 {
+    "performance": {
+        "logging_enabled": false,
+        "detailed_logging": false,
+        "log_flush_interval": 50,
+        "max_turn_logs": 100,
+        "stats_update_interval": 10
+    },
     "ai": {
-        "use_goap": true,
-        "goap_max_iterations": 50,
-        "goap_max_plan_depth": 3,
+        "use_goap": false,
+        "goap_max_iterations": 30,
+        "goap_max_plan_depth": 2,
         "reactive_fallback": true
     },
     "agent_stats": {