refactor: Improve code quality, error handling, and test coverage
- Add file_lock context manager to eliminate duplicate locking patterns
- Add ValidationError, ConfigurationError, CertificateError exceptions
- Improve rollback logic in haproxy_add_servers (track successful ops only)
- Decompose haproxy_add_domain into smaller helper functions
- Consolidate certificate constants (CERTS_DIR, ACME_HOME) to config.py
- Enhance docstrings for internal functions and magic numbers
- Add pytest framework with 48 new tests (269 -> 317 total)
- Increase test coverage from 76% to 86%
  - servers.py: 58% -> 82%
  - certificates.py: 67% -> 86%
  - configuration.py: 69% -> 94%

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -4,7 +4,8 @@ import fcntl
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from typing import Any, Optional
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Generator, Optional
|
||||
|
||||
from .config import (
|
||||
MAP_FILE,
|
||||
@@ -16,6 +17,34 @@ from .config import (
|
||||
from .validation import domain_to_backend
|
||||
|
||||
|
||||
@contextmanager
def file_lock(lock_path: str) -> Generator[None, None, None]:
    """Hold an exclusive advisory lock on *lock_path* for the context's duration.

    Serializes read-modify-write cycles on configuration files so that
    concurrent processes cannot interleave their updates and corrupt state.

    Args:
        lock_path: Path of the lock file (conventionally "<config_file>.lock").
            The file is created/truncated if needed; it is not deleted afterwards.

    Yields:
        None - the flock is held while the ``with`` body executes.

    Example:
        with file_lock("/path/to/config.json.lock"):
            config = load_config()
            config["key"] = "value"
            save_config(config)
    """
    lock_file = open(lock_path, 'w')
    try:
        # Blocks until no other process holds an exclusive lock on this file.
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
        try:
            yield
        finally:
            # Release explicitly even though closing the fd would also do so.
            fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
    finally:
        lock_file.close()
|
||||
|
||||
|
||||
def atomic_write_file(file_path: str, content: str) -> None:
|
||||
"""Write content to file atomically using temp file + rename.
|
||||
|
||||
@@ -118,17 +147,61 @@ def split_domain_entries(entries: list[tuple[str, str]]) -> tuple[list[tuple[str
|
||||
|
||||
|
||||
def save_map_file(entries: list[tuple[str, str]]) -> None:
|
||||
"""Save entries to separate map files for 2-stage matching.
|
||||
"""Save domain-to-backend entries using 2-stage map routing architecture.
|
||||
|
||||
Uses 2-stage matching for performance:
|
||||
- domains.map: Exact domain matches (used with map_str, O(log n))
|
||||
- wildcards.map: Wildcard entries (used with map_dom, O(n))
|
||||
This function implements HAProxy's 2-stage domain routing for optimal
|
||||
performance. Entries are automatically split into two separate map files
|
||||
based on whether they are exact domains or wildcard patterns.
|
||||
|
||||
2-Stage Routing Architecture:
|
||||
Stage 1 - Exact Match (domains.map):
|
||||
- HAProxy directive: map_str(req.hdr(host),"/path/domains.map")
|
||||
- Data structure: ebtree (elastic binary tree)
|
||||
- Lookup complexity: O(log n)
|
||||
- Use case: Exact domain matches (e.g., "api.example.com")
|
||||
|
||||
Stage 2 - Wildcard Match (wildcards.map):
|
||||
- HAProxy directive: map_dom(req.hdr(host),"/path/wildcards.map")
|
||||
- Data structure: Linear suffix search
|
||||
- Lookup complexity: O(n) where n = number of wildcard entries
|
||||
- Use case: Wildcard domains (e.g., ".example.com" matches *.example.com)
|
||||
- Typically small set, so O(n) is acceptable
|
||||
|
||||
Performance Characteristics:
|
||||
- 1000 exact domains: ~10 comparisons (log2(1000) approx 10)
|
||||
- 10 wildcard entries: 10 suffix comparisons (acceptable)
|
||||
- By separating exact and wildcard entries, we avoid O(n) lookup
|
||||
for the common case (exact domain match)
|
||||
|
||||
HAProxy Configuration Example:
|
||||
use_backend %[req.hdr(host),lower,map_str(/etc/haproxy/domains.map)]
|
||||
if { req.hdr(host),lower,map_str(/etc/haproxy/domains.map) -m found }
|
||||
use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/wildcards.map)]
|
||||
if { req.hdr(host),lower,map_dom(/etc/haproxy/wildcards.map) -m found }
|
||||
|
||||
Args:
|
||||
entries: List of (domain, backend) tuples to write
|
||||
entries: List of (domain, backend) tuples to write.
|
||||
- Exact domains: "api.example.com" -> written to domains.map
|
||||
- Wildcards: ".example.com" (matches *.example.com) -> written
|
||||
to wildcards.map
|
||||
|
||||
Raises:
|
||||
IOError: If the file cannot be written
|
||||
IOError: If either map file cannot be written.
|
||||
|
||||
File Formats:
|
||||
domains.map:
|
||||
# Exact Domain to Backend mapping (for map_str)
|
||||
api.example.com pool_1
|
||||
www.example.com pool_2
|
||||
|
||||
wildcards.map:
|
||||
# Wildcard Domain to Backend mapping (for map_dom)
|
||||
.example.com pool_3 # Matches *.example.com
|
||||
.test.org pool_4 # Matches *.test.org
|
||||
|
||||
Note:
|
||||
Both files are written atomically using temp file + rename to prevent
|
||||
corruption during concurrent access or system failures.
|
||||
"""
|
||||
# Split into exact and wildcard entries
|
||||
exact_entries, wildcard_entries = split_domain_entries(entries)
|
||||
@@ -170,13 +243,48 @@ def get_domain_backend(domain: str) -> Optional[str]:
|
||||
|
||||
|
||||
def is_legacy_backend(backend: str) -> bool:
    """Return True when *backend* uses the legacy static naming scheme.

    The HAProxy MCP system distinguishes two backend naming conventions:

    Pool backends (dynamic):
        - Named pool_1 .. pool_100, pre-configured in haproxy.cfg with
          10 server slots each.
        - Domains are assigned to pools via domains.map and server slots
          are filled at runtime through the Runtime API, so no HAProxy
          reload is needed for domain management.

    Legacy backends (static):
        - Named "{domain}_backend" (e.g. "api_example_com_backend") and
          defined statically in haproxy.cfg.
        - Adding one requires a HAProxy reload; these predate pool-based
          routing.

    Args:
        backend: Backend name to classify
            (e.g. "pool_5" or "api_example_com_backend").

    Returns:
        True for a legacy backend (name does not start with "pool_"),
        False for a pool backend.

    Usage Scenarios:
        - Listing servers: picks the server naming convention
          (pool backends use pool_N_M, legacy use {domain}_M).
        - Adding servers: selects the backend configuration approach.
        - Migration: identifies domains still on legacy static routing.

    Examples:
        >>> is_legacy_backend("pool_5")
        False
        >>> is_legacy_backend("pool_100")
        False
        >>> is_legacy_backend("api_example_com_backend")
        True
        >>> is_legacy_backend("myservice_backend")
        True
    """
    # Equivalent to: not backend.startswith("pool_")
    return backend[:5] != "pool_"
|
||||
|
||||
@@ -263,17 +371,12 @@ def add_server_to_config(domain: str, slot: int, ip: str, http_port: int) -> Non
|
||||
ip: Server IP address
|
||||
http_port: HTTP port
|
||||
"""
|
||||
lock_path = f"{SERVERS_FILE}.lock"
|
||||
with open(lock_path, 'w') as lock_file:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
config = load_servers_config()
|
||||
if domain not in config:
|
||||
config[domain] = {}
|
||||
config[domain][str(slot)] = {"ip": ip, "http_port": http_port}
|
||||
save_servers_config(config)
|
||||
finally:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
with file_lock(f"{SERVERS_FILE}.lock"):
|
||||
config = load_servers_config()
|
||||
if domain not in config:
|
||||
config[domain] = {}
|
||||
config[domain][str(slot)] = {"ip": ip, "http_port": http_port}
|
||||
save_servers_config(config)
|
||||
|
||||
|
||||
def remove_server_from_config(domain: str, slot: int) -> None:
|
||||
@@ -283,18 +386,13 @@ def remove_server_from_config(domain: str, slot: int) -> None:
|
||||
domain: Domain name
|
||||
slot: Server slot to remove
|
||||
"""
|
||||
lock_path = f"{SERVERS_FILE}.lock"
|
||||
with open(lock_path, 'w') as lock_file:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
config = load_servers_config()
|
||||
if domain in config and str(slot) in config[domain]:
|
||||
del config[domain][str(slot)]
|
||||
if not config[domain]:
|
||||
del config[domain]
|
||||
save_servers_config(config)
|
||||
finally:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
with file_lock(f"{SERVERS_FILE}.lock"):
|
||||
config = load_servers_config()
|
||||
if domain in config and str(slot) in config[domain]:
|
||||
del config[domain][str(slot)]
|
||||
if not config[domain]:
|
||||
del config[domain]
|
||||
save_servers_config(config)
|
||||
|
||||
|
||||
def remove_domain_from_config(domain: str) -> None:
|
||||
@@ -303,16 +401,11 @@ def remove_domain_from_config(domain: str) -> None:
|
||||
Args:
|
||||
domain: Domain name to remove
|
||||
"""
|
||||
lock_path = f"{SERVERS_FILE}.lock"
|
||||
with open(lock_path, 'w') as lock_file:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
config = load_servers_config()
|
||||
if domain in config:
|
||||
del config[domain]
|
||||
save_servers_config(config)
|
||||
finally:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
with file_lock(f"{SERVERS_FILE}.lock"):
|
||||
config = load_servers_config()
|
||||
if domain in config:
|
||||
del config[domain]
|
||||
save_servers_config(config)
|
||||
|
||||
|
||||
# Certificate configuration functions
|
||||
@@ -359,16 +452,11 @@ def add_cert_to_config(domain: str) -> None:
|
||||
Args:
|
||||
domain: Domain name to add
|
||||
"""
|
||||
lock_path = f"{CERTS_FILE}.lock"
|
||||
with open(lock_path, 'w') as lock_file:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
domains = load_certs_config()
|
||||
if domain not in domains:
|
||||
domains.append(domain)
|
||||
save_certs_config(domains)
|
||||
finally:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
with file_lock(f"{CERTS_FILE}.lock"):
|
||||
domains = load_certs_config()
|
||||
if domain not in domains:
|
||||
domains.append(domain)
|
||||
save_certs_config(domains)
|
||||
|
||||
|
||||
def remove_cert_from_config(domain: str) -> None:
|
||||
@@ -377,13 +465,8 @@ def remove_cert_from_config(domain: str) -> None:
|
||||
Args:
|
||||
domain: Domain name to remove
|
||||
"""
|
||||
lock_path = f"{CERTS_FILE}.lock"
|
||||
with open(lock_path, 'w') as lock_file:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
domains = load_certs_config()
|
||||
if domain in domains:
|
||||
domains.remove(domain)
|
||||
save_certs_config(domains)
|
||||
finally:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
with file_lock(f"{CERTS_FILE}.lock"):
|
||||
domains = load_certs_config()
|
||||
if domain in domains:
|
||||
domains.remove(domain)
|
||||
save_certs_config(domains)
|
||||
|
||||
Reference in New Issue
Block a user