Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions ARKHE.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# 🏛️ ARKHE(N) OS: Cybernetic Sensorium & Preservation Suite

Welcome to the **Arkhe(n) Engineering Suite**, a multidisciplinary framework for architecture, urbanism, and digital preservation.

## 🧱 Core Logic (`arkhe/`)
The core package implements the "Geometria Arkhe" layer:
- **Hexagonal Spatial Index (HSI):** 3D hexagonal coordinate system (Cube Coordinates).
- **Fusion Engine:** Multimodal sensor integration (LiDAR, Thermal, Depth).
- **Human Perspective:** Object segmentation and contextual labeling.
- **Morphogenetic Simulation:** Conscious field states using Gray-Scott reaction-diffusion and Hebbian learning.
- **Immune System:** Byzantine fault detection and informational tourniquets.

## 📂 Preservation Module (`preservation/`)
A practical application of Arkhe(n) principles for digital media servers:
- **Plex Vigilante:** A PowerShell/WinForms utility to detect missing media.
- **Sovereign Identity:** SIWA (Sign In With Agent) authentication with 2FA Telegram approvals.
- **Keyring Proxy:** Isolated credential management for onchain identity (ERC-8004).

## 🧪 Demos & Simulation (`demos/`)
Scripts to visualize the birth and evolution of the system:
- `demo_sensorium.py`: Multimodal fusion and terrain perception.
- `demo_bio_genesis.py`: Swarm behavior and social instinct evolution.
- `demo_stress_hero.py`: High-density stress testing (the Saga of Pedestrian 12).
- `arkhe_final_seal.py`: Snapshot and eternity protocols.

## 📖 Manifestos
- `arkhe_manifesto.md`: Principles of coherent urban ethics.
- `arkhe_ontogeny_report.md`: Technical birth and evolution report.

---
*Φ = 1.000 | The system is coherent.*
12 changes: 12 additions & 0 deletions arkhe/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
from .arkhe_types import CIEF, HexVoxel, ArkheGenome, BioAgent
from .hsi import HSI
from .fusion import FusionEngine
from .simulation import MorphogeneticSimulation
from .metasurface import MetasurfaceController, QuantumPaxos
from .immune import ImmuneSystem
from .memory import MemoryAnalyzer, HSISnapshot
from .grover import GroverUrbano
from .cognition import ConstraintLearner
from .physics import SpatialHashGrid
from .bio_genesis import BioGenesisEngine
from .human_vision import HumanPerspectiveEngine
90 changes: 90 additions & 0 deletions arkhe/arkhe_types.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
from dataclasses import dataclass, field
from typing import Tuple, List, Optional
import numpy as np

@dataclass
class CIEF:
    """
    CIEF genome: the identity functional of a voxel or agent.

    Components:
        c: Construction / physicality (structural properties).
        i: Information / context (semantic/historical data).
        e: Energy / environment (thermal/tension fields).
        f: Function / frequency (functional vocation).
    """
    c: float = 0.0
    i: float = 0.0
    e: float = 0.0
    f: float = 0.0

    def to_array(self) -> np.ndarray:
        """Return the genome as a float32 vector [c, i, e, f]."""
        components = (self.c, self.i, self.e, self.f)
        return np.asarray(components, dtype=np.float32)

@dataclass
class ArkheGenome:
    """BioAgent genome along the C-I-E-F axes (each defaults to a neutral 0.5)."""
    c: float = 0.5
    i: float = 0.5
    e: float = 0.5
    f: float = 0.5

    def to_vector(self) -> np.ndarray:
        """Return the genome as a float32 vector [c, i, e, f]."""
        return np.asarray([self.c, self.i, self.e, self.f], dtype=np.float32)

@dataclass
class BioAgent:
    """A biological agent living inside the Arkhe(n) OS simulation."""
    id: int
    position: np.ndarray
    velocity: np.ndarray = field(default_factory=lambda: np.zeros(3, dtype=np.float32))
    genome: ArkheGenome = field(default_factory=ArkheGenome)
    energy: float = 100.0
    brain: Optional[object] = None  # A ConstraintLearner, attached after construction.
    connections: List[int] = field(default_factory=list)

    def is_alive(self) -> bool:
        """An agent counts as alive while its energy is strictly positive."""
        return self.energy > 0.0

@dataclass
class HexVoxel:
    """
    HexVoxel: one unit of the Hexagonal Spatial Index (HSI).
    """
    # Cube coordinates (q, r, s) with q + r + s = 0, plus h for height.
    coords: Tuple[int, int, int, int]

    # CIEF genome of this voxel.
    genome: CIEF = field(default_factory=CIEF)

    # Local coherence (Phi metric).
    phi_data: float = 0.0
    phi_field: float = 0.0
    conflict_level: float = 0.0  # Interference-detection level.
    tau: float = 0.0  # Entanglement tension.

    @property
    def phi(self) -> float:
        """Integrated coherence: mean of data/field phi, damped by conflict."""
        averaged = 0.5 * (self.phi_data + self.phi_field)
        return averaged * (1.0 - self.conflict_level)

    # Quantum-like state: amplitudes for the 6 faces plus one internal slot.
    # state[0..5] are faces, state[6] is internal.
    state: np.ndarray = field(default_factory=lambda: np.zeros(7, dtype=np.complex64))

    # Reaction-diffusion pair (A, B) for the Gray-Scott model.
    rd_state: Tuple[float, float] = (1.0, 0.0)
    memory_bias: float = 0.0  # M(x) term for the conditioned reflex.
    stability_index: float = 1.0  # S(t) = 1 - |dF/dt|.
    is_quarantined: bool = False  # Byzantine isolation flag.
    sensor_health: float = 1.0  # 1.0 = OK, 0.0 = failed.
    rehabilitation_index: float = 0.0  # Trust recovery in [0, 1].
    object_label: str = "vacuum"  # Perspective: human-like object identification.

    # Hebbian weights toward the 6 neighbors.
    weights: np.ndarray = field(default_factory=lambda: np.ones(6, dtype=np.float32))

    def __post_init__(self):
        # Normalize malformed buffers supplied by the caller.
        if len(self.state) != 7:
            self.state = np.zeros(7, dtype=np.complex64)
        if len(self.weights) != 6:
            self.weights = np.ones(6, dtype=np.float32)
116 changes: 116 additions & 0 deletions arkhe/bio_genesis.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
import numpy as np
import logging
from typing import Dict, List, Optional
from .arkhe_types import BioAgent, ArkheGenome
from .cognition import ConstraintLearner
from .physics import SpatialHashGrid, calculate_collision_probability

class BioGenesisEngine:
    """
    Genesis engine: manages BioAgents and the biological state of Arkhe(n) OS.

    Maintains a population of agents, advances their physics (Brownian drift
    plus simple inertia), and runs collision-prioritized social interactions
    through each agent's ConstraintLearner brain.
    """
    def __init__(self, num_agents: int = 100):
        self.agents: Dict[int, BioAgent] = {}
        self.spatial_hash = SpatialHashGrid(cell_size=4.0)
        self.simulation_time = 0.0
        self.next_id = 0
        self._initialize_population(num_agents)

    def _initialize_population(self, num_agents):
        """Create `num_agents` agents with random positions, genomes and brains."""
        for _ in range(num_agents):
            pos = np.random.uniform(0, 50, 3)
            # Random initial genome.
            genome = ArkheGenome(
                c=np.random.rand(),
                i=np.random.rand(),
                e=np.random.rand(),
                f=np.random.rand()
            )
            agent = BioAgent(id=self.next_id, position=pos, genome=genome)

            # Instantiate the brain; seed its weights from the genome.
            brain = ConstraintLearner(agent_id=self.next_id)
            brain.weights = np.outer(genome.to_vector(), np.random.randn(4)) * 0.5
            agent.brain = brain

            self.agents[self.next_id] = agent
            self.next_id += 1

    def process_mother_signal(self):
        """
        Cognitive Bio-Genesis protocol: activates the biological state by
        raising every brain's initial plasticity.
        """
        logging.info("🌱 Sinal primordial de Mother recebido.")
        # Systemic activation: boost exploration and learning rates.
        for agent in self.agents.values():
            if agent.brain:
                agent.brain.exploration_rate = 0.4
                agent.brain.learning_rate = 0.05
        logging.info(f"🚀 Arkhe(n) OS transicionado para estado biológico. {len(self.agents)} agentes ativos.")

    def update(self, dt: float):
        """Advance the simulation by `dt`: move agents, rebuild the spatial
        index, then process social interactions."""
        self.simulation_time += dt

        # 1. Update positions (Brownian motion + simple inertia).
        for agent in self.agents.values():
            if not agent.is_alive():
                continue
            agent.position += agent.velocity * dt + np.random.randn(3) * 0.1
            # Keep agents inside the simulated 0-50 bounds.
            agent.position = np.clip(agent.position, 0, 50)

        # 2. Rebuild the hash grid from the living population.
        self.spatial_hash.clear()
        for agent in self.agents.values():
            if agent.is_alive():
                self.spatial_hash.insert(agent.id, agent.position)

        # 3. Collision-prioritized social interactions.
        self._process_interactions(dt)

    def _process_interactions(self, dt: float):
        """Run pairwise cognitive interactions, prioritized by collision risk.

        Each unordered pair is processed at most once per tick. When both
        brains approve on average (or a collision is imminent), a mutual
        connection is recorded and both brains receive a learning reward.
        """
        processed_pairs = set()

        for agent in self.agents.values():
            # Robustness fix: also skip agents without an attached brain,
            # mirroring the guard used in process_mother_signal.
            if not agent.is_alive() or agent.brain is None:
                continue

            # Efficient neighbor lookup via the spatial hash.
            neighbor_ids = self.spatial_hash.query_radius(agent.position, radius=5.0)

            # Rank neighbors by collision probability.
            priorities = []
            for other_id in neighbor_ids:
                if other_id == agent.id:
                    continue
                other = self.agents[other_id]
                if not other.is_alive() or other.brain is None:
                    continue

                p_coll = calculate_collision_probability(
                    agent.position, agent.velocity,
                    other.position, other.velocity, dt
                )
                priorities.append((other_id, p_coll))

            # Highest collision risk first.
            priorities.sort(key=lambda x: x[1], reverse=True)

            for other_id, p_coll in priorities:
                pair = tuple(sorted((agent.id, other_id)))
                if pair in processed_pairs:
                    continue
                processed_pairs.add(pair)

                other = self.agents[other_id]

                # Cognitive evaluation on both sides.
                score_a, reason_a = agent.brain.evaluate_partner(other.genome.to_vector())
                score_b, reason_b = other.brain.evaluate_partner(agent.genome.to_vector())

                # Connection consensus: mutual approval or imminent collision.
                if (score_a + score_b) / 2.0 > 0.3 or p_coll > 0.8:
                    # Bug fix: actually record the mutual connection —
                    # BioAgent.connections was declared but never populated.
                    if other_id not in agent.connections:
                        agent.connections.append(other_id)
                    if agent.id not in other.connections:
                        other.connections.append(agent.id)

                    # Metabolic adjustment: reward scales with collision risk.
                    reward = 0.1 * p_coll if p_coll > 0 else 0.05
                    agent.brain.learn_from_experience(other.genome.to_vector(), np.ones(4), reward)
                    other.brain.learn_from_experience(agent.genome.to_vector(), np.ones(4), reward)

                # Processing the interaction consumes energy on both sides.
                agent.energy -= 0.1
                other.energy -= 0.1
64 changes: 64 additions & 0 deletions arkhe/cognition.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import numpy as np
from collections import deque
from typing import Optional, Tuple, List

class ConstraintLearner:
"""
Cérebro do Agente: Aprende restrições e padrões temporais.
Implementa memória episódica (deque) e aprendizado Hebbiano.
"""
def __init__(self, agent_id: int, input_dim: int = 4, output_dim: int = 4, memory_size: int = 50):
self.agent_id = agent_id
# Inicializa pesos aleatórios baseados no genoma (4x4 por padrão para C-I-E-F)
self.weights = np.random.randn(input_dim, output_dim) * 0.1
self.memory = deque(maxlen=memory_size)
self.learning_rate = 0.01
self.exploration_rate = 0.1

def evaluate_partner(self, partner_genome_vector: np.ndarray) -> Tuple[float, str]:
"""
Avalia a viabilidade de conexão com um parceiro.
Combina intuição (pesos) e experiência (memória).
"""
# Predição baseada nos pesos (Intuição)
intuition_score = np.tanh(np.dot(partner_genome_vector, self.weights).mean())

# Reconhecimento de padrão na memória (Experiência)
experience_score = 0.0
if self.memory:
# Similaridade simples com interações passadas
similarities = []
for state, _, reward in self.memory:
sim = 1.0 - np.linalg.norm(state - partner_genome_vector)
similarities.append((sim, reward))

# Ponderação por similaridade
weights = np.array([max(0, s[0]) for s in similarities])
if weights.sum() > 0:
experience_score = np.sum(weights * np.array([s[1] for s in similarities])) / weights.sum()

# Fusão de Intuição e Experiência
final_score = 0.7 * intuition_score + 0.3 * experience_score

# Modulação por exploração (Curiosidade)
if np.random.rand() < self.exploration_rate:
final_score += np.random.uniform(-0.2, 0.2)
reason = "Exploração"
else:
reason = "Consenso Cognitivo"

return np.clip(final_score, -1.0, 1.0), reason

def learn_from_experience(self, state: np.ndarray, action: np.ndarray, reward: float):
"""
Atualiza pesos Hebbianos e armazena na memória episódica.
"""
# Armazena a interação
self.memory.append((state, action, reward))

# Atualização Hebbiana: Δw = η * (Input * Erro/Reward)
delta_w = self.learning_rate * np.outer(state, action) * reward
self.weights += delta_w

# Clipping para estabilidade
self.weights = np.clip(self.weights, -2.0, 2.0)
Loading