2025-05-07
Coding was done using this Claude chat: https://claude.ai/chat/2db1b986-441b-4c73-b69e-8f24d5c2ab67
```
=== SEGWAY CASE STUDY RESULTS ===
Bottleneck Sequence: a_demand: Competitive product analysis, a_invest: File patents, a_demand: Customer interviews and demos, a_demand: Diffusion modeling, a_supply: Complete alpha prototype, a_supply: Design supply chain, a_invest: Recruit management team, a_supply: Complete production-intent design, a_invest: Lobby for legislation
Segway Sequence (PRISM taxonomy): a_supply, a_supply, a_supply, a_invest, a_invest, a_invest, a_demand, a_demand, a_demand
Key Efficiency Metrics:
Bottleneck Efficiency: 0.0425
Segway Efficiency: 0.0425
Improvement: -0.00%
Uncertainty Advantage: 2.71%
Axiom Satisfiability:
Initial State Axiom:
Bottleneck respects: True
Random respects: False
Weight Axiom:
Bottleneck respects: True
Random respects: False
```
2025-05-07
Test framework using the Segway case study, comparing the original task sequence against alternative sequencing paths.
![[3.3💸Evaluate solution(⚡) 2025-05-05-2.svg]]
%%[[3.3💸Evaluate solution(⚡) 2025-05-05-2|🖋 Edit in Excalidraw]]%%
```{python}
import jax
import jax.numpy as jnp
import xarray as xr
import matplotlib.pyplot as plt
# Optional: import GenJAX for generative modeling (not used for actual sampling in this code snippet)
try:
    from genjax import gen, flip, categorical
except ImportError:
    # GenJAX is absent: install minimal deterministic stand-ins so the module
    # still imports. These shims exist only so the @gen definitions below
    # parse; no actual GenJAX sampling happens in this script.
    def gen(traced_fn):
        """Identity decorator standing in for genjax.gen."""
        return traced_fn

    def flip(prob):
        """Single Bernoulli draw (fixed PRNG key, hence deterministic)."""
        draw = jax.random.uniform(jax.random.PRNGKey(0))
        return (draw < prob).astype(int)

    def categorical(probs):
        """Single categorical draw over `probs` (fixed PRNG key, deterministic)."""
        return jax.random.categorical(jax.random.PRNGKey(0), jnp.log(probs))
# 1. Define tasks and parameters
# The nine Segway development tasks; list position is the canonical task id
# used throughout (0 = Prototype ... 8 = Forecast).
tasks = ["Prototype", "Design", "Supply Chain", "Lobby", "Patents", "Team", "Demos", "Surveys", "Forecast"]
n_tasks = len(tasks)
# Cost of each task (in $ millions), aligned with `tasks` by index
costs = jnp.array([7.0, 5.0, 3.0, 0.5, 0.1, 2.0, 0.1, 0.05, 0.1])
# Subjective uncertainty reduction effect (1=lowest, 5=highest), aligned by index
effects = jnp.array([3, 5, 3, 2, 1, 1, 4, 1, 1])
# Uncertainty starts at 100% (1.0 in fractional terms)
initial_uncertainty = 1.0
# Factor alpha translating an effect rating into a fractional uncertainty
# reduction: completing a task multiplies remaining uncertainty by
# (1 - alpha * effect) — see simulate_uncertainty_and_cost below.
alpha = 0.15
# Overhead for maintaining a full team (cost added to every task scheduled
# after the Team task in a sequence)
team_overhead_per_task = 0.5  # $0.5M per remaining task
# Task index for Team (for overhead calculations)
team_task_id = tasks.index("Team")
# 2. Define generative models for task sequences using GenJAX
@gen
def random_sequence_model():
    """
    Generative program that produces a random task sequence (prototype always first).

    Returns a jnp.ndarray of all task indices, with task 0 (Prototype) first
    and the remaining tasks in a uniformly random order.

    NOTE(review): this model is never invoked below — step 3 samples sequences
    directly. The `categorical(probs) @ f"task_{i}"` form is GenJAX
    trace-addressing syntax and would fail with the dummy `categorical`
    fallback defined above; confirm against the GenJAX API before using.
    """
    sequence = []
    available = list(range(n_tasks))
    # Always start with Prototype (task 0) first, remove it from available
    sequence.append(available.pop(0))  # Prototype index 0
    # Randomly order the remaining tasks
    for i in range(1, n_tasks):
        # Choose a random index from available tasks uniformly
        probs = jnp.ones((len(available),)) / len(available)
        choice_idx = categorical(probs) @ f"task_{i}"
        # int() forces a concrete Python index for list.pop — only valid
        # outside JAX tracing; another reason this model is illustrative only.
        choice_idx = int(choice_idx)
        task = available.pop(choice_idx)
        sequence.append(task)
    return jnp.array(sequence)
@gen
def guided_sequence_model():
    """
    Generative program that produces a guided task sequence, favoring high
    uncertainty-reduction per cost.

    Task 0 (Prototype) is always first; each subsequent task is drawn with
    probability proportional to effects[task] / costs[task], and Supply Chain
    (task 2) is never scheduled while Design (task 1) is still pending.

    NOTE(review): like `random_sequence_model`, this is illustrative only and
    never invoked below; step 3 re-implements the same weighted sampling with
    `random.choices`.
    """
    sequence = []
    available = list(range(n_tasks))
    # Start with Prototype (task 0) first
    sequence.append(available.pop(0))
    # Precompute utility weights for each task (effect/cost ratio)
    weight = {i: float(effects[i] / costs[i]) for i in range(n_tasks)}
    for i in range(1, n_tasks):
        # Compute selection probabilities proportional to weight, with
        # dependency constraint (Design before Supply Chain)
        choices = []
        weights = []
        for task in available:
            # Enforce that task 2 (Supply Chain) is not done before task 1 (Design)
            if task == 2 and 1 in available:
                continue
            choices.append(task)
            weights.append(weight[task])
        probs = jnp.array(weights) / jnp.sum(jnp.array(weights))
        choice_idx = categorical(probs) @ f"task_{i}"
        choice_idx = int(choice_idx)  # concrete index for Python list ops
        task = choices[choice_idx]
        available.remove(task)
        sequence.append(task)
    return jnp.array(sequence)
# (The generative models above are not explicitly invoked below; instead we sample sequences directly for simplicity.)
# 3. Sample task sequences using the generative strategies
N_random = 50  # number of random sequences to generate
N_guided = 20  # number of guided sequences to generate
# Sample random sequences in parallel using JAX
key = jax.random.PRNGKey(0)
# One independent PRNG key per random sequence, for use with vmap below
random_keys = jax.random.split(key, N_random)
def sample_random_sequence(key, num_tasks=None):
    """
    Draw one random task ordering with Prototype (task 0) pinned first.

    Parameters
    ----------
    key : JAX PRNG key driving the permutation.
    num_tasks : int, optional
        Total number of tasks; defaults to the module-level `n_tasks`, so
        existing callers (the vmap below) are unchanged. Parameterized so the
        sampler no longer hard-codes the global task count.

    Returns
    -------
    jnp.ndarray of shape (num_tasks,): task indices, starting with 0.
    """
    if num_tasks is None:
        num_tasks = n_tasks  # module-level task count
    # Permute tasks 1..num_tasks-1 and prepend task 0 (Prototype) at start
    perm_rest = jax.random.permutation(key, jnp.arange(1, num_tasks))
    return jnp.concatenate([jnp.array([0]), perm_rest])
random_sequences = jax.vmap(sample_random_sequence)(random_keys)
# Sample guided sequences via weighted random selection (Python loop for clarity)
import random
guided_sequences_list = []
for _ in range(N_guided):
    remaining = list(range(n_tasks))
    seq = [remaining.pop(0)]  # Prototype (task 0) always goes first
    # Repeatedly pick the next task, weighted by effect/cost, honoring the
    # Design-before-Supply-Chain dependency
    while remaining:
        # Eligible next tasks: everything left, except Supply Chain (2)
        # while Design (1) is still pending
        eligible = [t for t in remaining if not (t == 2 and 1 in remaining)]
        raw_weights = [float(effects[t] / costs[t]) for t in eligible]
        # Normalize weights to probabilities
        total_weight = sum(raw_weights)
        probs = [w / total_weight for w in raw_weights]
        # Randomly pick the next task according to those probabilities
        picked = random.choices(eligible, weights=probs, k=1)[0]
        seq.append(picked)
        remaining.remove(picked)
    guided_sequences_list.append(seq)
guided_sequences = jnp.array(guided_sequences_list)
# Define "original" and "alternative" sequences from the Segway case study (for comparison)
original_sequence = jnp.array([0, 1, 2, 5, 3, 4, 6, 7, 8])  # Prototype->Design->Supply Chain->Team->Lobby->Patents->Demos->Surveys->Forecast
alternative_sequence = jnp.array([0, 6, 7, 8, 4, 3, 1, 2, 5])  # Prototype->Demos->Surveys->Forecast->Patents->Lobby->Design->Supply Chain->Team
# Stack every sequence (random, guided, original, alternative) into one array
all_sequences = jnp.concatenate([random_sequences, guided_sequences,
                                 original_sequence.reshape(1, -1),
                                 alternative_sequence.reshape(1, -1)], axis=0)
# 4. Simulate sequences to compute uncertainty trajectories
def simulate_uncertainty_and_cost(seqs):
    """
    Simulate cost accumulation and uncertainty reduction along task sequences.

    Parameters
    ----------
    seqs : integer array of shape (M, n_tasks) — task ids in execution order.

    Returns
    -------
    (cumulative_costs, remaining_uncertainty), each of shape (M, n_tasks):
    cumulative investment after each step, and the fraction of the initial
    uncertainty still remaining after each step.
    """
    # Step index at which the Team task occurs in each sequence
    team_pos = jnp.argmax(seqs == team_task_id, axis=1)
    # Per-step base cost, looked up from the module-level cost table
    base_spend = costs[seqs]
    # Steps strictly after Team incur the team-overhead surcharge
    step_index = jnp.arange(n_tasks)[None, :]
    after_team = step_index > team_pos[:, None]
    spend_per_step = base_spend + jnp.where(after_team, team_overhead_per_task, 0.0)
    cumulative = jnp.cumsum(spend_per_step, axis=1)
    # Each completed task multiplies uncertainty by (1 - alpha * effect),
    # floored at 0 so a large effect can never make uncertainty negative
    per_step_factor = jnp.clip(1.0 - alpha * effects[seqs].astype(jnp.float32), 0.0, 1.0)
    remaining = jnp.cumprod(per_step_factor, axis=1)
    return cumulative, remaining
cumulative_costs, remaining_uncertainty = simulate_uncertainty_and_cost(all_sequences)
# Compute a score for each sequence according to "utility per uncertainty per cost".
# Score = negative area under the uncertainty-vs-cost step curve (higher is better).
# uncertainty_before[:, k] is the uncertainty level while task k is being paid for.
uncertainty_before = jnp.concatenate(
    [jnp.ones((all_sequences.shape[0], 1)), remaining_uncertainty[:, :-1]], axis=1)
# Recover per-step spend from the cumulative totals instead of re-deriving
# base cost + team overhead a second time — this keeps the cost model defined
# in exactly one place (simulate_uncertainty_and_cost).
step_costs = jnp.diff(cumulative_costs, axis=1, prepend=0.0)
area = jnp.sum(uncertainty_before * step_costs, axis=1)
score = -area
# 5. Organize results in an xarray Dataset
n_sequences = all_sequences.shape[0]
ds = xr.Dataset(
    data_vars={
        "task_sequence": (("sequence", "step"), all_sequences.astype(int)),
        "cumulative_cost": (("sequence", "step"), cumulative_costs),
        "remaining_uncertainty": (("sequence", "step"), remaining_uncertainty),
        "score": (("sequence",), score),
    },
    coords={
        # Sequences are numbered 0..M-1; steps are numbered 1..n_tasks
        "sequence": list(range(n_sequences)),
        "step": list(range(1, n_tasks + 1)),
    },
)
# Tag each sequence with the strategy that produced it (ordering matches the
# concatenation order of all_sequences above)
strategy_labels = ["random"] * N_random + ["guided"] * N_guided + ["original", "alternative"]
ds = ds.assign_coords(strategy=("sequence", strategy_labels))
# 6. Plot uncertainty trajectories vs cumulative investment
def _plot_trajectory(seq_idx, **line_style):
    """Plot one sequence's uncertainty-vs-spend step curve, starting at (0, 1)."""
    xs = [0.0] + cumulative_costs[seq_idx].tolist()
    ys = [1.0] + remaining_uncertainty[seq_idx].tolist()
    plt.step(xs, ys, where='pre', **line_style)

plt.figure(figsize=(8, 6))
# A subset of random sequences (gray)
for idx in range(min(10, N_random)):
    _plot_trajectory(idx, color='gray', alpha=0.5)
# A subset of guided sequences (blue)
for idx in range(N_random, N_random + min(5, N_guided)):
    _plot_trajectory(idx, color='blue', alpha=0.5)
# The original Segway sequence (red dashed) — first row after random + guided
_plot_trajectory(N_random + N_guided, color='red', linestyle='--',
                 linewidth=2.5, label="Original sequence")
# The alternative sequence (green solid) — last row of all_sequences
_plot_trajectory(N_random + N_guided + 1, color='green',
                 linewidth=2.5, label="Alternative sequence")
plt.xlabel("Cumulative Investment ($ millions)")
plt.ylabel("Remaining Uncertainty (fraction)")
plt.title("Uncertainty reduction trajectories for various task sequences")
plt.legend()
plt.grid(True)
plt.show()
```