整合
This commit is contained in:
236
org/other/question3_multilayer_optimization.py
Normal file
236
org/other/question3_multilayer_optimization.py
Normal file
@@ -0,0 +1,236 @@
|
||||
import itertools
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Sequence, Tuple
|
||||
|
||||
import numpy as np
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
# Plot configuration: prefer widely-available sans-serif fonts, and keep the
# minus sign renderable when a non-default font is active.
plt.rcParams["font.sans-serif"] = ["DejaVu Sans", "Arial", "Helvetica"]
plt.rcParams["axes.unicode_minus"] = False
|
||||
|
||||
|
||||
def load_q1_module():
    """Dynamically load the sibling ``question1_pdms_emissivity.py`` module.

    The module provides ``cauchy_index`` and ``extinction_coeff`` used by
    :func:`pdms_index`.  It is loaded at most once per process: repeated
    callers (``pdms_index`` runs twice per design inside the optimization
    loop) reuse the cached module instead of re-reading and re-executing the
    file on every call.

    Returns:
        The executed module object.

    Raises:
        FileNotFoundError: if the question-1 file is not next to this script.
        ImportError: if an import spec cannot be created for the file.
    """
    cached = getattr(load_q1_module, "_cached_module", None)
    if cached is not None:
        return cached

    here = os.path.dirname(os.path.abspath(__file__))
    target = os.path.join(here, "question1_pdms_emissivity.py")
    if not os.path.isfile(target):
        raise FileNotFoundError(f"Expected sibling module not found: {target}")

    import importlib.util

    spec = importlib.util.spec_from_file_location("q1", target)
    if spec is None or spec.loader is None:
        raise ImportError(f"Could not create an import spec for {target}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    load_q1_module._cached_module = module
    return module
|
||||
|
||||
|
||||
def planck_weight(wavelength_um: np.ndarray, temperature: float = 300.0) -> np.ndarray:
    """Blackbody spectral weight from Planck's law.

    Args:
        wavelength_um: wavelengths in micrometres.
        temperature: blackbody temperature in kelvin (ambient by default).

    Returns:
        Spectral emissive power (unnormalized) at each wavelength.
    """
    lam = wavelength_um * 1e-6  # convert to metres for the SI constants below
    c1 = 3.7418e-16  # first radiation constant, W·m^2
    c2 = 1.4388e-2   # second radiation constant, m·K
    return c1 / (lam**5 * (np.exp(c2 / (lam * temperature)) - 1))
|
||||
|
||||
|
||||
def solar_weight(wavelength_um: np.ndarray) -> np.ndarray:
    """Two-Gaussian proxy for the solar spectrum.

    A visible lobe (centre 0.6 µm) plus a weaker near-infrared lobe
    (centre 1.6 µm) approximate the shape of terrestrial sunlight for
    weighting absorption integrals.
    """
    vis_lobe = np.exp(-((wavelength_um - 0.6) / 0.35) ** 2)
    nir_lobe = np.exp(-((wavelength_um - 1.6) / 0.45) ** 2)
    return vis_lobe + 0.35 * nir_lobe
|
||||
|
||||
|
||||
@dataclass
class Material:
    """Coating material modelled with a wavelength-independent index.

    Attributes:
        name: human-readable material label.
        n_const: constant real refractive index.
        k_const: constant extinction coefficient.
    """

    name: str
    n_const: float
    k_const: float

    def nk(self, wavelength_um: np.ndarray) -> np.ndarray:
        """Return the complex index ``n - i*k`` broadcast over wavelengths."""
        ones = np.ones_like(wavelength_um, dtype=np.complex128)
        return (self.n_const - 1j * self.k_const) * ones
|
||||
|
||||
|
||||
def pdms_index(wavelength_um: np.ndarray) -> np.ndarray:
    """Complex refractive index of PDMS from the question-1 dispersion model."""
    q1_module = load_q1_module()
    refractive = q1_module.cauchy_index(wavelength_um)
    extinction = q1_module.extinction_coeff(wavelength_um)
    return refractive - 1j * extinction
|
||||
|
||||
|
||||
def ag_index(wavelength_um: np.ndarray) -> np.ndarray:
    """Analytic stand-in for the complex index of a silver back mirror.

    The real part has a small visible-band bump; the extinction is large
    everywhere (metallic) with a broad mid-infrared peak.
    """
    vis_arg = (wavelength_um - 0.5) / 0.4
    ir_arg = (wavelength_um - 10) / 6
    n_part = 0.15 + 0.6 * np.exp(-vis_arg**2)
    k_part = 4.5 + 3.5 * np.exp(-ir_arg**2)
    return n_part - 1j * k_part
|
||||
|
||||
|
||||
def transfer_matrix_stack(
    wavelength_um: np.ndarray,
    layer_nk: Sequence[np.ndarray],
    thickness_um: Sequence[float],
    substrate_nk: np.ndarray,
    n0: float = 1.0,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Normal-incidence transfer-matrix R/T/A for a thin-film stack.

    Args:
        wavelength_um: wavelengths in micrometres.
        layer_nk: per-layer complex-index arrays aligned with
            ``wavelength_um``, ordered from the incidence side inward.
        thickness_um: layer thicknesses in micrometres (same order).
        substrate_nk: complex index of the semi-infinite substrate.
        n0: real index of the incidence medium (air/vacuum by default).

    Returns:
        Tuple ``(R, T, A)`` of reflectance, transmittance and absorptance
        arrays; ``A = 1 - R - T`` clipped to [0, 1].
    """
    free_space_k = 2 * np.pi / (wavelength_um * 1e-6)  # rad/m
    q_in = n0  # optical admittance of the incidence medium (normal incidence)

    reflectance = np.zeros_like(wavelength_um)
    transmittance = np.zeros_like(wavelength_um)

    for i in range(wavelength_um.size):
        # Accumulate the characteristic matrix layer by layer.
        stack_matrix = np.identity(2, dtype=complex)
        for layer_index, d_um in zip(layer_nk, thickness_um):
            n_c = layer_index[i]
            phase = free_space_k[i] * n_c * d_um * 1e-6
            layer_matrix = np.array(
                [
                    [np.cos(phase), 1j * np.sin(phase) / n_c],
                    [n_c * 1j * np.sin(phase), np.cos(phase)],
                ],
                dtype=complex,
            )
            stack_matrix = stack_matrix @ layer_matrix

        q_sub = substrate_nk[i]
        # B/C terms of the standard optical-admittance formulation:
        # [B, C]^T = M @ [1, q_sub]^T.
        b_term = stack_matrix[0, 0] + q_sub * stack_matrix[0, 1]
        c_term = stack_matrix[1, 0] + q_sub * stack_matrix[1, 1]
        denom = q_in * b_term + c_term
        r_amp = (q_in * b_term - c_term) / denom
        t_amp = 2 * q_in / denom
        reflectance[i] = np.abs(r_amp) ** 2
        transmittance[i] = np.real(q_sub / q_in) * np.abs(t_amp) ** 2

    absorptance = np.clip(1 - reflectance - transmittance, 0, 1)
    return reflectance, transmittance, absorptance
|
||||
|
||||
|
||||
def evaluate_stack(design: Dict) -> Dict:
    """Score one layer stack for daytime radiative cooling.

    Computes the solar-weighted absorption ``alpha`` over 0.35-2.5 µm and the
    Planck-weighted emissivity ``epsilon`` over the 8-13 µm atmospheric
    window, on top of a silver back mirror.

    Returns:
        ``{"alpha": ..., "epsilon": ..., "score": epsilon - 0.3 * alpha}`` —
        the composite score rewards strong emitters that stay solar-reflective.
    """
    vis_band = np.linspace(0.35, 2.5, 120)
    ir_band = np.linspace(8, 13, 200)
    vis_weight = solar_weight(vis_band)
    ir_weight = planck_weight(ir_band)

    thicknesses: List[float] = []
    index_fns = []
    for layer in design["layers"]:
        thicknesses.append(layer["thickness"])
        if layer["material"] == "PDMS":
            index_fns.append(pdms_index)
        else:
            # Bound method captures the material instance directly.
            index_fns.append(MATERIAL_LIBRARY[layer["material"]].nk)

    mirror = ag_index
    _, _, vis_abs = transfer_matrix_stack(
        vis_band, [fn(vis_band) for fn in index_fns], thicknesses, mirror(vis_band)
    )
    _, _, ir_abs = transfer_matrix_stack(
        ir_band, [fn(ir_band) for fn in index_fns], thicknesses, mirror(ir_band)
    )

    alpha = float(
        np.trapz(vis_abs * vis_weight, vis_band) / np.trapz(vis_weight, vis_band)
    )
    epsilon = float(np.trapz(ir_abs * ir_weight, ir_band) / np.trapz(ir_weight, ir_band))
    return {"alpha": alpha, "epsilon": epsilon, "score": epsilon - 0.3 * alpha}
|
||||
|
||||
|
||||
# Candidate coating materials for the middle layers, each approximated with a
# constant refractive index n and extinction coefficient k (no dispersion).
MATERIAL_LIBRARY: Dict[str, Material] = {
    "SiO2": Material("SiO2", 1.45, 1e-4),
    "Al2O3": Material("Al2O3", 1.76, 1.5e-3),
    "TiO2": Material("TiO2", 2.40, 5e-3),
    "Si3N4": Material("Si3N4", 2.05, 2e-3),
    "HfO2": Material("HfO2", 1.9, 2e-3),
}
|
||||
|
||||
|
||||
def random_design() -> Dict:
    """Sample a random candidate stack.

    A thick PDMS top layer (10-50 µm) is always first; below it come 2 or 3
    distinct dielectric layers drawn from ``MATERIAL_LIBRARY`` with
    thicknesses in 0.05-2.0 µm.
    """
    depth = random.choice([2, 3])
    picks = random.sample(list(MATERIAL_LIBRARY.keys()), depth)
    stack = [{"material": "PDMS", "thickness": random.uniform(10, 50)}]
    stack.extend(
        {"material": name, "thickness": random.uniform(0.05, 2.0)} for name in picks
    )
    return {"layers": stack}
|
||||
|
||||
|
||||
def optimize(iterations: int = 800) -> List[Dict]:
    """Random search over layer stacks; return the 15 best by score.

    Args:
        iterations: number of random designs to evaluate.

    Returns:
        The top 15 design dicts (each augmented with alpha/epsilon/score),
        sorted by descending composite score.
    """
    evaluated: List[Dict] = []
    for _ in range(iterations):
        candidate = random_design()
        candidate.update(evaluate_stack(candidate))
        evaluated.append(candidate)

    ranked = sorted(evaluated, key=lambda d: d["score"], reverse=True)
    return ranked[:15]
|
||||
|
||||
|
||||
def write_summary(designs: List[Dict], path: str) -> None:
    """Write ranked designs to a CSV file at *path*.

    Columns: rank, score, epsilon, alpha, layers — where layers is a
    semicolon-separated ``material@thicknessum`` description.
    """
    rows = ["rank,score,epsilon,alpha,layers\n"]
    for rank, design in enumerate(designs, start=1):
        stack_text = ";".join(
            "{}@{:.3f}um".format(layer["material"], layer["thickness"])
            for layer in design["layers"]
        )
        rows.append(
            "{},{:.4f},{:.4f},{:.4f},{}\n".format(
                rank, design["score"], design["epsilon"], design["alpha"], stack_text
            )
        )
    with open(path, "w", encoding="utf-8") as f:
        f.writelines(rows)
|
||||
|
||||
|
||||
def plot_pareto(designs: List[Dict], path: str) -> None:
    """Scatter-plot alpha vs epsilon colored by score; save PNG to *path*.

    The top five designs are annotated with their rank (1-5).
    """
    alphas = [d["alpha"] for d in designs]
    epsilons = [d["epsilon"] for d in designs]
    scores = [d["score"] for d in designs]

    fig, ax = plt.subplots(figsize=(6, 5))
    points = ax.scatter(alphas, epsilons, c=scores, cmap="viridis", s=80)
    ax.set_xlabel("Solar-weighted Absorption α")
    ax.set_ylabel("8-13 µm Emissivity ε")
    ax.set_title("Multilayer Design Performance Distribution")
    plt.colorbar(points, label="Composite Score ε - 0.3α")

    for rank, design in enumerate(designs[:5], start=1):
        ax.annotate(str(rank), (design["alpha"], design["epsilon"]))

    fig.tight_layout()
    plt.savefig(path, dpi=300)
    plt.close(fig)
|
||||
|
||||
|
||||
def main():
    """Run the random search, then write the CSV summary and Pareto plot."""
    ranked = optimize()

    outdir = os.path.join(os.path.dirname(__file__), "outputs")
    os.makedirs(outdir, exist_ok=True)

    summary_path = os.path.join(outdir, "question3_multilayer_summary.csv")
    write_summary(ranked, summary_path)
    plot_path = os.path.join(outdir, "question3_pareto.png")
    plot_pareto(ranked, plot_path)

    print(f"Optimal designs written to: {summary_path}")
    print(f"Performance scatter plot: {plot_path}")

    winner = ranked[0]
    winner_stack = "; ".join(
        f"{layer['material']}@{layer['thickness']:.2f}um" for layer in winner["layers"]
    )
    print(
        f"Best design: score={winner['score']:.3f}, ε={winner['epsilon']:.3f}, "
        f"α={winner['alpha']:.3f}, layers={winner_stack}"
    )
|
||||
|
||||
|
||||
# Script entry point: run the full optimization pipeline when executed directly.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user