This commit is contained in:
2025-11-20 20:27:39 +08:00
parent a99694786a
commit c8609ca320
6 changed files with 3779 additions and 1233 deletions

View File

@@ -2,7 +2,21 @@ import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
def make_strictly_increasing(wl, n, k):
    """Drop duplicate wavelengths and any samples that break strict monotonic increase.

    Returns the filtered (wl, n, k) triple; n and k are filtered with the
    same index masks so the three arrays stay aligned.
    """
    # Pass 1: collapse exact duplicate wavelengths (first occurrence wins).
    deduped, first_idx = np.unique(wl, return_index=True)
    removed = len(wl) - len(deduped)
    if removed != 0:
        print(f"Removed {removed} duplicate wavelength points")
        wl, n, k = wl[first_idx], n[first_idx], k[first_idx]
    # Pass 2: keep only samples whose wavelength strictly exceeds the previous one.
    strictly_up = np.diff(wl) > 0
    if not strictly_up.all():
        keep = np.concatenate([[True], strictly_up])
        wl, n, k = wl[keep], n[keep], k[keep]
    return wl, n, k
# -----------------------------
# 1. 从data.txt读取分块格式数据先wl+n再wl+k
# -----------------------------
@@ -62,6 +76,8 @@ def read_split_data(file_path):
# Load the split-format optical data (wl+n block followed by wl+k block).
wl_all, n_all, k_all = read_split_data('/Users/spasolreisa/IdeaProjects/asiaMath/data.txt')
# Enforce strictly increasing wavelengths so CubicSpline accepts the grid.
wl_all, n_all, k_all = make_strictly_increasing(wl_all, n_all, k_all)
#
# Cubic-spline interpolation (covers the full measured band for accurate evaluation).
cs_n = CubicSpline(wl_all, n_all)  # interpolator for refractive index n
cs_k = CubicSpline(wl_all, k_all)  # interpolator for extinction coefficient k
@@ -163,7 +179,7 @@ plt.xlim(wl_min, wl_max)
# Save the figure at high resolution, then display it.
plt.tight_layout()
plt.savefig('PDMS_emissivity_spectrum.png', dpi=300, bbox_inches='tight')
plt.show()
# -----------------------------

347
org/chatgpt2/q1_3.py Normal file
View File

@@ -0,0 +1,347 @@
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from scipy.integrate import simpson
# -----------------------------
# 1. 材料级PDMS光学性能计算你的模型核心逻辑修复变量定义错误
# -----------------------------
def make_strictly_increasing(wl, n, k):
    """Remove duplicate wavelength samples and enforce a strictly increasing grid.

    n and k are filtered with the same masks as wl so the arrays stay aligned.
    Returns the filtered (wl, n, k).
    """
    # Collapse exact duplicates first (keeps the first occurrence of each value).
    deduped, first_idx = np.unique(wl, return_index=True)
    removed = len(wl) - len(deduped)
    if removed != 0:
        print(f"Removed {removed} duplicate wavelength points")
        wl, n, k = wl[first_idx], n[first_idx], k[first_idx]
    # Then drop any sample whose wavelength does not strictly exceed its predecessor.
    strictly_up = np.diff(wl) > 0
    if not strictly_up.all():
        keep = np.concatenate([[True], strictly_up])
        wl, n, k = wl[keep], n[keep], k[keep]
    return wl, n, k
def read_split_data(file_path):
    """Parse the split-format optical data file: a "wl n" table followed by a "wl k" table.

    Blank lines and '#' comments are ignored; rows that do not have exactly
    two columns are skipped. Returns (wl, n, k) as float arrays sorted by
    ascending wavelength.

    Raises:
        ValueError: if the 'wl k' header is missing, or the n and k blocks
            do not sample the same wavelengths.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = [line.strip() for line in f if line.strip() and not line.startswith('#')]
    # Locate the 'wl k' header that separates the two tables.
    split_idx = None
    for i, line in enumerate(lines):
        if line == 'wl k':
            split_idx = i
            break
    if split_idx is None:
        raise ValueError("未找到'wl k'表头,请检查数据格式!")
    # Table 1: wavelength + refractive index n (line 0 is the 'wl n' header).
    n_lines = lines[1:split_idx]
    wl_n, n_list = [], []
    for line in n_lines:
        parts = line.split()
        if len(parts) != 2:
            continue  # skip malformed rows
        wl, n_val = parts
        wl_n.append(float(wl))
        n_list.append(float(n_val))
    # Table 2: wavelength + extinction coefficient k.
    k_lines = lines[split_idx + 1:]
    wl_k, k_list = [], []
    for line in k_lines:
        parts = line.split()
        if len(parts) != 2:
            continue  # skip malformed rows
        wl, k_val = parts
        wl_k.append(float(wl))
        k_list.append(float(k_val))
    wl_n, n_list = np.array(wl_n), np.array(n_list)
    wl_k, k_list = np.array(wl_k), np.array(k_list)
    # Fix: validate explicitly instead of `assert` (asserts are stripped under
    # `python -O`), and guard the lengths first so blocks of different sizes
    # raise this clear ValueError rather than a numpy broadcast failure.
    if len(wl_n) != len(wl_k) or not np.allclose(wl_n, wl_k):
        raise ValueError("n和k的波长列表不一致")
    # Sort everything by ascending wavelength.
    sorted_idx = np.argsort(wl_n)
    return wl_n[sorted_idx], n_list[sorted_idx], k_list[sorted_idx]
def fresnel_reflectance(n1, k1, n2, k2):
    """Normal-incidence Fresnel power reflectance between media (n1, k1) and (n2, k2)."""
    medium_a = n1 + 1j * k1
    medium_b = n2 + 1j * k2
    amplitude = (medium_a - medium_b) / (medium_a + medium_b)
    return np.abs(amplitude) ** 2
def thin_film_optical_properties(n_film, k_film, d, wl):
    """Absorptivity/emissivity, reflectance and transmissivity of a free-standing film in air.

    Two-beam interference with absorption at normal incidence; by Kirchhoff's
    law the returned absorptivity equals the emissivity.
    Returns (alpha_total, R_total, T_total).
    """
    R12 = fresnel_reflectance(1.0, 0.0, n_film, k_film)  # air -> film
    R23 = fresnel_reflectance(n_film, k_film, 1.0, 0.0)  # film -> air
    phase = 2 * np.pi * n_film * d / wl                  # single-pass phase thickness
    attenuation = np.exp(-4 * np.pi * k_film * d / wl)   # round-trip absorption factor
    # Shared interference term and denominator.
    cross = 2 * np.sqrt(R12 * R23 * attenuation) * np.cos(2 * phase)
    denom = 1 + R12 * R23 * attenuation + cross
    R_total = (R12 + R23 * attenuation + cross) / denom
    T_total = (1 - R12) * (1 - R23) * attenuation / denom
    # Energy balance: whatever is neither reflected nor transmitted is absorbed.
    return 1 - R_total - T_total, R_total, T_total
# -----------------------------
# 2. Data reading and preprocessing
# -----------------------------
# Absolute path to data.txt — adjust to your environment.
DATA_PATH = '/Users/spasolreisa/IdeaProjects/asiaMath/data.txt'
wl_all, n_all, k_all = read_split_data(DATA_PATH)
# CubicSpline requires a strictly increasing wavelength grid.
wl_all, n_all, k_all = make_strictly_increasing(wl_all, n_all, k_all)
print(f"数据读取成功:波长范围 {wl_all.min():.2f}{wl_all.max():.2f} μm{len(wl_all)}个数据点")
# Cubic-spline interpolators for the optical constants.
cs_n = CubicSpline(wl_all, n_all)
cs_k = CubicSpline(wl_all, k_all)
# PDMS film thicknesses (μm) to evaluate, and a refined wavelength grid.
thicknesses = [0.5, 1.0, 1.5, 2.0]
wl_fine = np.linspace(wl_all.min(), wl_all.max(), 500)  # fine resolution grid
# -----------------------------
# 3. Precompute material-level figures of merit (ε and α)
# -----------------------------
# Band-averaged emissivity (8-13 μm, blackbody-weighted) and band-averaged
# solar absorptivity (0.3-2.5 μm, AM1.5-weighted), keyed by thickness.
avg_eps_dict = {}    # average emissivity ε_avg
avg_alpha_dict = {}  # average solar absorptivity α_avg
# The weighting spectra for both bands are defined below.
def planck_spectrum(wl, T):
    """Planck blackbody spectral exitance in W/(m²·μm); wl in μm, T in K.

    Used as the 8-13 μm weighting function for the emissivity average.

    Bug fix: the original divided by (wl * 1e-6)**5 — wavelength in metres —
    while c1 is expressed in micron units (W·μm⁴/m²), inflating every value
    by a constant 1e30. Only the spectral *shape* reaches the callers (the
    weight is normalised away in the weighted averages), but the absolute
    magnitude was wrong; keep everything in μm for consistency.
    """
    c1 = 3.7418e8  # first radiation constant (W·μm⁴/m²)
    c2 = 14388  # second radiation constant (μm·K)
    return c1 / (wl ** 5 * (np.exp(c2 / (wl * T)) - 1))
def solar_spectrum_am15(wl):
    """Empirical piecewise-linear fit of the AM1.5 solar spectrum, W/(m²·μm).

    Non-zero only inside the 0.3-2.5 μm band; wl is expected to be a numpy array.
    """
    spectrum = np.zeros_like(wl)
    in_band = (wl >= 0.3) & (wl <= 2.5)
    w = wl[in_band]
    # Evaluate the four linear segments one at a time.
    seg = np.empty_like(w)
    m1 = w < 0.5
    m2 = (w >= 0.5) & (w < 1.0)
    m3 = (w >= 1.0) & (w < 1.5)
    m4 = w >= 1.5
    seg[m1] = 800 + 400 * w[m1]
    seg[m2] = 1000 - 200 * (w[m2] - 0.5)
    seg[m3] = 900 - 100 * (w[m3] - 1.0)
    seg[m4] = 750 - 200 * (w[m4] - 1.5)
    spectrum[in_band] = seg
    return spectrum
# Compute the band-averaged ε and α for every film thickness.
for d in thicknesses:
    print(f"\n正在计算厚度 {d} μm 的光学性能...")
    # -----------------------------
    # Average emissivity ε_avg (8-13 μm, Planck-weighted)
    # -----------------------------
    if wl_all.min() <= 13 and wl_all.max() >= 8:
        wl_rad = np.linspace(8, 13, 300)  # core radiative-cooling band
        n_rad = cs_n(wl_rad)
        k_rad = cs_k(wl_rad)
        eps_rad, _, _ = thin_film_optical_properties(n_rad, k_rad, d, wl_rad)
        planck_weight = planck_spectrum(wl_rad, T=298)  # 25 °C blackbody weight
        # Planck-weighted average over the band (weight scale cancels here).
        eps_avg = simpson(eps_rad * planck_weight, wl_rad) / simpson(planck_weight, wl_rad)
    else:
        # Fallback: data does not cover 8-13 μm — use the full-band mean instead.
        print(f"警告数据未覆盖8-13μm波段使用全波段平均发射率替代")
        n_film = cs_n(wl_fine)
        k_film = cs_k(wl_fine)
        eps_full, _, _ = thin_film_optical_properties(n_film, k_film, d, wl_fine)
        eps_avg = np.mean(eps_full)
    avg_eps_dict[d] = eps_avg
    # -----------------------------
    # Average solar absorptivity α_avg (0.3-2.5 μm, AM1.5-weighted)
    # -----------------------------
    if wl_all.min() <= 2.5 and wl_all.max() >= 0.3:
        wl_solar = np.linspace(0.3, 2.5, 300)  # solar band
        n_solar = cs_n(wl_solar)
        k_solar = cs_k(wl_solar)
        alpha_solar, _, _ = thin_film_optical_properties(n_solar, k_solar, d, wl_solar)
        solar_weight = solar_spectrum_am15(wl_solar)  # AM1.5 spectral weight
        # AM1.5-weighted average over the band.
        alpha_avg = simpson(alpha_solar * solar_weight, wl_solar) / simpson(solar_weight, wl_solar)
    else:
        # Fallback: use a typical low PDMS solar absorptivity.
        print(f"警告数据未覆盖0.3-2.5μm太阳波段使用PDMS典型值α=0.08")
        alpha_avg = 0.08
    avg_alpha_dict[d] = alpha_avg
# -----------------------------
# 4. System level: net cooling power
# -----------------------------
# Physical parameters of the operating scenario (adjust for your use case).
sigma = 5.67e-8  # Stefan-Boltzmann constant (W/m²·K⁴)
G_sun_list = [500, 700, 900, 1100]  # solar irradiance levels, cloudy -> clear sky (W/m²)
T_amb_list = np.linspace(293, 318, 6)  # ambient temperatures 20-45 °C, in kelvin
v_wind = 1.5  # wind speed (m/s)
h_conv = 5.6 + 3.1 * v_wind  # empirical convective heat-transfer coefficient (W/m²·K)
def net_cooling_power(eps, alpha, T_s, T_amb, G_sun, h_conv, sigma):
    """Net cooling power of the film surface (W/m²); positive means net cooling.

    P_net = thermal emission - solar gain - convective exchange - absorbed
    ambient radiation.
    """
    radiated = eps * sigma * T_s ** 4          # thermal emission from the film
    solar_gain = alpha * G_sun                 # absorbed solar power
    convective = h_conv * (T_s - T_amb)        # convective loss (+) or gain (-)
    ambient_gain = eps * sigma * T_amb ** 4    # absorbed ambient radiation
    return radiated - solar_gain - convective - ambient_gain
def solve_surface_temperature(eps, alpha, T_amb, G_sun, h_conv, sigma):
    """Newton-iterate the film surface temperature where net cooling power is zero.

    Returns the equilibrium temperature (K), clamped to [250, T_amb + 5];
    if the iteration does not converge within 100 steps, the last iterate
    is returned.
    """
    T_s = T_amb - 5  # start a little below ambient
    tol = 1e-3       # convergence tolerance (K)
    for _ in range(100):
        P = net_cooling_power(eps, alpha, T_s, T_amb, G_sun, h_conv, sigma)
        # Central-difference derivative for the Newton step.
        dP = (net_cooling_power(eps, alpha, T_s + 1e-4, T_amb, G_sun, h_conv, sigma)
              - net_cooling_power(eps, alpha, T_s - 1e-4, T_amb, G_sun, h_conv, sigma)) / (2e-4)
        if abs(dP) < 1e-6:
            break  # flat derivative — avoid dividing by ~zero
        candidate = T_s - P / dP
        # Clamp to a physically plausible range.
        candidate = max(250, min(T_amb + 5, candidate))
        if abs(candidate - T_s) < tol:
            return candidate
        T_s = candidate
    return T_s
# -----------------------------
# 5. Full chain: material performance -> system cooling performance
# -----------------------------
# System-level results keyed by film thickness.
system_results = {}
for d in thicknesses:
    eps = avg_eps_dict[d]
    alpha = avg_alpha_dict[d]
    # Result matrices indexed (solar irradiance x ambient temperature).
    P_net_matrix = np.zeros((len(G_sun_list), len(T_amb_list)))
    T_s_matrix = np.zeros((len(G_sun_list), len(T_amb_list)))
    # Sweep every irradiance / ambient-temperature combination.
    for i, G_sun in enumerate(G_sun_list):
        for j, T_amb in enumerate(T_amb_list):
            # Equilibrium surface temperature for this operating point.
            T_s = solve_surface_temperature(eps, alpha, T_amb, G_sun, h_conv, sigma)
            T_s_matrix[i, j] = T_s
            # Net cooling power at that temperature (near zero at equilibrium).
            P_net = net_cooling_power(eps, alpha, T_s, T_amb, G_sun, h_conv, sigma)
            P_net_matrix[i, j] = P_net
    system_results[d] = {
        "eps_avg": eps,
        "alpha_avg": alpha,
        "P_net": P_net_matrix,
        "T_s": T_s_matrix
    }
# -----------------------------
# 6. Result visualisation (full-chain analysis figures)
# -----------------------------
plt.rcParams['font.sans-serif'] = ['Arial']  # unified font
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
# Figure 1: material-level performance — average ε and α versus thickness.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
thicknesses_arr = np.array(thicknesses)
# Average emissivity bars.
ax1.bar(thicknesses_arr - 0.08, [system_results[d]["eps_avg"] for d in thicknesses],
        width=0.15, label='Avg Emissivity (8-13μm)', color='darkred', alpha=0.8)
ax1.set_xlabel('PDMS Thickness (μm)', fontsize=12)
ax1.set_ylabel('Emissivity', fontsize=12)
ax1.set_title('Average Emissivity (Radiative Cooling Window)', fontsize=14, fontweight='bold')
ax1.grid(True, alpha=0.3)
ax1.set_ylim(0, 1.05)
# Average solar absorptivity bars.
ax2.bar(thicknesses_arr - 0.08, [system_results[d]["alpha_avg"] for d in thicknesses],
        width=0.15, label='Avg Solar Absorptivity (0.3-2.5μm)', color='darkblue', alpha=0.8)
ax2.set_xlabel('PDMS Thickness (μm)', fontsize=12)
ax2.set_ylabel('Absorptivity', fontsize=12)
ax2.set_title('Average Solar Absorptivity', fontsize=14, fontweight='bold')
ax2.grid(True, alpha=0.3)
ax2.set_ylim(0, 0.2)  # tighter range for readability
plt.tight_layout()
plt.savefig('material_performance.png', dpi=300, bbox_inches='tight')
# Figure 2: system-level performance — net cooling power vs ambient temperature.
# Optimal thickness maximises ε/α (high emission, low solar absorption);
# the +0.01 guards against division by zero.
optimal_d = max(thicknesses, key=lambda x: system_results[x]["eps_avg"] / (system_results[x]["alpha_avg"] + 0.01))
print(
    f"\n最优厚度:{optimal_d} μmε={system_results[optimal_d]['eps_avg']:.4f}, α={system_results[optimal_d]['alpha_avg']:.4f}")
fig, ax = plt.subplots(figsize=(12, 6))
T_amb_c = T_amb_list - 273.15  # kelvin -> Celsius
colors = ['red', 'orange', 'green', 'blue']
for i, G_sun in enumerate(G_sun_list):
    P_net = system_results[optimal_d]["P_net"][i, :]
    ax.plot(T_amb_c, P_net, marker='o', markersize=6, linewidth=2,
            color=colors[i], label=f'Solar Irradiance = {G_sun} W/m²')
ax.set_xlabel('Ambient Temperature (°C)', fontsize=12)
ax.set_ylabel('Net Cooling Power (W/m²)', fontsize=12)
ax.set_title(f'Net Cooling Power vs Ambient Temperature (PDMS Thickness = {optimal_d} μm)',
             fontsize=14, fontweight='bold')
ax.grid(True, alpha=0.3)
ax.legend(fontsize=11)
# Zero line separates net cooling from net heating.
ax.axhline(y=0, color='black', linestyle='--', alpha=0.5, label='Zero Cooling Power')
plt.tight_layout()
plt.savefig('net_cooling_power.png', dpi=300, bbox_inches='tight')
# Figure 3: film surface temperature vs ambient temperature.
fig, ax = plt.subplots(figsize=(12, 6))
for i, G_sun in enumerate(G_sun_list):
    T_s = system_results[optimal_d]["T_s"][i, :] - 273.15  # kelvin -> Celsius
    ax.plot(T_amb_c, T_s, marker='s', markersize=6, linewidth=2,
            color=colors[i], label=f'Solar Irradiance = {G_sun} W/m²')
ax.set_xlabel('Ambient Temperature (°C)', fontsize=12)
ax.set_ylabel('PDMS Surface Temperature (°C)', fontsize=12)
ax.set_title(f'Surface Temperature vs Ambient Temperature (PDMS Thickness = {optimal_d} μm)',
             fontsize=14, fontweight='bold')
ax.grid(True, alpha=0.3)
ax.legend(fontsize=11)
# Reference line y = x: surface equals ambient temperature.
ax.plot(T_amb_c, T_amb_c, color='black', linestyle='--', alpha=0.5, label='Ambient Temperature')
plt.tight_layout()
plt.savefig('surface_temperature.png', dpi=300, bbox_inches='tight')
plt.show()
# -----------------------------
# 7. Key quantitative results
# -----------------------------
print("\n" + "=" * 60)
print("材料-系统全链条关键结果")
print("=" * 60)
for d in thicknesses:
    print(f"\n厚度 {d} μm:")
    print(f" - 平均发射率8-13μm: {system_results[d]['eps_avg']:.4f}")
    print(f" - 平均太阳吸收率0.3-2.5μm: {system_results[d]['alpha_avg']:.4f}")
    # NOTE(review): indices [2, 2] select G_sun_list[2]=900 W/m² and the third
    # entry of T_amb_list (303 K = 30 °C), matching the printed labels.
    print(f" - 最优工况净冷却功率T_amb=30℃, G_sun=900 W/m²: {system_results[d]['P_net'][2, 2]:.2f} W/m²")
    print(f" - 对应表面温度: {system_results[d]['T_s'][2, 2] - 273.15:.2f}")
print("\n" + "=" * 60)
print("结论PDMS薄膜的最优厚度为 {} μm在典型工况下30℃环境、900 W/m²太阳辐照".format(optimal_d))
print("可实现 {:.2f} W/m² 的净冷却功率,表面温度比环境低 {:.2f}".format(
    system_results[optimal_d]['P_net'][2, 2],
    30 - (system_results[optimal_d]['T_s'][2, 2] - 273.15)
))
print("=" * 60)

329
org/chatgpt2/q1_anaylis.py Normal file
View File

@@ -0,0 +1,329 @@
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
import warnings
from sklearn.metrics.pairwise import cosine_similarity
from org.chatgpt2.q1_2 import wl_max, wl_min
warnings.filterwarnings('ignore')
# -----------------------------
# 1. 通用工具函数(保持不变)
# -----------------------------
def make_strictly_increasing(wl, n, k):
    """Deduplicate wavelengths and enforce strict monotonic increase on (wl, n, k)."""
    # Remove exact duplicates, keeping the first occurrence of each wavelength.
    deduped, first_idx = np.unique(wl, return_index=True)
    removed = len(wl) - len(deduped)
    if removed != 0:
        print(f"Removed {removed} duplicate wavelength points")
        wl, n, k = wl[first_idx], n[first_idx], k[first_idx]
    # Drop samples that do not strictly exceed their predecessor.
    strictly_up = np.diff(wl) > 0
    if not strictly_up.all():
        keep = np.concatenate([[True], strictly_up])
        wl, n, k = wl[keep], n[keep], k[keep]
    return wl, n, k
def read_split_data(file_path):
    """Parse a split-format optical data file: a "wl n" table followed by a "wl k" table.

    Returns (wl, n, k) as float arrays sorted by ascending wavelength.

    Raises:
        ValueError: if the 'wl k' header is missing, or the n and k blocks
            do not sample the same wavelengths.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = [line.strip() for line in f if line.strip() and not line.startswith('#')]
    split_idx = None
    for i, line in enumerate(lines):
        if line == 'wl k':
            split_idx = i
            break
    # Fix: previously a missing 'wl k' header left split_idx as None, so
    # lines[1:None] silently swallowed the whole file and lines[None + 1:]
    # crashed with a TypeError. Fail fast with a clear message instead
    # (consistent with the q1_3 variant of this parser).
    if split_idx is None:
        raise ValueError("未找到'wl k'表头,请检查数据格式!")
    n_lines = lines[1:split_idx]
    wl_n, n_list = [], []
    for line in n_lines:
        parts = line.split()
        if len(parts) != 2:
            continue  # skip malformed rows instead of raising on unpack
        wl, n_val = parts
        wl_n.append(float(wl))
        n_list.append(float(n_val))
    k_lines = lines[split_idx + 1:]
    wl_k, k_list = [], []
    for line in k_lines:
        parts = line.split()
        if len(parts) != 2:
            continue  # skip malformed rows
        wl, k_val = parts
        wl_k.append(float(wl))
        k_list.append(float(k_val))
    wl_n, n_list = np.array(wl_n), np.array(n_list)
    wl_k, k_list = np.array(wl_k), np.array(k_list)
    # Explicit validation instead of `assert` (stripped under python -O); the
    # length guard turns mismatched blocks into a clear error rather than a
    # numpy broadcast failure.
    if len(wl_n) != len(wl_k) or not np.allclose(wl_n, wl_k):
        raise ValueError("n和k的波长列表不一致")
    sorted_idx = np.argsort(wl_n)
    return wl_n[sorted_idx], n_list[sorted_idx], k_list[sorted_idx]
def fresnel_reflectance(n1, k1, n2, k2):
    """Power reflectance of the interface between (n1, k1) and (n2, k2) at normal incidence."""
    index_a, index_b = n1 + 1j * k1, n2 + 1j * k2
    return np.abs((index_a - index_b) / (index_a + index_b)) ** 2
def thin_film_emissivity(n_film, k_film, d, wl, n_air=1.0, k_air=0.0, n_sub=1.5, k_sub=0.0, r=0.0):
    """Spectral emissivity of a film on a substrate at normal incidence.

    Interference-plus-absorption model with an empirical roughness blend:
    a fraction r of the reflection is replaced by a small diffuse term
    (0.05 * r). The result is clipped to [0, 1].
    """
    m_top = n_air + 1j * k_air
    m_mid = n_film + 1j * k_film
    m_bot = n_sub + 1j * k_sub
    # Interface power reflectances.
    R12 = np.abs((m_top - m_mid) / (m_top + m_mid)) ** 2
    R23 = np.abs((m_mid - m_bot) / (m_mid + m_bot)) ** 2
    # Complex phase thickness of the film; its imaginary part drives absorption.
    delta_c = 2 * np.pi * m_mid * d / wl
    attenuation = np.exp(-2 * np.imag(delta_c))
    cross = 2 * np.sqrt(R12 * R23 * attenuation) * np.cos(2 * np.real(delta_c))
    denom = 1 + R12 * R23 * attenuation + cross
    R_spec = (R12 + R23 * attenuation + cross) / denom
    # Roughness blend: fraction r scattered diffusely with a fixed 5% albedo.
    R_mix = (1 - r) * R_spec + r * (0.05 * r)
    T_coh = (1 - R12) * (1 - R23) * attenuation / denom
    return np.clip(1 - R_mix - T_coh, 0, 1)
# -----------------------------
# 2. 新增:局部相似度计算函数(滑动窗口法)
# -----------------------------
def calculate_local_similarity(eps1, eps2, wl_common, window_size=0.5):
    """Sliding-window local similarity between two emissivity spectra.

    For every wavelength point, a window of width `window_size` (μm) centred
    on it is examined. Returns three per-wavelength arrays:
      - Pearson correlation rescaled from [-1, 1] to [0, 1]
      - cosine similarity
      - mean absolute error
    Windows with fewer than 3 samples yield NaN in all three outputs.
    """
    num = len(wl_common)
    corr_curve = np.full(num, np.nan)
    cos_curve = np.full(num, np.nan)
    mae_curve = np.full(num, np.nan)
    half = window_size / 2
    for idx, centre in enumerate(wl_common):
        in_window = (wl_common >= centre - half) & (wl_common <= centre + half)
        if np.sum(in_window) < 3:
            continue  # too few samples for a meaningful statistic — leave NaN
        a = eps1[in_window]
        b = eps2[in_window]
        # Window-level similarity metrics.
        r, _ = pearsonr(a, b)
        c = cosine_similarity(a.reshape(1, -1), b.reshape(1, -1))[0][0]
        corr_curve[idx] = (r + 1) / 2  # map [-1, 1] -> [0, 1]
        cos_curve[idx] = c
        mae_curve[idx] = np.mean(np.abs(a - b))
    return corr_curve, cos_curve, mae_curve
# -----------------------------
# 3. 核心相似性分析函数(新增局部相似度计算)
# -----------------------------
def analyze_spectral_similarity(file1, file2, thicknesses=[1.0], n_sub=1.5, k_sub=0.0, r=0.0, window_size=0.5):
    """Compare emissivity spectra computed from two optical-data files.

    For each thickness, computes ε(λ) for both datasets over their common
    wavelength range, global similarity metrics (including atmospheric-window
    metrics when 8-13 μm is covered), sliding-window local similarity curves,
    and renders the comparison figure.
    Returns (similarity_results, wl_common, emissivity_dict, local_similarity_dict).

    NOTE(review): `thicknesses=[1.0]` is a mutable default argument; it is
    never mutated here, but `None` + fallback would be safer.
    """
    # Step 1: read and preprocess both datasets
    wl1, n1, k1 = read_split_data(file1)
    wl2, n2, k2 = read_split_data(file2)
    wl1, n1, k1 = make_strictly_increasing(wl1, n1, k1)
    wl2, n2, k2 = make_strictly_increasing(wl2, n2, k2)
    # Step 2: intersect the two wavelength ranges
    wl_min = max(wl1.min(), wl2.min())
    wl_max = min(wl1.max(), wl2.max())
    if wl_min >= wl_max:
        raise ValueError("两个文件的波长范围无交集,无法进行相似性分析!")
    wl_common = np.linspace(wl_min, wl_max, 1000)
    # Step 3: interpolate n and k onto the common grid
    cs_n1 = CubicSpline(wl1, n1)
    cs_k1 = CubicSpline(wl1, k1)
    cs_n2 = CubicSpline(wl2, n2)
    cs_k2 = CubicSpline(wl2, k2)
    n1_common = cs_n1(wl_common)
    k1_common = cs_k1(wl_common)
    n2_common = cs_n2(wl_common)
    k2_common = cs_k2(wl_common)
    # Step 4: emissivity spectra and local similarity curves per thickness
    emissivity_dict = {}
    local_similarity_dict = {}
    for d in thicknesses:
        eps1 = thin_film_emissivity(n1_common, k1_common, d, wl_common, n_sub=n_sub, k_sub=k_sub, r=r)
        eps2 = thin_film_emissivity(n2_common, k2_common, d, wl_common, n_sub=n_sub, k_sub=k_sub, r=r)
        emissivity_dict[d] = (eps1, eps2)
        # Sliding-window similarity along the wavelength axis.
        local_corr, local_cos_sim, local_mae = calculate_local_similarity(eps1, eps2, wl_common, window_size)
        local_similarity_dict[d] = (local_corr, local_cos_sim, local_mae)
    # Step 5: global similarity metrics
    similarity_results = {}
    for d in thicknesses:
        eps1, eps2 = emissivity_dict[d]
        pearson_corr, _ = pearsonr(eps1, eps2)
        cos_sim = cosine_similarity(eps1.reshape(1, -1), eps2.reshape(1, -1))[0][0]
        mse = mean_squared_error(eps1, eps2)
        # MSE normalised by the larger variance; 1e-8 guards near-flat spectra.
        norm_mse = mse / (np.max([np.var(eps1), np.var(eps2)]) + 1e-8)
        mae = np.mean(np.abs(eps1 - eps2))
        # Atmospheric-window (8-13 μm) metrics, when the band is covered.
        window_corr, window_mae = None, None
        if wl_min <= 13 and wl_max >= 8:
            window_mask = (wl_common >= 8) & (wl_common <= 13)
            eps1_window = eps1[window_mask]
            eps2_window = eps2[window_mask]
            window_corr, _ = pearsonr(eps1_window, eps2_window)
            window_mae = np.mean(np.abs(eps1_window - eps2_window))
        similarity_results[d] = {
            "pearson_correlation": pearson_corr,
            "cosine_similarity": cos_sim,
            "normalized_mse": norm_mse,
            "mae": mae,
            "window_pearson_correlation": window_corr,
            "window_mae": window_mae
        }
    # Step 6: visualisation (includes the similarity-curve column)
    plot_spectral_comparison(wl_common, emissivity_dict, local_similarity_dict, thicknesses, wl_min, wl_max)
    return similarity_results, wl_common, emissivity_dict, local_similarity_dict
# -----------------------------
# 4. 可视化函数(新增相似度曲线子图)
# -----------------------------
def plot_spectral_comparison(wl_common, emissivity_dict, local_similarity_dict, thicknesses, wl_min, wl_max):
    """Render a 3-column comparison figure per thickness.

    Columns: (1) the two emissivity spectra overlaid, (2) their absolute
    difference, (3) the sliding-window local similarity curves with 0.8/0.9
    threshold lines. Saves 'spectral_similarity_complete.png' and shows it.
    """
    n_plots = len(thicknesses)
    fig, axes = plt.subplots(n_plots, 3, figsize=(18, 5 * n_plots))  # third column holds similarity curves
    plt.rcParams['font.sans-serif'] = ['Arial']
    for idx, d in enumerate(thicknesses):
        eps1, eps2 = emissivity_dict[d]
        local_corr, local_cos_sim, local_mae = local_similarity_dict[d]
        diff = np.abs(eps1 - eps2)
        # Subplot 1: emissivity spectra overlay.
        # With a single thickness plt.subplots returns a 1-D axes array,
        # hence the axes[0]/axes[1]/axes[2] fallbacks below.
        ax1 = axes[idx, 0] if n_plots > 1 else axes[0]
        ax1.plot(wl_common, eps1, linewidth=2, label='data.txt', color='darkblue')
        ax1.plot(wl_common, eps2, linewidth=2, label='data2.txt', color='darkred', linestyle='--')
        if wl_min <= 13 and wl_max >= 8:
            ax1.axvspan(8, 13, alpha=0.15, color='orange', label='Atmospheric Window (8-13 μm)')
        ax1.set_xlabel('Wavelength (μm)', fontsize=12)
        ax1.set_ylabel('Emissivity ε(λ)', fontsize=12)
        ax1.set_title(f'Emissivity Spectrum Comparison (Thickness = {d} μm)', fontsize=14, fontweight='bold')
        ax1.grid(True, alpha=0.3)
        ax1.legend(fontsize=10)
        ax1.set_ylim(0, 1.05)
        # Subplot 2: absolute emissivity difference.
        ax2 = axes[idx, 1] if n_plots > 1 else axes[1]
        ax2.plot(wl_common, diff, linewidth=2, color='darkgreen', label='Absolute Difference |ε1 - ε2|')
        ax2.fill_between(wl_common, 0, diff, alpha=0.3, color='darkgreen')
        if wl_min <= 13 and wl_max >= 8:
            ax2.axvspan(8, 13, alpha=0.15, color='orange', label='Atmospheric Window (8-13 μm)')
        ax2.set_xlabel('Wavelength (μm)', fontsize=12)
        ax2.set_ylabel('Absolute Difference', fontsize=12)
        ax2.set_title(f'Emissivity Difference (Thickness = {d} μm)', fontsize=14, fontweight='bold')
        ax2.grid(True, alpha=0.3)
        ax2.legend(fontsize=10)
        ax2.set_ylim(0, np.nanmax(diff) * 1.2)
        # Subplot 3: wavelength-dependent similarity curves.
        ax3 = axes[idx, 2] if n_plots > 1 else axes[2]
        # Local Pearson correlation (already rescaled to [0, 1]).
        ax3.plot(wl_common, local_corr, linewidth=2.5, color='#4B0082', label='Local Pearson Correlation (0-1)')
        # Local cosine similarity (0-1).
        ax3.plot(wl_common, local_cos_sim, linewidth=2.5, color='orange', label='Local Cosine Similarity (0-1)',
                 linestyle='--')
        # Similarity thresholds: 0.9 = high similarity, 0.8 = medium.
        ax3.axhline(y=0.9, color='red', linestyle=':', linewidth=1.5, label='High Similarity Threshold (0.9)')
        ax3.axhline(y=0.8, color='orange', linestyle=':', linewidth=1.5, label='Medium Similarity Threshold (0.8)')
        # Highlight the atmospheric window when covered.
        if wl_min <= 13 and wl_max >= 8:
            ax3.axvspan(8, 13, alpha=0.15, color='orange', label='Atmospheric Window (8-13 μm)')
        ax3.set_xlabel('Wavelength (μm)', fontsize=12)
        ax3.set_ylabel('Local Similarity (0-1)', fontsize=12)
        ax3.set_title(f'Wavelength-Dependent Similarity Curve (Thickness = {d} μm)', fontsize=14, fontweight='bold')
        ax3.grid(True, alpha=0.3)
        ax3.legend(fontsize=10)
        ax3.set_ylim(0, 1.05)  # similarity range 0-1
        ax3.set_xlim(wl_min, wl_max)
    plt.tight_layout()
    plt.savefig('spectral_similarity_complete.png', dpi=300, bbox_inches='tight')
    plt.show()
# -----------------------------
# 5. 主程序执行
# -----------------------------
if __name__ == "__main__":
    # Input data files; adjust the paths to your environment.
    file1 = '/Users/spasolreisa/IdeaProjects/asiaMath/data.txt'
    file2 = '/Users/spasolreisa/IdeaProjects/asiaMath/data2.txt'
    target_thicknesses = [0.5, 1.0, 1.5, 2.0]
    # Substrate optical constants and roughness fraction for the film model.
    base_params = {
        'n_sub': 1.5,
        'k_sub': 0.0,
        'r': 0.05
    }
    window_size = 0.5  # sliding-window width (μm); 0.3-0.8 recommended
    print("=== 开始光谱相似性分析(含相似度曲线) ===")
    print(f"文件1: {file1}")
    print(f"文件2: {file2}")
    print(f"分析厚度: {target_thicknesses} μm")
    print(f"滑动窗口宽度: {window_size} μm")
    print("-" * 50)
    results, wl_common, emissivity_dict, local_similarity_dict = analyze_spectral_similarity(
        file1, file2,
        thicknesses=target_thicknesses,
        n_sub=base_params['n_sub'],
        k_sub=base_params['k_sub'],
        r=base_params['r'],
        window_size=window_size
    )
    # Report the global similarity metrics per thickness.
    print("\n=== 全局相似性指标 ===")
    for d in target_thicknesses:
        res = results[d]
        print(f"\n【厚度 {d} μm】")
        print(f"全局皮尔逊相关系数: {res['pearson_correlation']:.4f}")
        print(f"全局余弦相似度: {res['cosine_similarity']:.4f}")
        print(f"归一化均方误差: {res['normalized_mse']:.4f}")
        print(f"平均绝对误差: {res['mae']:.4f}")
        if res['window_pearson_correlation'] is not None:
            print(f"大气窗口全局相关系数: {res['window_pearson_correlation']:.4f}")
    # Local similarity statistics inside the atmospheric window.
    print("\n=== 大气窗口8-13μm局部相似度统计 ===")
    for d in target_thicknesses:
        local_corr, local_cos_sim, _ = local_similarity_dict[d]
        # NOTE(review): wl_min/wl_max here are the module-level values imported
        # from org.chatgpt2.q1_2, NOT the common range computed inside
        # analyze_spectral_similarity — confirm this is intentional.
        if wl_min <= 13 and wl_max >= 8:
            window_mask = (wl_common >= 8) & (wl_common <= 13)
            window_local_corr = local_corr[window_mask]
            window_local_cos = local_cos_sim[window_mask]
            # Drop NaNs produced by windows that had too few samples.
            window_local_corr = window_local_corr[~np.isnan(window_local_corr)]
            window_local_cos = window_local_cos[~np.isnan(window_local_cos)]
            if len(window_local_corr) > 0:
                print(f"\n【厚度 {d} μm】")
                print(f"大气窗口局部相关系数均值: {np.mean(window_local_corr):.4f}")
                print(f"大气窗口局部相关系数最小值: {np.min(window_local_corr):.4f}")
                print(f"大气窗口局部余弦相似度均值: {np.mean(window_local_cos):.4f}")
    # Interpretation guide for the printed/plotted results.
    print("\n=== 结果解读 ===")
    print("1. 相似度曲线子图3值越接近1对应波长下的发射率越相似")
    print("2. 高相似区域≥0.9):两文件在该波段的辐射冷却性能几乎一致;")
    print("3. 低相似区域(<0.8):需关注该波段的材料差异对冷却效果的影响;")
    print("4. 大气窗口内相似度:优先关注该区域,直接决定辐射冷却核心性能是否一致。")

321
org/chatgpt2/q2_2.py Normal file
View File

@@ -0,0 +1,321 @@
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from scipy.integrate import simpson
import os
# -----------------------------
# Configuration (Update File Path!)
# -----------------------------
DATA_FILE_PATH = "/Users/spasolreisa/IdeaProjects/asiaMath/data.txt"  # Replace with your data.txt absolute path
THICKNESSES = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0]  # PDMS thicknesses (μm) to evaluate
T_AMBIENT = 300  # Ambient temperature (K)
SOLAR_IRRADIANCE = 1000  # AM1.5 solar irradiance (W/m²)
CONVECTION_COEFF = 10  # Convection coefficient (W/(m²K))
SIGMA = 5.67e-8  # Stefan-Boltzmann constant (W/(m²K⁴))
# -----------------------------
# 1. Fixed Data Parsing Function (Critical Fix for "wl" String Error)
# -----------------------------
def _parse_two_column(block_lines):
    """Parse "x y" data lines into two float arrays, skipping malformed rows."""
    xs, ys = [], []
    for line in block_lines:
        parts = line.split()
        if len(parts) != 2:
            continue  # wrong column count
        try:
            x_val = float(parts[0])
            y_val = float(parts[1])
        except ValueError:
            continue  # non-numeric row
        xs.append(x_val)
        ys.append(y_val)
    return np.array(xs), np.array(ys)


def read_split_data(file_path):
    """Read and parse split-format data (wl+n followed by wl+k).

    The file must contain two 2-column blocks introduced by "wl n" and
    "wl k" headers (case-insensitive, in either order). Invalid lines are
    skipped. If the two blocks sample different wavelengths, k is
    interpolated onto n's wavelength grid.

    Returns (wl, n, k) as numpy arrays sorted by ascending wavelength.

    Raises:
        FileNotFoundError: file does not exist.
        ValueError: wrong number/kind of headers or an empty data block.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")
    # Read all lines, skip empty lines and comments
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = []
        for line in f:
            stripped = line.strip()
            if stripped and not stripped.startswith('#'):
                lines.append(stripped)
    # Step 1: Identify the headers — exactly ["wl", "n"] or ["wl", "k"].
    header_indices = []
    for i, line in enumerate(lines):
        parts = line.split()
        if len(parts) == 2 and parts[0].lower() == "wl" and parts[1].lower() in ["n", "k"]:
            header_indices.append(i)
    if len(header_indices) != 2:
        raise ValueError(
            f"Invalid number of headers! Expected 2 (wl+n and wl+k), found {len(header_indices)}.\nCheck data.txt format.")
    # Step 2: Assign each block by its header *kind*. Fix: the old code
    # assumed the first block was always n — when "wl k" preceded "wl n",
    # the two tables were silently swapped.
    first_idx, second_idx = header_indices
    kinds = [lines[first_idx].split()[1].lower(), lines[second_idx].split()[1].lower()]
    if set(kinds) != {"n", "k"}:
        raise ValueError("Expected one 'wl n' header and one 'wl k' header.")
    blocks = {
        kinds[0]: lines[first_idx + 1: second_idx],
        kinds[1]: lines[second_idx + 1:],
    }
    # Steps 3-4: Parse both blocks, skipping any invalid lines.
    wl_n, n_list = _parse_two_column(blocks["n"])
    wl_k, k_list = _parse_two_column(blocks["k"])
    if len(wl_n) == 0:
        raise ValueError("No valid n data found! Check the format between wl+n and wl+k headers.")
    if len(wl_k) == 0:
        raise ValueError("No valid k data found! Check the format after wl+k header.")
    # Align wavelengths. Fix: guard the lengths first — with different-sized
    # blocks np.allclose raised a broadcast error before the alignment code
    # (written exactly for that case) could ever run.
    if len(wl_n) != len(wl_k) or not np.allclose(wl_n, wl_k, rtol=1e-6):
        print("Warning: Wavelengths for n and k do not match. Automatically aligning...")
        # Use n's wavelengths as reference; interpolate k onto them.
        k_list = np.interp(wl_n, np.sort(wl_k), k_list[np.argsort(wl_k)])
        wl_k = wl_n  # Sync k's wavelengths to n's
    # Sort by wavelength (ascending) to avoid interpolation errors downstream.
    sorted_idx = np.argsort(wl_n)
    sorted_wl = wl_n[sorted_idx]
    sorted_n = n_list[sorted_idx]
    sorted_k = k_list[sorted_idx]
    print(f"Data loaded successfully: {len(sorted_wl)} valid wavelength points")
    print(f"Wavelength range: {sorted_wl.min():.2f}{sorted_wl.max():.2f} μm")
    return sorted_wl, sorted_n, sorted_k
# -----------------------------
# 2. Core Functions (Unchanged)
# -----------------------------
def planck_function(wl, T):
    """Planck blackbody spectral exitance in W/(m²·μm); wl in μm, T in K.

    Bug fix: the original divided by (wl * 1e-6)**5 — wavelength in metres —
    while c1 is expressed in micron units (W·μm⁴/m²), inflating every value
    by a constant 1e30 (and the old docstring's W/(m³·sr) unit was wrong).
    Only the spectral shape reaches the Planck-weighted averages, so their
    results are unchanged, but the absolute magnitude is now correct.
    """
    c1 = 3.7418e8  # First radiation constant (W·μm⁴/m²)
    c2 = 14388  # Second radiation constant (μm·K)
    return c1 / (wl ** 5 * (np.exp(c2 / (wl * T)) - 1))
def solar_spectrum_am15(wl):
    """AM1.5 global solar irradiance, W/(m²·μm), via a piecewise-linear empirical fit.

    Zero outside the 0.3-2.5 μm band; wl is expected to be a numpy array.
    """
    spectrum = np.zeros_like(wl)
    in_band = (wl >= 0.3) & (wl <= 2.5)
    w = wl[in_band]
    # Four linear segments of the empirical fit.
    seg = np.empty_like(w)
    lo = w < 0.5
    mid1 = (w >= 0.5) & (w < 1.0)
    mid2 = (w >= 1.0) & (w < 1.5)
    hi = w >= 1.5
    seg[lo] = 800 + 400 * w[lo]
    seg[mid1] = 1000 - 200 * (w[mid1] - 0.5)
    seg[mid2] = 900 - 100 * (w[mid2] - 1.0)
    seg[hi] = 750 - 200 * (w[hi] - 1.5)
    spectrum[in_band] = seg
    return spectrum
def fresnel_reflectance(n1, k1, n2, k2):
    """Fresnel power reflectance at normal incidence between two complex-index media."""
    medium_a = n1 + 1j * k1
    medium_b = n2 + 1j * k2
    amplitude = (medium_a - medium_b) / (medium_a + medium_b)
    return np.abs(amplitude) ** 2
def thin_film_optical_properties(n_film, k_film, d, wl):
    """Calculate emissivity (ε), reflectance (R) and transmissivity (T) of a
    free-standing thin film in air at normal incidence.

    Multiple-beam interference with absorption; by Kirchhoff's law the
    returned absorptivity equals the emissivity (α = ε).
    Returns (alpha_total, R_total, T_total).
    """
    R12 = fresnel_reflectance(1.0, 0.0, n_film, k_film)  # Air -> Film
    R23 = fresnel_reflectance(n_film, k_film, 1.0, 0.0)  # Film -> Air
    delta = 2 * np.pi * n_film * d / wl  # single-pass phase thickness
    attenuation = np.exp(-4 * np.pi * k_film * d / wl)  # round-trip absorption factor
    # Hoist the shared interference term and denominator — the original
    # rebuilt this identical expression three times inline (the q1_3 variant
    # already factors it out; this keeps the two copies consistent).
    interference = 2 * np.sqrt(R12 * R23 * attenuation) * np.cos(2 * delta)
    denominator = 1 + R12 * R23 * attenuation + interference
    R_total = (R12 + R23 * attenuation + interference) / denominator
    T_total = (1 - R12) * (1 - R23) * attenuation / denominator
    alpha_total = 1 - R_total - T_total  # Kirchhoff's law (α = ε in equilibrium)
    return alpha_total, R_total, T_total
# -----------------------------
# 3. Evaluation Model (Unchanged)
# -----------------------------
def evaluate_radiative_cooling(wl_all, n_all, k_all, thickness):
    """Calculate KPIs and comprehensive score for a given PDMS thickness.

    KPIs: Planck-weighted average emissivity (8-13 μm), AM1.5-weighted solar
    absorptivity (0.3-2.5 μm), maximum cooling temperature ΔT_max from a
    Newton heat-balance solve, and the cooling-efficiency ratio ε/α.
    Returns a dict with these values plus a 0-100 composite score.
    """
    cs_n = CubicSpline(wl_all, n_all)
    cs_k = CubicSpline(wl_all, k_all)
    # KPI 1: Average Emissivity in 813 μm (weighted by Planck function)
    wl_window = np.linspace(8, 13, 500)
    # Check if data covers the window (otherwise extrapolate the splines)
    if wl_all.min() > 8 or wl_all.max() < 13:
        print(f"Warning: Data does not fully cover 813 μm window. Extrapolating...")
        n_window = cs_n(wl_window, extrapolate=True)
        k_window = cs_k(wl_window, extrapolate=True)
    else:
        n_window = cs_n(wl_window)
        k_window = cs_k(wl_window)
    eps_window, _, _ = thin_film_optical_properties(n_window, k_window, thickness, wl_window)
    planck = planck_function(wl_window, T_AMBIENT)
    # Weight scale cancels in this normalised average.
    eps_avg = simpson(eps_window * planck, wl_window) / simpson(planck, wl_window)
    # KPI 2: Average Solar Absorptivity in 0.32.5 μm (weighted by AM1.5)
    wl_solar = np.linspace(0.3, 2.5, 500)
    if wl_all.min() > 2.5 or wl_all.max() < 0.3:
        print(f"Warning: Data does not cover solar spectrum (0.32.5 μm). Using default PDMS properties...")
        n_solar = np.ones_like(wl_solar) * 1.4  # Typical PDMS n in solar range
        k_solar = np.ones_like(wl_solar) * 1e-6  # Typical PDMS k in solar range
    else:
        n_solar = cs_n(wl_solar, extrapolate=True)
        k_solar = cs_k(wl_solar, extrapolate=True)
    alpha_solar, _, _ = thin_film_optical_properties(n_solar, k_solar, thickness, wl_solar)
    solar_irr = solar_spectrum_am15(wl_solar)
    alpha_avg = simpson(alpha_solar * solar_irr, wl_solar) / simpson(solar_irr, wl_solar)
    # KPI 3: Maximum Cooling Temperature (ΔT_max)
    def heat_flux(T_film):
        # Heat balance at film temperature T_film: radiated - solar gain - convection.
        # NOTE(review): the effective emissivity re-weights only the 8-13 μm
        # window at T_film; emission outside the window is neglected — confirm
        # this matches the intended model.
        planck_film = planck_function(wl_window, T_film)
        eps_eff = simpson(eps_window * planck_film, wl_window) / simpson(planck_film, wl_window)
        return SIGMA * eps_eff * T_film ** 4 - alpha_avg * SOLAR_IRRADIANCE - CONVECTION_COEFF * (T_film - T_AMBIENT)
    # Newton-Raphson iteration (stable convergence)
    T_film = T_AMBIENT - 10  # Initial guess
    for _ in range(50):
        q = heat_flux(T_film)
        if abs(q) < 1e-3:
            break
        # Numerical derivative (more stable than analytical)
        dq_dT = (heat_flux(T_film + 1e-4) - heat_flux(T_film - 1e-4)) / (2e-4)
        T_film -= q / dq_dT
        # Prevent unrealistic temperatures
        if T_film < 200 or T_film > T_AMBIENT:
            T_film = max(200, min(T_AMBIENT - 5, T_film))
    delta_T = T_AMBIENT - T_film
    # KPI 4: Cooling Efficiency Ratio (η_CR)
    eta_cr = eps_avg / (alpha_avg + 0.01)  # +0.01 to avoid division by zero
    # Comprehensive Score (0100): weighted blend of the four KPIs.
    score = 0.0
    score += 40 * min(eps_avg, 1.0)  # Cap at 1.0 (ideal emissivity)
    score += 35 * (1 - min(alpha_avg, 1.0))  # Lower absorption = higher score
    score += 15 * min(delta_T / 40, 1.0)  # ΔT theoretical upper limit = 40K
    score += 10 * min(eta_cr / 100, 1.0)  # Cap at 100 (ideal ratio)
    return {
        "thickness": thickness,
        "eps_8-13": eps_avg,
        "alpha_0.3-2.5": alpha_avg,
        "delta_T_max": delta_T,
        "eta_cr": eta_cr,
        "comprehensive_score": score
    }
# -----------------------------
# 4. Main Execution (Unchanged)
# -----------------------------
if __name__ == "__main__":
    try:
        # Load the (wavelength, n, k) dataset from the split-format file.
        wl_all, n_all, k_all = read_split_data(DATA_FILE_PATH)
        print("\n" + "-" * 50 + "\n")

        # Evaluate every candidate film thickness and report its KPIs.
        results = []
        for d in THICKNESSES:
            res = evaluate_radiative_cooling(wl_all, n_all, k_all, d)
            results.append(res)
            print(f"Thickness: {d} μm")
            print(f" - Avg Emissivity (8–13 μm): {res['eps_8-13']:.4f}")
            print(f" - Avg Solar Absorptivity (0.3–2.5 μm): {res['alpha_0.3-2.5']:.4f}")
            print(f" - Max Cooling Temperature: {res['delta_T_max']:.2f} K")
            print(f" - Cooling Efficiency Ratio: {res['eta_cr']:.2f}")
            print(f" - Comprehensive Score: {res['comprehensive_score']:.1f}/100\n")

        # Pack the KPIs into a 2-D array (one row per thickness) for plotting.
        results_arr = np.array([[
            res["thickness"], res["eps_8-13"], res["alpha_0.3-2.5"],
            res["delta_T_max"], res["comprehensive_score"]
        ] for res in results])

        # 2x2 grid of KPI-vs-thickness curves.
        # Idiom fix: one statement per Axes call, instead of comma-joined
        # expressions that built and discarded throwaway tuples.
        fig, axes = plt.subplots(2, 2, figsize=(14, 10))
        fig.suptitle("PDMS Thin Film Radiative Cooling Performance vs Thickness", fontsize=16, fontweight='bold')

        # Emissivity (8–13 μm)
        axes[0, 0].plot(results_arr[:, 0], results_arr[:, 1], 'o-', color='darkred', linewidth=2, markersize=6)
        axes[0, 0].set_xlabel("Thickness (μm)", fontsize=12)
        axes[0, 0].set_ylabel("Avg Emissivity (8–13 μm)", fontsize=12)
        axes[0, 0].grid(True, alpha=0.3)
        axes[0, 0].set_ylim(0, 1.05)

        # Solar Absorptivity (0.3–2.5 μm)
        axes[0, 1].plot(results_arr[:, 0], results_arr[:, 2], 's-', color='darkblue', linewidth=2, markersize=6)
        axes[0, 1].set_xlabel("Thickness (μm)", fontsize=12)
        axes[0, 1].set_ylabel("Avg Solar Absorptivity (0.3–2.5 μm)", fontsize=12)
        axes[0, 1].grid(True, alpha=0.3)
        axes[0, 1].set_ylim(0, 0.5)

        # Max Cooling Temperature
        axes[1, 0].plot(results_arr[:, 0], results_arr[:, 3], '^-', color='darkgreen', linewidth=2, markersize=6)
        axes[1, 0].set_xlabel("Thickness (μm)", fontsize=12)
        axes[1, 0].set_ylabel("Max Cooling Temperature (K)", fontsize=12)
        axes[1, 0].grid(True, alpha=0.3)

        # Comprehensive Score
        axes[1, 1].plot(results_arr[:, 0], results_arr[:, 4], 'd-', color='darkorange', linewidth=2, markersize=6)
        axes[1, 1].set_xlabel("Thickness (μm)", fontsize=12)
        axes[1, 1].set_ylabel("Comprehensive Score (0–100)", fontsize=12)
        axes[1, 1].grid(True, alpha=0.3)
        axes[1, 1].set_ylim(0, 100)

        plt.tight_layout()
        plt.savefig("PDMS_radiative_cooling_evaluation.png", dpi=300, bbox_inches='tight')
        plt.show()

        # Report the thickness with the best composite score.
        optimal = max(results, key=lambda x: x["comprehensive_score"])
        print("=" * 50)
        print(f"Optimal PDMS Thickness: {optimal['thickness']} μm")
        print(f"Best Comprehensive Score: {optimal['comprehensive_score']:.1f}/100")
        print(
            f"Key Performance: ε(8-13μm)={optimal['eps_8-13']:.4f}, α(0.3-2.5μm)={optimal['alpha_0.3-2.5']:.4f}, ΔT={optimal['delta_T_max']:.2f}K")
        print("=" * 50)
    except Exception as e:
        # Top-level boundary: surface the error plus data-format guidance.
        print(f"\nError: {e}")
        print("\nTroubleshooting Steps:")
        print("1. Check data.txt format: Ensure it has exactly two headers (e.g., 'wl n' and 'wl k')")
        print("2. Example valid format:")
        print(" wl n")
        print(" 0.40 1.41491")
        print(" 0.41 1.41403")
        print(" ...")
        print(" wl k")
        print(" 0.40 1.40E-06")
        print(" 0.41 1.38E-06")
        print("3. Ensure no extra 'wl' strings in data lines (only numbers)")
        print("4. Use space or tab as separator (avoid commas)")