feat(validation): added more of the scripts to make paper figures

This commit is contained in:
2026-04-20 12:41:10 -04:00
parent 3a22792fd1
commit bbd702904a
38 changed files with 130679 additions and 2069 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,472 @@
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
import pynucastro as pyna
import os
import sys
import importlib.util
import time
import matplotlib.lines as mlines
import re
import json
import argparse
from fourdst.composition import Composition
from gridfire.type import NetIn
from gridfire.engine import GraphEngine
from gridfire.solver import PointSolver, PointSolverContext
from tqdm import tqdm
from fourdst.composition.utils import buildCompositionFromMassFractions
def T9(age):
    """Photon temperature in units of 10^9 K at time *age* (seconds): T9 = 10 / sqrt(t)."""
    sqrt_age = np.sqrt(age)
    return 10.0 / sqrt_age
def get_density(age):
    """Baryon mass density (g/cm^3) at time *age*, scaling as the cube of T9."""
    temp9 = T9(age)
    return 4e-5 * temp9 ** 3
def get_pyna_rate(my_rate_str, library):
    """Find pynucastro rates matching a GridFire-style rate string.

    Parses an identifier of the form ``target(projectile,ejectiles)product``
    (e.g. ``he3(he3,2p)he4``), converts the species to pynucastro ``Nucleus``
    objects, and first tries the library's nucleus-based lookup; if that
    returns nothing, falls back to a linear scan of the whole library,
    comparing sorted reactant/product name lists while ignoring leptons
    and photons.

    Parameters
    ----------
    my_rate_str : str
        GridFire-style rate identifier.
    library
        A pynucastro rate library (e.g. ``pyna.ReacLibLibrary()``).

    Returns
    -------
    list
        Matching pynucastro rate objects; empty if parsing fails or no
        match is found.
    """
    match = re.match(r"([a-zA-Z0-9]+)\(([^,]*),([^)]*)\)(.*)", my_rate_str)
    if not match:
        print(f"Could not parse string format: {my_rate_str}")
        return []
    target = match.group(1)
    projectile = match.group(2)
    ejectiles = match.group(3)
    product = match.group(4)
    def expand_species(s_str):
        # Expand a whitespace-separated species list, honoring numeric
        # multiplicities ("2p" -> ["p", "p"]); photons ('g') are dropped
        # and the alpha shorthand 'a' is rewritten to 'he4'.
        if not s_str or s_str.strip() == "":
            return []
        parts = s_str.split()
        expanded = []
        for p in parts:
            if p == 'g':
                continue
            mult_match = re.match(r"^(\d+)([a-zA-Z0-9]+)$", p)
            if mult_match:
                count = int(mult_match.group(1))
                spec = mult_match.group(2)
            else:
                count = 1
                spec = p
            # Re-check after splitting off a multiplicity (e.g. "2g").
            if spec == 'g':
                continue
            if spec == 'a': spec = 'he4'
            expanded.extend([spec] * count)
        return expanded
    reactants_str = [target] + expand_species(projectile)
    products_str = expand_species(ejectiles) + [product]
    try:
        r_nuc = [pyna.Nucleus(r) for r in reactants_str]
        p_nuc = [pyna.Nucleus(p) for p in products_str]
    except Exception as e:
        print(f"Error converting nuclei for {my_rate_str}: {e}")
        return []
    rates = library.get_rate_by_nuclei(r_nuc, p_nuc)
    if rates:
        # get_rate_by_nuclei may hand back a single rate or a list;
        # normalize so callers always receive a list.
        if not isinstance(rates, list):
            return [rates]
        return rates
    # Fallback: brute-force match on sorted species names, ignoring
    # leptons/photons that GridFire and ReacLib may list inconsistently.
    r_nuc_names = sorted([str(n) for n in r_nuc])
    p_nuc_names = sorted([str(n) for n in p_nuc])
    ignore_list = ['e-', 'e+', 'g', 'nu', 'anu']
    matched_rates = []
    for rate in library.get_rates():
        lib_r_names = sorted([str(n) for n in rate.reactants if str(n) not in ignore_list])
        lib_p_names = sorted([str(n) for n in rate.products if str(n) not in ignore_list])
        if r_nuc_names == lib_r_names and p_nuc_names == lib_p_names:
            matched_rates.append(rate)
    return matched_rates
def load_network_module(filepath):
    """Dynamically (re)load the generated pynucastro network module at *filepath*.

    Any previously loaded module of the same name is evicted from
    ``sys.modules`` first so repeated runs pick up a freshly written
    network file rather than a cached one.

    Parameters
    ----------
    filepath : str
        Path to the generated ``.py`` network file.

    Returns
    -------
    module
        The fully executed module object.

    Raises
    ------
    FileNotFoundError
        If no import spec (or loader) can be created for *filepath*.
    """
    # splitext only strips a trailing extension; str.replace(".py", "")
    # would also mangle a name like "my.pyna_net.py".
    module_name = os.path.splitext(os.path.basename(filepath))[0]
    # Drop any cached copy so a rewritten network file is re-executed.
    sys.modules.pop(module_name, None)
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    if spec is None or spec.loader is None:
        raise FileNotFoundError(f"Error: could not find module at {filepath}")
    network_module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = network_module
    try:
        spec.loader.exec_module(network_module)
    except Exception:
        # Don't leave a half-initialized module cached on failure.
        sys.modules.pop(module_name, None)
        raise
    return network_module
def main(args):
    """Compare a GridFire BBN burn against pynucastro under identical conditions.

    Workflow:
      1. Integrate the network with GridFire along the trajectory
         T9(t) = 10/sqrt(t), rho(t) = 4e-5 * T9^3, with a geometric
         timestep dt = h*t, recording each step's (t, dt, T, rho).
      2. Map every GridFire reaction onto pynucastro/ReacLib rates.
      3. Evaluate both codes' rates at each recorded step and report any
         relative mismatch above a threshold.
      4. Replay the exact same thermodynamic steps through a generated
         pynucastro network (scipy Radau integrator).
      5. Dump everything to ``bbn_simulation_data.json`` and plot a
         GridFire-vs-pynucastro abundance comparison to ``bbn_comparison.pdf``.

    Parameters
    ----------
    args : argparse.Namespace
        Expects ``args.depth`` (int or None) and ``args.filter_photo`` (bool).
    """
    # --- Integration controls ---
    tMax = 3600.0          # end of the burn [s]
    h = 0.01               # fractional step size: dt = h * t
    current_time = 180.0   # start time [s]
    # Initial proton-to-neutron mass-fraction ratio.
    XpXn = 7.17
    Xn = 1.0 / (1.0 + XpXn)
    Xp = 1.0 - Xn
    comp: Composition = buildCompositionFromMassFractions(["H-1", "n-1"], [Xp, Xn])
    netIn = NetIn()
    netIn.composition = comp
    netIn.dt0 = 1e-12  # initial trial sub-step handed to the solver [s]
    # Optionally restrict graph assembly depth; the full network is large.
    if args.depth is not None:
        print(f"Initializing GridFire GraphEngine with restricted depth = {args.depth}")
        engine = GraphEngine(comp, args.depth)
    else:
        print("Initializing full-depth GridFire GraphEngine (Note: pynucastro may take a long time to run JIT, set NUMBA_DISABLE_JIT=1 as an eviromental variable to disable JIT, this makes per timestep time increase but may still be faster for large networks due to the lack of upfront compilation time)")
        engine = GraphEngine(comp)
    blob = engine.constructStateBlob()
    solver_ctx = PointSolverContext(blob)
    solver_ctx.stdout_logging = False
    solver = PointSolver(engine)
    # Record initial molar abundances for every network species
    # (species absent from the input composition start at zero).
    gf_initial_Y = {}
    for sp in engine.getNetworkSpecies(solver_ctx.engine_ctx):
        if comp.contains(sp):
            gf_initial_Y[sp.name()] = comp.getMolarAbundance(sp)
        else:
            gf_initial_Y[sp.name()] = 0.0
    # --- GridFire integration ---
    gf_time = []
    gf_results = {}       # species name -> abundance history
    step_conditions = []  # per-step (dt, T, rho, t); replayed by pynucastro below
    gf_start_time = time.time()
    gf_current_time = current_time
    # With dt = h*t the step count follows from t_n = t_0 * (1 + h)^n.
    total_steps = int(np.ceil(np.log(tMax / current_time) / np.log(1 + h)))
    with tqdm(total=total_steps, desc="GridFire BBN", unit="step") as pbar:
        while gf_current_time < tMax:
            current_dt = h * gf_current_time
            next_time = gf_current_time + current_dt
            # Burn at the step-midpoint conditions (temperature in K).
            burn_temp = (T9(gf_current_time) + T9(next_time)) / 2.0 * 1e9
            burn_density = (get_density(gf_current_time) + get_density(next_time)) / 2.0
            netIn.temperature = burn_temp
            netIn.density = burn_density
            netIn.tMax = current_dt
            netOut = solver.evaluate(solver_ctx, netIn)
            # Chain the steps: the next step starts from this step's output.
            netIn.composition = netOut.composition
            pbar.update(1)
            pbar.set_postfix(t=f"{gf_current_time:.2e}", T=f"{burn_temp:.2e}", rho=f"{burn_density:.2e}")
            step_conditions.append({
                "dt": current_dt,
                "T": burn_temp,
                "rho": burn_density,
                "t": gf_current_time
            })
            gf_time.append(gf_current_time)
            for sp in engine.getNetworkSpecies(solver_ctx.engine_ctx):
                name = sp.name()
                if name not in gf_results:
                    gf_results[name] = []
                gf_results[name].append(netOut.composition.getMolarAbundance(sp))
            gf_current_time += current_dt
    gf_end_time = time.time()
    print(f"GridFire integration finished in {gf_end_time - gf_start_time:.4f} seconds.")
    # --- Map GridFire reactions onto pynucastro/ReacLib rates ---
    print("Building Pynucastro BBN Network...")
    reaclib_library = pyna.ReacLibLibrary()
    # Normalize GridFire rate ids toward ReacLib-style names: strip the
    # lepton markers and the space after commas.
    rate_names = [r.id().replace("e+","").replace("e-","").replace(", ", ",") for r in engine.getNetworkReactions(solver_ctx.engine_ctx)]
    goodRates = []
    missingRates = []
    skipped_photo_rates = 0
    pyna_rate_mapping = {}  # GridFire rate id -> list of pynucastro rates
    import io
    import contextlib
    for r_str in rate_names:
        pyna_rates_for_reaction = []
        # Silence pynucastro's chatty output while probing by name.
        with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(io.StringIO()):
            try:
                res = reaclib_library.get_rate_by_name(r_str)
                if res is not None:
                    if isinstance(res, list):
                        pyna_rates_for_reaction.extend(res)
                    else:
                        pyna_rates_for_reaction.append(res)
            except:
                # Name lookup failed; fall through to nucleus-based search.
                pass
        if not pyna_rates_for_reaction:
            res_nuc = get_pyna_rate(r_str, reaclib_library)
            if res_nuc:
                if isinstance(res_nuc, list):
                    pyna_rates_for_reaction.extend(res_nuc)
                else:
                    pyna_rates_for_reaction.append(res_nuc)
        if pyna_rates_for_reaction:
            pyna_rate_mapping[r_str] = pyna_rates_for_reaction
            for rate in pyna_rates_for_reaction:
                if args.filter_photo:
                    # Photodisintegrations carry a photon in the entrance channel.
                    is_photo_rate = any(str(r).lower() in ['g', 'gamma'] for r in rate.reactants)
                    if is_photo_rate:
                        skipped_photo_rates += 1
                        continue
                goodRates.append(rate)
        else:
            missingRates.append(r_str)
    if missingRates:
        print(f"Warning: Could not map {len(missingRates)} rates to Pynucastro (likely absent from default ReacLib).")
        print(f"Missing sample: {missingRates[:10]}...")
    if args.filter_photo:
        print(f"Info: Skipped {skipped_photo_rates} photodisintegration rates due to --filter-photo flag.")
    # --- Side-by-side rate evaluation at every recorded step ---
    print("--- Evaluating reaction rates over all temperatures ---")
    gf_rates_history = {}
    py_rates_history = {}
    gf_rate_labels = {}
    py_rate_labels = {}
    for reaction in engine.getNetworkReactions(solver_ctx.engine_ctx):
        r_str = reaction.id().replace("e+","").replace("e-","").replace(", ", ",")
        gf_rates_history[r_str] = []
        py_rates_history[r_str] = []
        # The source-label accessor appears to differ between GridFire
        # versions; probe the known spellings -- TODO confirm.
        try:
            gf_rate_labels[r_str] = reaction.sources()
        except AttributeError:
            try:
                gf_rate_labels[r_str] = reaction.sourceLabel()
            except AttributeError:
                gf_rate_labels[r_str] = "Unknown"
        if r_str in pyna_rate_mapping:
            py_rate_labels[r_str] = [getattr(pr, 'label', 'Unknown') for pr in pyna_rate_mapping[r_str]]
        else:
            py_rate_labels[r_str] = []
    for step in tqdm(step_conditions, desc="Calculating Rates", unit="step"):
        T9_val = step["T"] / 1e9  # GridFire rates are evaluated in T9
        T_K = step["T"]           # pynucastro eval() takes Kelvin here -- verify units
        for reaction in engine.getNetworkReactions(solver_ctx.engine_ctx):
            r_str = reaction.id().replace("e+","").replace("e-","").replace(", ", ",")
            gf_rate_val = 0.0
            # calculate_rate's signature seems version-dependent; try both
            # known call shapes and fall back to 0.0 if neither works.
            try:
                gf_rate_val = reaction.calculate_rate(T9_val, 0, [])
            except:
                try:
                    gf_rate_val = reaction.calculate_rate(T9_val, 0, 0, 0, [], dict())
                except Exception as e:
                    pass
            gf_rates_history[r_str].append(gf_rate_val)
            py_rate_val = 0.0
            if r_str in pyna_rate_mapping:
                # Several ReacLib sets may map to one GridFire reaction; sum them.
                for pr in pyna_rate_mapping[r_str]:
                    py_rate_val += pr.eval(T_K)
            py_rates_history[r_str].append(py_rate_val)
    # --- Flag rates whose relative difference exceeds the threshold ---
    print("--- Rate Comparison Summary ---")
    threshold = 1e-4  # relative difference above which a rate is reported
    mismatches = {}
    for r_str in gf_rates_history:
        gf_arr = np.array(gf_rates_history[r_str])
        py_arr = np.array(py_rates_history[r_str])
        with np.errstate(divide='ignore', invalid='ignore'):
            # Use whichever side is nonzero as the denominator and floor
            # the all-zero case so the division never produces inf/nan.
            denom = np.where(py_arr != 0, py_arr, gf_arr)
            denom = np.where(denom == 0, 1e-30, denom)
            rel_diffs = np.abs(gf_arr - py_arr) / denom
        max_diff = np.max(rel_diffs)
        if max_diff > threshold:
            max_idx = np.argmax(rel_diffs)
            mismatches[r_str] = {
                "max_diff": max_diff,
                "temp": step_conditions[max_idx]["T"],
                "gf_val": gf_arr[max_idx],
                "py_val": py_arr[max_idx]
            }
    if mismatches:
        print(f"Found {len(mismatches)} rates with differences > {threshold:.2%}")
        for r_str, info in mismatches.items():
            gf_lbl = gf_rate_labels.get(r_str, 'Unknown')
            py_lbl = py_rate_labels.get(r_str, [])
            print(f"{r_str:20}: Max Diff = {info['max_diff']:.2%}, at T = {info['temp']:.2e} K")
            print(f"  GF = {info['gf_val']:.4e} (Source: {gf_lbl})")
            print(f"  Py = {info['py_val']:.4e} (Sources: {py_lbl})")
    else:
        print(f"All rates match within the {threshold:.2%} threshold across all temperatures.")
    print("-------------------------------")
    # --- Build and integrate the equivalent pynucastro network ---
    pynet = pyna.PythonNetwork(rates=goodRates)
    network_file = "pynuc_bbn_network.py"
    pynet.write_network(network_file)
    net = load_network_module(network_file)
    # GridFire species name -> (pynucastro name, plot color).
    mapping = {
        "H-1": ("p", "tab:blue"),
        "n-1": ("n", "tab:orange"),
        "He-4": ("he4", "tab:green"),
        "H-2": ("d", "tab:red"),
        "H-3": ("t", "tab:purple"),
        "He-3": ("he3", "tab:brown"),
        "Li-7": ("li7", "tab:pink"),
        "Be-7": ("be7", "tab:gray")
    }
    # Seed the pynucastro abundance vector from GridFire's initial state.
    Y0 = np.zeros(net.nnuc)
    for i, nuc in enumerate(pynet.get_nuclei()):
        nuc_name = str(nuc)
        gf_name = None
        for gf, (py, _) in mapping.items():
            if py == nuc_name:
                gf_name = gf
                break
        if not gf_name:
            # Fall back to converting e.g. "he3" -> "He-3".
            match = re.match(r"([a-zA-Z]+)(\d+)", nuc_name)
            if match:
                gf_name = f"{match.group(1).capitalize()}-{match.group(2)}"
        if gf_name and gf_name in gf_initial_Y:
            Y0[i] = gf_initial_Y[gf_name]
    pyna_time = []
    pyna_nuc_names = [str(n) for n in pynet.get_nuclei()]
    pyna_results = {nuc: [] for nuc in pyna_nuc_names}
    pyna_start_time = time.time()
    # Replay GridFire's exact step conditions through scipy's stiff solver.
    for step in tqdm(step_conditions, unit="step", desc="pynucastro Integration"):
        sol = scipy.integrate.solve_ivp(
            net.rhs,
            [0, step["dt"]],
            Y0,
            args=(step["rho"], step["T"]),
            method="Radau",
            jac=net.jacobian,
            rtol=1e-8,
            atol=1e-20
        )
        Y0 = sol.y[:, -1]  # carry the end state into the next step
        pyna_time.append(step["t"])
        for j in range(net.nnuc):
            nuc_name = str(pynet.get_nuclei()[j])
            if nuc_name in pyna_results:
                pyna_results[nuc_name].append(Y0[j])
    pyna_end_time = time.time()
    print(f"Pynucastro integration finished in {pyna_end_time - pyna_start_time:.4f} seconds.")
    # --- Export everything needed to regenerate the paper figures ---
    export_data = {
        "metadata": {
            "tMax": tMax,
            "h": h,
            "initial_time": current_time,
            "initial_XpXn_ratio": XpXn,
            "initial_mass_fractions": {
                "Xp": Xp,
                "Xn": Xn
            },
            "execution_times_seconds": {
                "gridfire": gf_end_time - gf_start_time,
                "pynucastro": pyna_end_time - pyna_start_time
            },
            "missing_pynucastro_rates": missingRates,
            "skipped_photodisintegration_rates": skipped_photo_rates if args.filter_photo else 0,
            "rate_labels": {
                "gridfire": gf_rate_labels,
                "pynucastro": py_rate_labels
            }
        },
        "thermodynamic_conditions": step_conditions,
        "data": {
            "gridfire": {
                "time": gf_time,
                "molar_abundances": gf_results,
                "reaction_rates": gf_rates_history
            },
            "pynucastro": {
                "time": pyna_time,
                "molar_abundances": pyna_results,
                "reaction_rates": py_rates_history
            }
        }
    }
    json_out_file = "bbn_simulation_data.json"
    with open(json_out_file, "w") as f:
        json.dump(export_data, f, indent=4)
    # --- Comparison plot: solid = GridFire, dashed = pynucastro ---
    plt.style.use("default")
    fig, ax = plt.subplots(figsize=(10, 7))
    for gf_name, (pyna_name, color) in mapping.items():
        if gf_name in gf_results:
            ax.plot(gf_time, gf_results[gf_name], color=color, linestyle="-", linewidth=2.5, label=f"GF {gf_name}")
        if pyna_name in pyna_results:
            ax.plot(pyna_time, pyna_results[pyna_name], color=color, linestyle="--", linewidth=1.5, label=f"Pyna {pyna_name}")
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_ylim(1e-12, 2)
    ax.set_xlabel("Time (s)", fontsize=14)
    ax.set_ylabel("Molar Abundance (Y)", fontsize=14)
    # Two-part legend: line style keys the code, color keys the species.
    line_gf = mlines.Line2D([], [], color='black', linestyle='-', linewidth=2.5, label='GridFire')
    line_py = mlines.Line2D([], [], color='black', linestyle='--', linewidth=1.5, label='Pynucastro')
    sp_handles = []
    for gf_name, (pyna_name, color) in mapping.items():
        sp_handles.append(mlines.Line2D([], [], color=color, linestyle='-', linewidth=2, label=gf_name))
    ax.legend(handles=[line_gf, line_py] + sp_handles, loc='center left', bbox_to_anchor=(1.02, 0.5), fontsize=12)
    out_file = "bbn_comparison.pdf"
    plt.savefig(out_file)
if __name__ == "__main__":
    # Command-line entry point: parse options and hand off to main().
    cli = argparse.ArgumentParser(description="GridFire vs Pynucastro BBN Comparison")
    cli.add_argument(
        "--filter-photo",
        action="store_true",
        help="Filter out photodisintegration (reverse) rates to mimic GridFire's forward-only mechanics.",
    )
    cli.add_argument(
        "--depth",
        type=int,
        default=None,
        help="Limit the assembly depth of GridFire's GraphEngine. E.g., setting '--depth 3' shrinks the network size from 5000+ reactions to ~100, which reduces Pynucastro's Numba JIT compile time from hours to seconds.",
    )
    main(cli.parse_args())

View File

@@ -153,13 +153,15 @@ def quantify_engine_error(df_base, df_approx, r_base: NetOut, r_approx: NetOut,
def main(save_show):
C = init_composition()
netIn = init_netIn(10**7.1760912591, 10**2.2041199827, 1e17, C)
netIn = init_netIn(10**7.1760912591, 10**2.2041199827, 1e18, C)
stepLogger = StepLogger()
engine_graph = GraphEngine(C, 4)
blob = engine_graph.constructStateBlob()
print(f"Gridfire Using: {len(engine_graph.getNetworkReactions(blob))} Reactions and {len(engine_graph.getNetworkSpecies(blob))} Species")
print(engine_graph.getNetworkReactions(blob))
print(engine_graph.getNetworkSpecies(blob))
solver_ctx_graph = PointSolverContext(blob)
solver_ctx_graph.stdout_logging = False

View File

@@ -0,0 +1,20 @@
42a43
> real(dp) :: nuc_eval_time
358a360,361
> integer*8 :: count_start, count_end, count_rate
>
361c364
< eps_neu
---
> eps_neu, eval_time
437a441
> call system_clock(count_rate=count_rate)
438a443
> call system_clock(count_start)
451a457,460
> call system_clock(count_end)
> eval_time = real(count_end - count_start, dp) / real(count_rate, dp)
>
>
455a465
> out% nuc_eval_time = eval_time

View File

@@ -0,0 +1,33 @@
10a11,14
> !--- EMB (April 11, 2026. GridFire Comparison Timing) ---
> real(dp) :: total_eval_time
> !---
>
22a27,29
> real :: t_start, t_end
>
> total_eval_time = 0.0d0
40a48
> write(*,*) "Calling do_hydrostatic_burn ", j, "th time"
41a50
>
44a54,57
> write(*,*) "============================"
> write(*,*) "Network Evaluation Wall Time: ", total_eval_time
> write(*,*) "============================"
>
130a144,146
> integer*8 :: count_start, count_end, count_rate
> real(dp) :: eval_time
>
131a148,151
>
> call system_clock(count_rate=count_rate)
>
> call system_clock(count_start)
132a153,154
> call system_clock(count_end)
>
133a156,157
>
> total_eval_time = total_eval_time + out% nuc_eval_time

View File

@@ -0,0 +1,356 @@
5,8c5,9
< ! This program is free software: you can redistribute it and/or modify
< ! it under the terms of the GNU Lesser General Public License
< ! as published by the Free Software Foundation,
< ! either version 3 of the License, or (at your option) any later version.
---
> ! MESA is free software; you can use it and/or modify
> ! it under the combined terms and restrictions of the MESA MANIFESTO
> ! and the GNU General Library Public License as published
> ! by the Free Software Foundation; either version 2 of the License,
> ! or (at your option) any later version.
10c11,15
< ! This program is distributed in the hope that it will be useful,
---
> ! You should have received a copy of the MESA MANIFESTO along with
> ! this software; if not, it is available at the mesa website:
> ! http://mesa.sourceforge.net/
> !
> ! MESA is distributed in the hope that it will be useful,
13c18
< ! See the GNU Lesser General Public License for more details.
---
> ! See the GNU Library General Public License for more details.
15,16c20,22
< ! You should have received a copy of the GNU Lesser General Public License
< ! along with this program. If not, see <https://www.gnu.org/licenses/>.
---
> ! You should have received a copy of the GNU Library General Public License
> ! along with this software; if not, write to the Free Software
> ! Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21c27
< use const_def, only: dp, Qconv
---
> use const_def
24a31
>
25a33
>
28c36
<
---
>
30c38,39
<
---
>
>
33c42
<
---
>
36c45
<
---
>
40d48
< contains
41a50,53
>
>
> contains
>
55c67
<
---
>
61c73
<
---
>
71c83
<
---
>
74c86
<
---
>
86c98
< use num_lib
---
> use num_lib
94c106
<
---
>
98,101c110,113
< real(dp), intent(in) :: t_start, t_end, starting_x(:) ! (species)
< integer, intent(in) :: ntimes ! ending time is times(num_times); starting time is 0
< real(dp), pointer, intent(in) :: times(:) ! (num_times)
< real(dp), pointer, intent(in) :: log10Ts_f1(:)
---
> real(dp), intent(in) :: t_start, t_end, starting_x(:) ! (species)
> integer, intent(in) :: ntimes ! ending time is times(num_times); starting time is 0
> real(dp), pointer, intent(in) :: times(:) ! (num_times)
> real(dp), pointer, intent(in) :: log10Ts_f1(:)
109c121
< real(dp), intent(in), pointer :: rate_factors(:) ! (num_reactions)
---
> real(dp), intent(in), pointer :: rate_factors(:) ! (num_reactions)
111,113c123,125
< real(dp), pointer, intent(in) :: reaction_Qs(:) ! (rates_reaction_id_max)
< real(dp), pointer, intent(in) :: reaction_neuQs(:) ! (rates_reaction_id_max)
< integer, intent(in) :: screening_mode ! see screen_def
---
> real(dp), pointer, intent(in) :: reaction_Qs(:) ! (rates_reaction_id_max)
> real(dp), pointer, intent(in) :: reaction_neuQs(:) ! (rates_reaction_id_max)
> integer, intent(in) :: screening_mode ! see screen_def
115,116c127,128
< integer, intent(in) :: max_steps ! maximal number of allowed steps.
< real(dp), intent(in) :: eps, odescal ! tolerances. e.g., set both to 1d-6
---
> integer, intent(in) :: max_steps ! maximal number of allowed steps.
> real(dp), intent(in) :: eps, odescal ! tolerances. e.g., set both to 1d-6
131c143
<
---
>
136c148
<
---
>
141c153
<
---
>
150c162
<
---
>
152c164
<
---
>
154c166
<
---
>
156c168
<
---
>
158c170
<
---
>
161c173
<
---
>
171c183
<
---
>
173c185
<
---
>
181c193
<
---
>
193c205
<
---
>
198c210
<
---
>
204c216
<
---
>
214c226
<
---
>
218c230
<
---
>
220c232
<
---
>
227c239
<
---
>
229c241
< call setup_net_info(n)
---
> call setup_net_info(n)
231c243
<
---
>
267c279
< cid = g% chem_id(i)
---
> cid = g% chem_id(i)
274c286
<
---
>
276c288
<
---
>
284c296
< real(dp) :: dxdt_sum, dxdt_sum_approx21, &
---
> real(dp) :: dxdt_sum, dxdt_sum_aprox21, &
291c303
<
---
>
293c305
<
---
>
298,299c310,311
< if (ierr /= 0) return
<
---
> if (ierr /= 0) return
>
309c321
<
---
>
321c333
<
---
>
332c344
<
---
>
336c348
<
---
>
342c354
< real(dp) :: d_eps_nuc_dx(species)
---
> real(dp) :: d_eps_nuc_dx(species)
347c359
<
---
>
354c366
<
---
>
360a373,375
> character(len=255) :: env_val
> integer :: env_val_status, env_val_length
>
362c377
<
---
>
367c382
<
---
>
371c386
<
---
>
373c388
<
---
>
376c391
<
---
>
378c393
<
---
>
394,395c409,410
<
< xsum = 0
---
>
> xsum = 0
403c418
< end if
---
> end if
412c427
<
---
>
415a431
>
416a433,446
> call get_environment_variable("BBQ_DISABLE_EOS", value=env_val, length=env_val_length, status=env_val_status)
> if (trim(env_val) == "False") then
> call eosDT_get( &
> eos_handle, species, g% chem_id, g% net_iso, x, &
> Rho, lgRho, T, lgT, &
> res, d_dlnd, d_dlnT, d_dxa, ierr)
> if (ierr /= 0) then
> if (report_ierr) write(*,*) 'failed in eosDT_get'
> return
> end if
> eta = res(i_eta)
> d_eta_dlnT = d_dlnT(i_eta)
> d_eta_dlnRho = d_dlnd(i_eta)
> endif
418,430c448
< call eosDT_get( &
< eos_handle, species, g% chem_id, g% net_iso, x, &
< Rho, lgRho, T, lgT, &
< res, d_dlnd, d_dlnT, d_dxa, ierr)
< if (ierr /= 0) then
< if (report_ierr) write(*,*) 'failed in eosDT_get'
< return
< end if
< eta = res(i_eta)
< d_eta_dlnT = d_dlnT(i_eta)
< d_eta_dlnRho = d_dlnd(i_eta)
<
<
---
>
433c451
<
---
>
446c464
<
---
>
470c488,489
<
---
>
>
471a491,493
>
>
>
473a496,500
>
>
>
>
>
475a503
>

View File

@@ -0,0 +1,11 @@
diff files for tests run in GridFire paper 1
Apply these diffs to BBQ and MESA files then compile
Note that to disable EOS evaluation in MESA, set the BBQ_DISABLE_EOS environment variable to 1
i.e.
export BBQ_DISABLE_EOS=1
prior to running bbq

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'mesa_125.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'mesa_235.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'mesa_45.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'mesa_495.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'basic.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -0,0 +1,56 @@
&bbq
! Physics options
net_name = 'pp_extras.net'
! Solver tolerances
max_steps = 1000000
eps = 1d-8
odescal = 1d-10
stptry = 0
! What mode to run
use_hydrostatic=.true.
write_iso_list = .true.
iso_list_filename = 'iso.list'
/
&sampling ! For both use_input_file and use_random_sampling
/
&profile ! For use_profile
/
&hydrostatic ! for use with use_hydrostatic
min_time = -8
max_time = 17
log_time =.true.
num_times = 300
logT = 7.1760912591
logRho = 2.2041199827
input_composition_filename = 'comp.txt'
output_filename = 'output.txt'
/
&eos
/
&nuclear
/
&controls
screening_mode = ''
/

View File

@@ -248,7 +248,7 @@ def main(save_show):
ax.loglog(t, dex_dh_diff, color='black')
# ax.semilogx(t, qse_h2(t)/qse_h1(t), color='green')
ax.set_xlabel("Time [s]", fontsize=17)
ax.set_ylabel(r"$\left|\log_{10}\left(\frac{D}{H})\right)_{graph} - \log_{10}\left(\frac{D}{H}\right)_{qse}\right|$", fontsize=17)
ax.set_ylabel(r"$\left|\Delta\log_{10}\right|$ [dex]", fontsize=17)
if save_show == ShowSave.SAVE:
plt.savefig("DHErr.pdf")
@@ -285,7 +285,7 @@ def main(save_show):
offset = np.log10(total_qse / total_graph)
else:
if z >= 14:
offset = np.nan # Disable these for visualization, they all have abundances so small (on the order of -100 it doesnt matter)
offset = np.nan
else:
offset = 0.0
@@ -300,8 +300,7 @@ def main(save_show):
print(sorted_symbols)
bars = ax.bar(sorted_symbols, sorted_dex, color='grey', edgecolor='grey', alpha=0.8)
# 3. Add styling and labels
ax.axhline(0, color='black', linewidth=0.8) # Adds a clear baseline at 0 dex
ax.axhline(0, color='black', linewidth=0.8)
ax.set_xlabel('Element', fontsize=25)
ax.set_ylabel('Offset [dex]', fontsize=25)
@@ -317,7 +316,7 @@ def main(save_show):
fig, ax = plt.subplots(1, 1, figsize=(10, 7))
ax.semilogx(t, dex_eps_diff, color='black')
ax.set_xlabel("Time [s]", fontsize=25)
ax.set_xlabel("Offset [dex]", fontsize=25)
ax.set_ylabel("Offset [dex]", fontsize=25)
if save_show == ShowSave.SAVE:
plt.savefig("DexEpsOffset.pdf")

View File

@@ -2,6 +2,7 @@ import numpy as np
from IPython.core.pylabtools import figsize
from gridfire.solver import PointSolver, PointSolverContext
from gridfire.policy import MainSequencePolicy
from gridfire.engine import GraphEngine, MultiscalePartitioningEngineView
from scipy.signal import find_peaks
@@ -93,8 +94,10 @@ def years_to_seconds(years: float) -> float:
def main(save_show):
C = init_composition()
netIn = init_netIn(1.5e7, 160, years_to_seconds(10e9), C)
policy = MainSequencePolicy(C)
construct = policy.construct()
enigne_graph = GraphEngine(C, 5)
graph_blob = enigne_graph.constructStateBlob()
qse_engine = MultiscalePartitioningEngineView(enigne_graph)
qse_blob = qse_engine.constructStateBlob(graph_blob)
# 3e-8 and 1e-24 are the default tolerances we adopt as testing indicates it works well for
# main sequence evolution. We encorage researchers to trial various relative and
@@ -104,8 +107,8 @@ def main(save_show):
# config.solver.pointSolver.trigger.boundaryFlux.absoluteThreshold = 1e-24
# solver = PointSolver(construct.engine, config)
solver = PointSolver(construct.engine)
solver_ctx = PointSolverContext(construct.scratch_blob)
solver = PointSolver(qse_engine)
solver_ctx = PointSolverContext(qse_blob)
stepLogger = StepLogger()
solver_ctx.callback = lambda ctx: stepLogger.log_step(ctx);
@@ -114,14 +117,14 @@ def main(save_show):
df = stepLogger.df
fig, axs = plt.subplots(2, 1, figsize=(17, 10))
fig, axs = plt.subplots(2, 1, figsize=(17, 10), gridspec_kw={'hspace': 0, 'height_ratios': [1, 1]}, sharex=True)
t = np.linspace(df.t.min(), df.t.max(), 1000)
# Note we are not plotting Ne-20 as its molar abundance is so close to N-14 that it makes it hard to
# distinguish that species
PlottingSpecies = ["H-1", "He-3", "He-4", "C-12", "N-14", "O-16", "Mg-24"]
stable_index = 10
stable_index = 25
for sp in PlottingSpecies:
x = df.t[stable_index:]
@@ -141,11 +144,11 @@ def main(save_show):
ax_eps.set_ylabel(r"$\epsilon$ [erg/g/s]", rotation=270, labelpad=25, fontsize=23)
ax_deps.set_ylabel(r"$\frac{d\epsilon}{dt}$ [erg/g/s$^2$]", rotation=270, labelpad=25, fontsize=23)
ax_eps.axvline(1.008e+15, color='grey', linestyle='dashed')
ax_deps.axvline(1.008e+15, color='grey', linestyle='dashed')
ax_eps.axvline(2.276e17, color='grey', linestyle='dashed')
ax_deps.axvline(2.276e17, color='grey', linestyle='dashed')
ax_eps.loglog(df.t[stable_index:], df.eps[stable_index:], color='red', linestyle='dashed')
ax_eps.text(df.t[stable_index:].iloc[0]*1.05, df.eps[stable_index:].iloc[0]*3, r"$\epsilon$", rotation=25, fontsize=20)
ax_eps.text(df.t[stable_index:].iloc[0], df.eps[stable_index:].iloc[0], r"$\epsilon$", fontsize=20)
ax_deps.semilogx(df.t[stable_index:], np.gradient(df.eps[stable_index:], df.t[stable_index:]), color='red', linestyle='dashed')
@@ -160,63 +163,20 @@ def main(save_show):
t = df.t.values
eps = df.eps.values
# Use this plot to determine the index to test removal of
# fig, ax = plt.subplots(1, 1, figsize=(10, 7))
# ax.plot(np.gradient(eps, t))
# ax.grid()
# plt.show()
idx = 156
t1 = t
eps1 = eps
t2 = np.delete(t, idx)
eps2 = np.delete(eps, idx)
f_deps_1 = interp1d(t1, np.gradient(eps1, t1))
f_deps_2 = interp1d(t2, np.gradient(eps2, t2))
int_deps_1 = trapezoid(f_deps_1(t), t)
int_deps_2 = trapezoid(f_deps_2(t), t)
rel_err = (int_deps_1 - int_deps_2) / int_deps_2
print(f"Rel Error: {rel_err:+0.3E}")
window = 10
indices = np.arange(idx - window, idx + window + 1)
indices_no_gap = np.delete(indices, window)
clean_t = t[indices_no_gap]
clean_eps = eps[indices_no_gap]
spline = CubicSpline(clean_t, clean_eps)
eps_predicted = spline(t[idx])
eps_actual = eps[idx]
absolute_jump = np.abs(eps_actual - eps_predicted)
relative_jump = absolute_jump / eps_actual
print(f"Local Discontinuity at index {idx}: {relative_jump:.3%}")
E_actual = trapezoid(eps, t)
t_clean = np.delete(t, idx)
eps_clean_points = np.delete(eps, idx)
spline = CubicSpline(t_clean, eps_clean_points)
eps_smooth = np.copy(eps)
eps_smooth[idx] = spline(t[idx])
E_smooth = trapezoid(eps_smooth, t)
total_rel_error = (E_actual - E_smooth) / E_smooth
print(f"Total Relative Energy Error: {total_rel_error:+0.12E}")
t1 = np.delete(t, [237])
eps1 = np.delete(eps, [237])
f_discon = interp1d(t, eps, bounds_error=False, fill_value='extrapolate')
f_smooth = interp1d(t1, eps1, bounds_error=False, fill_value='extrapolate')
ti = np.logspace(np.log10(t.min()), np.log10(t.max()), 1000)
cum_discon = trapezoid(f_discon(ti), ti)
cum_smooth = trapezoid(f_smooth(ti), ti)
rel_err = (cum_discon - cum_smooth) / cum_smooth
print(f"Relative Cummulative Energy Error: {rel_err:0.4E} ({cum_discon:0.4E} [erg/g] vs {cum_smooth:0.4E} [erg/g])")
if __name__ == "__main__":
import argparse

View File

@@ -0,0 +1,20 @@
1.000000000000000000e+02
1.623776739188720910e+02
2.636650898730358108e+02
4.281332398719395655e+02
6.951927961775605809e+02
1.128837891684688429e+03
1.832980710832435534e+03
2.976351441631319176e+03
4.832930238571751943e+03
7.847599703514606517e+03
1.274274985703132188e+04
2.069138081114790111e+04
3.359818286283781345e+04
5.455594781168514601e+04
8.858667904100831947e+04
1.438449888287663052e+05
2.335721469090121391e+05
3.792690190732246265e+05
6.158482110660254257e+05
1.000000000000000000e+06

View File

@@ -0,0 +1,20 @@
7.516506915568591296e+01 7.495454650010640307e+01 7.474402243846603255e+01 7.453349751089608333e+01 7.432297205004131513e+01 7.411244626076269526e+01 7.390192026922430557e+01 7.369139415312440633e+01 7.348086796031343226e+01 7.327034172026007752e+01 7.305981545111255571e+01 7.284928916404744825e+01 7.263876286594782528e+01 7.242823656105262842e+01 7.221771025197236327e+01 7.200718394031474645e+01 7.179665762706986243e+01 7.158613131284748476e+01 7.137560499802309266e+01 7.116507868282796778e+01
2.411108827502363994e+01 2.390056655264294605e+01 2.369004306572022500e+01 2.347951849209244912e+01 2.326899324921361867e+01 2.305846759417556413e+01 2.284794168530916991e+01 2.263741562012273434e+01 2.242688945866676775e+01 2.221636323792333556e+01 2.200583698066780514e+01 2.179531070092636114e+01 2.158478440733700054e+01 2.137425810521942893e+01 2.116373179784977410e+01 2.095320548724562926e+01 2.074267917464953115e+01 2.053215286082669166e+01 2.032162654624836762e+01 2.011110023120477663e+01
1.588134936422530075e+01 1.567082305245526186e+01 1.546029673914114788e+01 1.524977042487612167e+01 1.503924411002547501e+01 1.482871779481417640e+01 1.461819147938077101e+01 1.440766516381058082e+01 1.419713884815615401e+01 1.398661253244984870e+01 1.377608621671159383e+01 1.356555990095366226e+01 1.335503358518361416e+01 1.314450726940610359e+01 1.293398095362399758e+01 1.272345463783906006e+01 1.251292832205238170e+01 1.230240200626462865e+01 1.209187569047621480e+01 1.188134937468739416e+01
1.240141625353471788e+01 1.219088993828025025e+01 1.198036362282025813e+01 1.176983730723369170e+01 1.155931099156918407e+01 1.134878467585666861e+01 1.131773574219467626e+01 1.131773344868713060e+01 1.131772427641642054e+01 1.131773274314408440e+01 1.131774381739029423e+01 1.131774099106536724e+01 1.131772792426291652e+01 1.131772001429877506e+01 1.131772477191268145e+01 1.131772951612228795e+01 1.131773269504887658e+01 1.131771867785366759e+01 1.131773274806228891e+01 1.131773618945088700e+01
1.221441707845048441e+01 1.221443092438587996e+01 1.221449268621868711e+01 1.221444340127185413e+01 1.221450447820550878e+01 1.221450450550565847e+01 1.221462299546512753e+01 1.221450541184946736e+01 1.221449741948761059e+01 1.221437937238299476e+01 1.221452600508989228e+01 1.221450974355266439e+01 1.221454206060290382e+01 1.221449405571979696e+01 1.221450465229765392e+01 1.221445641451824571e+01 1.221439900705651915e+01 1.221450373335646056e+01 1.221440168392089554e+01 1.221457855378897328e+01
1.285007977945194213e+01 1.285045003815560705e+01 1.284971632467318692e+01 1.284966729479870828e+01 1.284988825100139920e+01 1.284989877979216288e+01 1.284993556983961582e+01 1.284999283899156453e+01 1.285004027052133146e+01 1.285010185143070771e+01 1.284956549615072419e+01 1.285026985045980297e+01 1.285030607530261904e+01 1.284995107080426280e+01 1.285014040334820606e+01 1.284998296697936659e+01 1.285007370919554859e+01 1.284984313025256419e+01 1.285062579528359450e+01 1.284971304845847762e+01
1.332742493047810584e+01 1.332742517211232247e+01 1.332742546286364416e+01 1.332742938067791094e+01 1.332742349532839476e+01 1.332742474050093406e+01 1.332742419644821119e+01 1.332742815605684683e+01 1.332742784332937092e+01 1.332742638395014367e+01 1.332742810591535942e+01 1.332742545889716723e+01 1.332742617301116894e+01 1.332742856994030056e+01 1.332742872162519987e+01 1.332742428558021075e+01 1.332742647766768229e+01 1.332742921111014844e+01 1.332742941333290076e+01 1.332742293601148376e+01
1.347856888607090298e+01 1.347852064818079398e+01 1.347848605530894872e+01 1.347847130758353629e+01 1.347845366742022044e+01 1.347844421325814146e+01 1.347843780579964879e+01 1.347843881361396789e+01 1.347843249086560036e+01 1.347843051734689368e+01 1.347843088633933917e+01 1.347842621749463632e+01 1.347842953219780249e+01 1.347843032361080340e+01 1.347843437616470652e+01 1.347842800930953722e+01 1.347843104180701168e+01 1.347842687171626075e+01 1.347843078062061473e+01 1.347842862739984149e+01
1.278613012830484585e+01 1.278496693040220045e+01 1.278424783677001919e+01 1.278380390835387814e+01 1.278353105351237318e+01 1.278336397571877470e+01 1.278326064906533155e+01 1.278319580429599078e+01 1.278315656532500455e+01 1.278313425351781696e+01 1.278311894450724751e+01 1.278310725590280583e+01 1.278310351000193812e+01 1.278310038295341933e+01 1.278309778527155416e+01 1.278309542126202025e+01 1.278309502060797875e+01 1.278309434390479637e+01 1.278309469349433591e+01 1.278309355867858166e+01
1.203164561346398997e+01 1.202454099764239892e+01 1.202011655316218430e+01 1.201737235620566580e+01 1.201567535052035218e+01 1.201462713041880193e+01 1.201398075487848693e+01 1.201358208920642667e+01 1.201333643079369473e+01 1.201318530620892489e+01 1.201309211797299881e+01 1.201303460578686177e+01 1.201299925517803402e+01 1.201297778500763513e+01 1.201296402629015425e+01 1.201295602514885807e+01 1.201295092007369192e+01 1.201294776572451539e+01 1.201294578156225334e+01 1.201294445669623023e+01
1.147182662670111952e+01 1.144367788592245461e+01 1.142551981827195284e+01 1.141400530026558080e+01 1.140678324875124972e+01 1.140228496986332551e+01 1.139949510693226387e+01 1.139776944147619631e+01 1.139670393632033907e+01 1.139604667634546864e+01 1.139564144923076228e+01 1.139539174325708260e+01 1.139523790399952752e+01 1.139514313577533322e+01 1.139508469291570592e+01 1.139504883737335206e+01 1.139502665112581603e+01 1.139501305044587021e+01 1.139500464491563037e+01 1.139499946776214934e+01
1.112437351439633382e+01 1.104983178380758879e+01 1.099728149037341218e+01 1.096183216558747553e+01 1.093866818437858335e+01 1.092385480067567372e+01 1.091451366605461715e+01 1.090867559087929806e+01 1.090504724352872223e+01 1.090280005986856615e+01 1.090141130887915644e+01 1.090055422303433375e+01 1.090002569902901897e+01 1.089969993641441270e+01 1.089949919024657987e+01 1.089937554186619906e+01 1.089929936996127857e+01 1.089925246542425796e+01 1.089922356951065829e+01 1.089920576898874138e+01
1.097211035219207176e+01 1.084134486646048501e+01 1.073647503072117892e+01 1.065745474258099712e+01 1.060122323076019768e+01 1.056302031110026540e+01 1.053793274514885958e+01 1.052183628790108649e+01 1.051166454889254531e+01 1.050529886706545035e+01 1.050133929659283893e+01 1.049888572075516890e+01 1.049736892822251733e+01 1.049643262007837308e+01 1.049585516448912514e+01 1.049549921954909593e+01 1.049527988180654248e+01 1.049514477454431649e+01 1.049506154264506641e+01 1.049501027963316524e+01
1.094468724003418458e+01 1.077450225810061291e+01 1.062302102545175941e+01 1.049487461738804051e+01 1.039277793053667942e+01 1.031633777833742727e+01 1.026223795135402028e+01 1.022563538542393147e+01 1.020166848316140573e+01 1.018632073935404847e+01 1.017663415868663179e+01 1.017057683358957298e+01 1.016681091859345010e+01 1.016447806019511191e+01 1.016303616604842475e+01 1.016214619362716221e+01 1.016159734987274454e+01 1.016125906067995643e+01 1.016105061769337148e+01 1.016092220613375474e+01
1.097651335571714704e+01 1.078588796262477167e+01 1.060606838834930521e+01 1.044162196127209086e+01 1.029755509438241390e+01 1.017803438345032419e+01 1.008480316539240995e+01 1.001638286170760672e+01 9.968755779276024853e+00 9.936928619742690927e+00 9.916266013536175095e+00 9.903108546512575217e+00 9.894834222328734441e+00 9.889671762689555834e+00 9.886466743915320876e+00 9.884483084019732857e+00 9.883257691494989672e+00 9.882501603001433210e+00 9.882035421372110662e+00 9.881748119644676365e+00
1.103372098422792646e+01 1.083337479287353844e+01 1.063895056067691236e+01 1.045351601851197820e+01 1.028119637274276243e+01 1.012690577382375245e+01 9.995405386723385632e+00 9.889757307003176123e+00 9.810009639387388347e+00 9.753173954964635683e+00 9.714515406752507687e+00 9.689107699410834940e+00 9.672796915308628840e+00 9.662486085923703172e+00 9.656031896506101830e+00 9.652016722703004703e+00 9.649528481194510121e+00 9.647990171985121322e+00 9.647040550101658951e+00 9.646454868275412764e+00
1.110184168148390071e+01 1.089681287389299413e+01 1.069508339268978858e+01 1.049852259320839565e+01 1.030987990205894178e+01 1.013295385171165996e+01 9.972476778158801736e+00 9.833404330024478668e+00 9.719506474267035401e+00 9.631851532036685981e+00 9.568308558091725757e+00 9.524510461234727998e+00 9.495451254290198762e+00 9.476677471525505325e+00 9.464760761499684705e+00 9.457281885088901774e+00 9.452621649844495266e+00 9.449730720772761217e+00 9.447942336862459101e+00 9.446837908757437674e+00
1.117465294371354645e+01 1.096726403921578807e+01 1.076178538409097385e+01 1.055934766635116695e+01 1.036168711930753439e+01 1.017137900705320064e+01 9.992030295576467935e+00 9.828237222646533766e+00 9.685001477264883363e+00 9.566427510509075915e+00 9.474146500757882450e+00 9.406566347274901929e+00 9.359604550897161701e+00 9.328261330384671979e+00 9.307930342276915070e+00 9.294991305454903596e+00 9.286857258004600268e+00 9.281783457841839891e+00 9.278633925233053859e+00 9.276684775049567122e+00
1.124926881688630687e+01 1.104062677480477817e+01 1.083313911549921293e+01 1.062750533454747170e+01 1.042482367952639954e+01 1.022678380690943456e+01 1.003589927307875307e+01 9.855707138075906926e+00 9.690749489446384146e+00 9.546033914635952300e+00 9.425773144805688730e+00 9.331796860392312709e+00 9.262715545169019293e+00 9.214563489601520274e+00 9.182353621157208323e+00 9.161428359629866236e+00 9.148097719226344182e+00 9.139712116369935302e+00 9.134479294651772108e+00 9.131230233292356502e+00
1.132422755411165838e+01 1.111488579882806782e+01 1.090627085960638532e+01 1.069882940676268213e+01 1.049327112144276519e+01 1.029070998272206161e+01 1.009285865446117292e+01 9.902261421692060139e+00 9.722490357852789700e+00 9.558114979716174631e+00 9.414140564492795349e+00 9.294726311310403943e+00 9.201602599594787435e+00 9.133277722187754577e+00 9.085726396385924275e+00 9.053954438477180844e+00 9.033329695930307324e+00 9.020197187298318653e+00 9.011938899484631449e+00 9.006786577439486408e+00

View File

@@ -0,0 +1,20 @@
4.000000000000000000e+06
5.642105263157894462e+07
1.088421052631578892e+08
1.612631578947368264e+08
2.136842105263157785e+08
2.661052631578947306e+08
3.185263157894736528e+08
3.709473684210526347e+08
4.233684210526315570e+08
4.757894736842104793e+08
5.282105263157894611e+08
5.806315789473683834e+08
6.330526315789473057e+08
6.854736842105262280e+08
7.378947368421052694e+08
7.903157894736841917e+08
8.427368421052631140e+08
8.951578947368420362e+08
9.475789473684209585e+08
1.000000000000000000e+09

View File

@@ -0,0 +1,143 @@
import os.path
from gridfire.engine import GraphEngine, NetworkJacobian
from fourdst.composition import Composition
from gridfire._gridfire.engine.scratchpads import StateBlob
from gridfire._gridfire.engine.scratchpads import ScratchPadType
import gridfire
import numpy as np
from typing import Tuple
from tqdm import tqdm
def get_stiffness_ratio_and_eigs(J: NetworkJacobian) -> Tuple[float, np.ndarray]:
    """Return the stiffness ratio and eigenvalue spectrum of a Jacobian.

    The stiffness ratio is max(|Re(lambda)|) / min(|Re(lambda)|) taken over
    eigenvalues whose real part is non-zero.

    Parameters
    ----------
    J : NetworkJacobian
        Jacobian wrapper exposing ``to_numpy()``.

    Returns
    -------
    Tuple[float, np.ndarray]
        ``(stiffness_ratio, eigenvalues)``.  The ratio is 1.0 when every
        eigenvalue has a zero real part (nothing to compare).
    """
    jac = J.to_numpy()
    eigenvalues = np.linalg.eigvals(jac)
    abs_real_eigvals = np.abs(np.real(eigenvalues))
    non_zero_eigenvalues = abs_real_eigvals[abs_real_eigvals > 0]
    if non_zero_eigenvalues.size == 0:
        # Degenerate spectrum (all purely imaginary/zero): define ratio as 1.
        return 1.0, eigenvalues
    # The `> 0` filter guarantees the minimum is strictly positive, so the
    # ratio is always finite.  (The original had an unreachable
    # `min_lambda == 0` branch returning inf; it has been removed.)
    stiffness_ratio = np.max(non_zero_eigenvalues) / np.min(non_zero_eigenvalues)
    return stiffness_ratio, eigenvalues
def setup() -> Tuple[Composition, GraphEngine, StateBlob]:
    """Build the baseline composition, a depth-4 graph engine, and a state
    blob with the engine's scratchpad enrolled and initialized."""
    composition = Composition({"H-1": 0.702, "He-4": 0.06, "O-16": 1e-5})
    net_engine = GraphEngine(composition, 4)
    state = StateBlob()
    state.enroll(ScratchPadType.GRAPH_ENGINE_SCRATCHPAD)
    # Bind the scratchpad to this engine before handing the blob out.
    state.get(ScratchPadType.GRAPH_ENGINE_SCRATCHPAD).initialize(net_engine)
    return composition, net_engine, state
def format_grid_point_stiff_data(J: NetworkJacobian, engine: GraphEngine, blob: StateBlob, T9: float, density: float) -> str:
    """Render every non-zero Jacobian entry at (T9, rho) as one line of text,
    joined with newlines."""
    species = engine.getNetworkSpecies(blob)
    lines = []
    for row in species:
        for col in species:
            value = J[row, col]
            if value == 0:
                continue
            lines.append(f"J[{row},{col}](T9={T9},rho={density}) = {value}")
    return "\n".join(lines)
def get_stiff(engine: GraphEngine, blob: StateBlob, comp: Composition, T9: float, density: float) -> Tuple[float, np.ndarray]:
    """Generate the Jacobian at (T9, density) and return its stiffness ratio
    and eigenvalue spectrum."""
    jacobian = engine.generateJacobianMatrix(blob, comp, T9, density)
    return get_stiffness_ratio_and_eigs(jacobian)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    comp, engine, blob = setup()
    # Start from a clean per-run diagnostic file.
    if os.path.exists("stiff_species.dat"):
        os.remove("stiff_species.dat")
    # Temperature [K] grid and (log-spaced) density [g cm^-3] grid.
    T = np.linspace(4e6, 1e9, 20)
    R = np.logspace(2, 6, 20)
    print(f"Stiffness for a T9=0.015: {get_stiff(engine, blob, comp, 0.015, 160)[0]}")
    print(f"Stiffness for a T9=0.040: {get_stiff(engine, blob, comp, 0.040, 160)[0]}")
    # Generate the global stiffness map.  SS is indexed [temperature, density]
    # (T on rows, R on columns), matching the metadata written below.
    SS = np.zeros((len(T), len(R)))
    for tid, t in tqdm(enumerate(T), total=len(T), desc="Temperature Grid"):
        for rid, r in tqdm(enumerate(R), total=len(R), desc=f"Density Grid at T={t:5.3e}", leave=False):
            stiffness, _ = get_stiff(engine, blob, comp, t/1e9, r)
            SS[tid, rid] = np.log10(stiffness)
    np.savetxt("SS.np.dat", SS)
    np.savetxt("T.np.dat", T)
    np.savetxt("R.np.dat", R)
    with open("metadata.txt", "w") as f:
        f.write(f"SS - Stiffness ratio for jacobian matrix generated from a fully constructed GraphEngine with gridfire version: {gridfire.__version__}\n")
        f.write("T - Temperature [K], first axis (rows)\n")
        f.write("R - Log10 Density [log (g cm^-3)], second axis (cols)\n")
    # Generate data specific to the requested T9 histograms
    target_T9s = [0.015, 0.02, 0.1]
    hist_data_reals = []
    valid_mins, valid_maxs = [], []
    for t9 in target_T9s:
        eigs_for_t9 = []
        for r in R:
            _, eigs = get_stiff(engine, blob, comp, t9, r)
            eigs_for_t9.append(eigs)
        # Collapse all densities into one spectrum, keep |Re(lambda)| != 0.
        flat_eigs = np.concatenate(eigs_for_t9)
        real_eigs = np.real(flat_eigs)
        non_zero_reals = real_eigs[real_eigs != 0]
        abs_reals = np.abs(non_zero_reals)
        hist_data_reals.append(abs_reals)
        if len(abs_reals) > 0:
            valid_mins.append(np.min(abs_reals))
            valid_maxs.append(np.max(abs_reals))
    # ------------------ Plotting ------------------
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 7))
    # Plot 1: Stiffness Ratio Contour.
    # contourf(X, Y, Z) expects Z shaped (len(Y), len(X)); SS is
    # (len(T), len(R)), so transpose.  (The original passed SS directly,
    # which silently transposed the map because the grid happens to be square.)
    img = ax1.contourf(T, R, SS.T)
    ax1.set_yscale('log')
    ax1.set_xscale('log')
    cbar = plt.colorbar(img, ax=ax1)
    cbar.ax.invert_yaxis()
    ax1.set_xlabel("Temperature [K]")
    ax1.set_ylabel("Density [g cm$^{-3}$]")
    ax1.set_title("Log10 Stiffness Ratio")
    # Plot 2: Overlaid Log-Spaced Histograms for specific T9 values
    if valid_mins and valid_maxs:
        global_min = np.min(valid_mins)
        global_max = np.max(valid_maxs)
        # Create global bins so the histograms align perfectly
        bins = np.logspace(np.log10(global_min), np.log10(global_max), 50)
        colors = ['#1f77b4', '#ff7f0e', '#2ca02c']  # Blue, Orange, Green
        for i, t9 in enumerate(target_T9s):
            ax2.hist(hist_data_reals[i], bins=bins, alpha=0.6, label=f"T9 = {t9}",
                     color=colors[i], edgecolor='black', linewidth=0.5)
        ax2.set_xscale('log')
        ax2.legend()
        ax2.grid(True, linestyle='--', alpha=0.6, axis='y')
    ax2.set_xlabel(r"Absolute Real Part $|Re(\lambda)|$")
    ax2.set_ylabel("Frequency")
    ax2.set_title("Eigenvalue Distribution at Selected Temperatures")
    plt.tight_layout()
    plt.show()

View File

@@ -0,0 +1,3 @@
SS - Stiffness ratio for jacobian matrix generated from a fully constructed GraphEngine with gridfire version: 0.7.5rc3
T - Temperature [K], first axis (rows)
R - Log10 Density [log (g cm^-3)], second axis (cols)

View File

@@ -0,0 +1,391 @@
import os.path
import shutil
from gridfire.policy import MainSequencePolicy, NetworkPolicy
from gridfire.engine import DynamicEngine, GraphEngine, EngineTypes, MultiscalePartitioningEngineView
from gridfire.solver import PointSolverContext
from gridfire.type import NetIn
from gridfire.policy import ConstructionResults
from typing import Dict
from fourdst.composition import Composition
from testsuite import TestSuite
from utils import init_netIn, init_composition, years_to_seconds
from enum import Enum
# Map lower-cased CLI engine names to the gridfire EngineTypes enum used by
# TestSuite.evolve for pynucastro comparison.
EngineNameToType: Dict[str, EngineTypes] = {
    "graphengine": EngineTypes.GRAPH_ENGINE,
    "multiscalepartitioningengineview": EngineTypes.MULTISCALE_PARTITIONING_ENGINE_VIEW,
    # NOTE(review): the CLI below offers (and defaults to) "AdaptiveEngineView",
    # but there is no "adaptiveengineview" key here, so the default
    # --pync-engine raises KeyError at dispatch time — TODO confirm the
    # EngineTypes member name and add the mapping.
}
class SolarLikeStar_QSE_Suite(TestSuite):
    """Solar-like conditions (1.5e7 K, 150 g/cc) over 10 Gyr, with the
    multiscale (QSE) partitioning view enabled."""
    def __init__(self):
        super().__init__(
            name="SolarLikeStar_QSE",
            description="GridFire simulation of a roughly solar like star over 10Gyr with QSE enabled.",
            temp=1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(),
            notes="Thermodynamically Static, MultiscalePartitioning Engine View"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        core = GraphEngine(self.composition, 3)
        view = MultiscalePartitioningEngineView(core)
        state = view.constructStateBlob(core.constructStateBlob())
        self.evolve(
            view,
            PointSolverContext(state),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class MetalEnhancedSolarLikeStar_QSE_Suite(TestSuite):
    """Solar core density, Z enhanced by 1 dex, temperature at 80% of solar
    core, with the multiscale (QSE) partitioning view enabled."""
    def __init__(self):
        super().__init__(
            name="MetalEnhancedSolarLikeStar_QSE",
            description="GridFire simulation of a star with solar core temp and density but enhanced by 1 dex in Z.",
            temp=0.8 * 1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(ZZs=1),
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, Z enhanced by 1 dex, temperature reduced to 80% of solar core"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        core = GraphEngine(self.composition, 3)
        view = MultiscalePartitioningEngineView(core)
        state = view.constructStateBlob(core.constructStateBlob())
        self.evolve(
            view,
            PointSolverContext(state),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class MetalEnhancedSolarLikeStar_No_QSE_Suite(TestSuite):
    """Solar core density, Z enhanced by 1 dex, temperature at 80% of solar
    core, on a plain depth-4 graph engine (no QSE partitioning)."""
    def __init__(self):
        super().__init__(
            name="MetalEnhancedSolarLikeStar_No_QSE",
            description="GridFire simulation of a star with solar core temp and density but enhanced by 1 dex in Z.",
            temp=0.8 * 1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(ZZs=1),
            notes="Thermodynamically Static, Z enhanced by 1 dex, temperature reduced to 80% of solar core"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        net_engine = GraphEngine(self.composition, 4)
        self.evolve(
            net_engine,
            PointSolverContext(net_engine.constructStateBlob()),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class MetalDepletedSolarLikeStar_QSE_Suite(TestSuite):
    """Solar core density, Z depleted by 1 dex, temperature at 120% of solar
    core, with the multiscale (QSE) partitioning view enabled."""
    def __init__(self):
        super().__init__(
            name="MetalDepletedSolarLikeStar_QSE",
            description="GridFire simulation of a star with solar core temp and density but depleted by 1 dex in Z.",
            temp=1.2 * 1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(ZZs=-1),
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, Z depleted by 1 dex, temperature increased to 120% of solar core"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        core = GraphEngine(self.composition, 3)
        view = MultiscalePartitioningEngineView(core)
        state = view.constructStateBlob(core.constructStateBlob())
        self.evolve(
            view,
            PointSolverContext(state),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class MetalDepletedSolarLikeStar_No_QSE_Suite(TestSuite):
    """Solar core density, Z depleted by 1 dex, temperature at 120% of solar
    core, on a plain depth-3 graph engine (no QSE partitioning)."""
    def __init__(self):
        super().__init__(
            name="MetalDepletedSolarLikeStar_No_QSE",
            description="GridFire simulation of a star with solar core temp and density but depleted by 1 dex in Z.",
            temp=1.2 * 1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(ZZs=-1),
            notes="Thermodynamically Static, Z depleted by 1 dex, temperature increased to 120% of solar core"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        net_engine = GraphEngine(self.composition, 3)
        self.evolve(
            net_engine,
            PointSolverContext(net_engine.constructStateBlob()),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class SolarLikeStar_No_QSE_Suite(TestSuite):
    """Solar-like conditions (1.5e7 K, 150 g/cc) over 10 Gyr on a plain
    depth-3 graph engine (no QSE partitioning)."""
    def __init__(self):
        super().__init__(
            name="SolarLikeStar_No_QSE",
            description="GridFire simulation of a roughly solar like star over 10Gyr with QSE disabled.",
            temp=1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(),
            notes="Thermodynamically Static, No MultiscalePartitioning Engine View"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        net_engine = GraphEngine(self.composition, 3)
        self.evolve(
            net_engine,
            PointSolverContext(net_engine.constructStateBlob()),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class SolarLikeStar_No_QSE_Depth_Suite(TestSuite):
    """Solar-like run without QSE partitioning, parameterized by the graph
    construction depth."""
    def __init__(self, depth: int = 1):
        # Remember the construction depth for __call__.
        self.depth = depth
        super().__init__(
            name=f"SolarLikeStar_No_QSE_Depth_{depth}_Suite",
            description="GridFire simulation of a roughly solar like star over 10Gyr with QSE disabled.",
            temp=1.5e7,
            density=1.5e2,
            tMax=years_to_seconds(1e10),
            composition=init_composition(),
            notes=f"Thermodynamically Static, No MultiscalePartitioning Engine View, configurable depth {depth}"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        net_engine = GraphEngine(self.composition, self.depth)
        self.evolve(
            net_engine,
            PointSolverContext(net_engine.constructStateBlob()),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class HotStar_QSE_Suite(TestSuite):
    """Hot (B-ish) star conditions (2.5e7 K, 15 g/cc), with the multiscale
    (QSE) partitioning view enabled."""
    def __init__(self):
        super().__init__(
            name="HotStar_QSE",
            description="GridFire simulation of a hot star over 1Gyr with QSE enabled.",
            temp=2.5e7,
            density=15,
            tMax=1e15,
            composition=init_composition(),
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, B(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        core = GraphEngine(self.composition, 3)
        view = MultiscalePartitioningEngineView(core)
        state = view.constructStateBlob(core.constructStateBlob())
        self.evolve(
            view,
            PointSolverContext(state),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class CoolStar_QSE_Suite(TestSuite):
    """Cool (M-ish) star conditions (6e6 K, 750 g/cc), with the multiscale
    (QSE) partitioning view enabled.

    Fix: the description previously said "hot star" — a copy-paste from
    HotStar_QSE_Suite — despite the 6e6 K temperature.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="CoolStar_QSE",
            description="GridFire simulation of a cool star over 1Gyr with QSE enabled.",
            temp=6e6,
            density=750,
            tMax=1e15,
            composition=initialComposition,
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, M(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        base_engine: GraphEngine = GraphEngine(self.composition, 3)
        engine : MultiscalePartitioningEngineView = MultiscalePartitioningEngineView(base_engine)
        blob = base_engine.constructStateBlob()
        blob = engine.constructStateBlob(blob)
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class HotStar_No_QSE_Suite(TestSuite):
    """Hot (B-ish) star conditions (2.5e7 K, 15 g/cc) without QSE
    partitioning.

    Fix: despite the "No_QSE" name and notes, __call__ previously wrapped the
    engine in MultiscalePartitioningEngineView (the QSE view) — inconsistent
    with every other *_No_QSE suite.  It now uses the plain GraphEngine.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="HotStar_No_QSE",
            description="GridFire simulation of a hot star over 1Gyr with QSE disabled.",
            temp=2.5e7,
            density=15,
            tMax=1e15,
            composition=initialComposition,
            notes="Thermodynamically Static, B(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Plain depth-3 graph engine — no partitioning view for a No_QSE run.
        engine: GraphEngine = GraphEngine(self.composition, 3)
        blob = engine.constructStateBlob()
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class CoolStar_No_QSE_Suite(TestSuite):
    """Cool (M-ish) star conditions (6e6 K, 750 g/cc) without QSE
    partitioning.

    Fixes: (1) despite the "No_QSE" name and notes, __call__ previously used
    MultiscalePartitioningEngineView (the QSE view) — now a plain GraphEngine,
    consistent with the other *_No_QSE suites; (2) the description said
    "hot star" for a 6e6 K run.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="CoolStar_No_QSE",
            description="GridFire simulation of a cool star over 1Gyr with QSE disabled.",
            temp=6e6,
            density=750,
            tMax=1e15,
            composition=initialComposition,
            notes="Thermodynamically Static, M(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Plain depth-3 graph engine — no partitioning view for a No_QSE run.
        engine: GraphEngine = GraphEngine(self.composition, 3)
        blob = engine.constructStateBlob()
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class VeryCoolStar_QSE_Suite(TestSuite):
    """Very cool star conditions (4e6 K, 1000 g/cc), with the multiscale
    (QSE) partitioning view enabled.

    Fix: the description previously said "hot star" for a 4e6 K run.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="VeryCoolStar_QSE",
            description="GridFire simulation of a very cool star over 1Gyr with QSE enabled.",
            temp=4e6,
            density=1000,
            tMax=1e15,
            composition=initialComposition,
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, M(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-3 graph engine in the QSE partitioning view.
        base_engine: GraphEngine = GraphEngine(self.composition, 3)
        engine : MultiscalePartitioningEngineView = MultiscalePartitioningEngineView(base_engine)
        blob = base_engine.constructStateBlob()
        blob = engine.constructStateBlob(blob)
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class VeryCoolStar_No_QSE_Suite(TestSuite):
    """Very cool star conditions (4e6 K, 1000 g/cc) without QSE partitioning.

    Fix: the description previously said "hot star" for a 4e6 K run.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="VeryCoolStar_No_QSE",
            description="GridFire simulation of a very cool star over 1Gyr with QSE disabled.",
            temp=4e6,
            density=1000,
            # NOTE(review): tMax=1e19 s here while the QSE sibling uses 1e15 s
            # — presumably intentional (slower burning), TODO confirm.
            tMax=1e19,
            composition=initialComposition,
            notes="Thermodynamically Static, M(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        engine: GraphEngine = GraphEngine(self.composition, 3)
        blob = engine.constructStateBlob()
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class VeryHotStar_QSE_Suite(TestSuite):
    """Very hot star conditions (4e7 K, 1 g/cc), with the multiscale (QSE)
    partitioning view enabled.

    Fix: the notes previously said "M(ish) star conditions" — a copy-paste
    from the cool-star suites — for a 4e7 K run; aligned with the
    VeryHotStar_No_QSE sibling's "B(ish)" wording.
    """
    def __init__(self):
        initialComposition : Composition = init_composition()
        super().__init__(
            name="VeryHotStar_QSE",
            description="GridFire simulation of a hot star over 1Gyr with QSE enabled.",
            temp=4e7,
            density=1,
            tMax=1e15,
            composition=initialComposition,
            notes="Thermodynamically Static, MultiscalePartitioning Engine View, B(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        # Wrap a depth-4 graph engine in the QSE partitioning view.
        base_engine: GraphEngine = GraphEngine(self.composition, 4)
        engine : MultiscalePartitioningEngineView = MultiscalePartitioningEngineView(base_engine)
        blob = base_engine.constructStateBlob()
        blob = engine.constructStateBlob(blob)
        context : PointSolverContext = PointSolverContext(blob)
        netIn : NetIn = init_netIn(self.temperature, self.density, self.tMax, self.composition)
        self.evolve(engine, context, netIn, pynucastro_compare = pynucastro_compare, engine_type=EngineNameToType[pync_engine.lower()], output=output)
class VeryHotStar_No_QSE_Suite(TestSuite):
    """Very hot (B-ish) star conditions (4e7 K, 1 g/cc) on a plain depth-4
    graph engine (no QSE partitioning)."""
    def __init__(self):
        super().__init__(
            name="VeryHotStar_No_QSE",
            description="GridFire simulation of a hot star over 1Gyr with QSE disabled.",
            temp=4e7,
            density=1,
            tMax=1e15,
            composition=init_composition(),
            notes="Thermodynamically Static, B(ish) star conditions"
        )
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        net_engine = GraphEngine(self.composition, 4)
        self.evolve(
            net_engine,
            PointSolverContext(net_engine.constructStateBlob()),
            init_netIn(self.temperature, self.density, self.tMax, self.composition),
            pynucastro_compare=pynucastro_compare,
            engine_type=EngineNameToType[pync_engine.lower()],
            output=output,
        )
class ValidationSuites(Enum):
    """Registry of all runnable validation suites, keyed by CLI name.

    Member order determines execution order under ``--all`` — do not reorder
    casually.  Each value is the suite class itself; ``suite.value()``
    instantiates it (``SolarLikeStar_No_QSE_Depth`` additionally takes a
    ``depth`` keyword).
    """
    SolarLikeStar_QSE = SolarLikeStar_QSE_Suite
    SolarLikeStar_No_QSE = SolarLikeStar_No_QSE_Suite
    SolarLikeStar_No_QSE_Depth = SolarLikeStar_No_QSE_Depth_Suite
    MetalDepletedSolarLikeStar_QSE = MetalDepletedSolarLikeStar_QSE_Suite
    MetalEnhancedSolarLikeStar_QSE = MetalEnhancedSolarLikeStar_QSE_Suite
    MetalDepletedSolarLikeStar_No_QSE = MetalDepletedSolarLikeStar_No_QSE_Suite
    MetalEnhancedSolarLikeStar_No_QSE = MetalEnhancedSolarLikeStar_No_QSE_Suite
    HotStar_QSE = HotStar_QSE_Suite
    CoolStar_QSE = CoolStar_QSE_Suite
    HotStar_No_QSE = HotStar_No_QSE_Suite
    CoolStar_No_QSE = CoolStar_No_QSE_Suite
    VeryCoolStar_QSE = VeryCoolStar_QSE_Suite
    VeryHotStar_QSE = VeryHotStar_QSE_Suite
    VeryCoolStar_No_QSE = VeryCoolStar_No_QSE_Suite
    VeryHotStar_No_QSE = VeryHotStar_No_QSE_Suite
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Run some subset of the GridFire validation suite.")
    parser.add_argument('--suite', type=str, choices=[suite.name for suite in ValidationSuites], nargs="+", help="The validation suite to run.")
    parser.add_argument("--all", action="store_true", help="Run all validation suites.")
    parser.add_argument("--pynucastro-compare", action="store_true", help="Generate pynucastro comparison data.")
    parser.add_argument("--pync-engine", type=str, choices=["GraphEngine", "MultiscalePartitioningEngineView", "AdaptiveEngineView"], default="AdaptiveEngineView", help="The GridFire engine to use to select the reactions for pynucastro comparison.")
    parser.add_argument("-o", "--output", type=str, help="Directory to save OKAY results to", default="GF_Validation_Output")
    parser.add_argument("--depths", type=int, nargs="+", default=[1, 2, 3, 4, 5, 6, 7], help="Construction depths to test. Must be positive non zero")
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)

    def run_suite(suite: ValidationSuites) -> None:
        # Run one registry entry; the depth-sweep suite is parameterized,
        # everything else takes no constructor arguments.
        if suite.name == "SolarLikeStar_No_QSE_Depth":
            for depth in args.depths:
                instance: TestSuite = suite.value(depth=depth)
                instance(args.pynucastro_compare, args.pync_engine, args.output)
        else:
            instance: TestSuite = suite.value()
            instance(args.pynucastro_compare, args.pync_engine, args.output)

    if args.all:
        for suite in ValidationSuites:
            run_suite(suite)
    elif args.suite:
        for suite_name in args.suite:
            run_suite(ValidationSuites[suite_name])
    else:
        # Previously this fell through to iterating args.suite (None) and
        # crashed with a TypeError; fail with a usage message instead.
        parser.error("Specify --all or at least one --suite.")

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,80 @@
from enum import Enum
from typing import Dict, List, Any, SupportsFloat
import json
from datetime import datetime
import os
import sys
from gridfire.solver import PointSolverTimestepContext
from gridfire._gridfire.engine.scratchpads import StateBlob
import gridfire
class LogEntries(Enum):
    """Keys for per-step records kept by :class:`StepLogger`.

    The enum *values* are the string keys used when serializing to JSON.
    """
    Step = "Step"                                     # integer step counter (from the timestep context)
    t = "t"                                           # current simulation time
    dt = "dt"                                         # size of the step just taken
    eps = "eps"                                       # last entry of the solver state vector
    Composition = "Composition"                       # mapping: species name -> state value
    ReactionContributions = "ReactionContributions"   # defined but not written by StepLogger in this file
class StepLogger:
    """Accumulates one record per solver timestep and serializes them to JSON."""

    def __init__(self):
        # Count of recorded steps; mirrors len(self.steps).
        self.num_steps: int = 0
        self.steps: List[Dict[LogEntries, Any]] = []

    def log_step(self, ctx: PointSolverTimestepContext):
        """Append a record of the current timestep taken from *ctx*."""
        engine = ctx.engine
        composition: Dict[str, SupportsFloat] = {
            sp.name(): ctx.state[engine.getSpeciesIndex(ctx.state_ctx, sp)]
            for sp in engine.getNetworkSpecies(ctx.state_ctx)
        }
        self.steps.append({
            LogEntries.Step: ctx.num_steps,
            LogEntries.t: ctx.t,
            LogEntries.dt: ctx.dt,
            # The last slot of the state vector is recorded as "eps".
            LogEntries.eps: ctx.state[-1],
            LogEntries.Composition: composition,
        })
        self.num_steps += 1

    def to_json(self, filename: str, **kwargs):
        """Write all recorded steps plus provenance metadata to *filename*.

        Extra keyword arguments are merged into the ``Metadata`` section
        (between ``NumSteps`` and ``DateCreated``).
        """
        payload: Dict[str, Any] = {
            "Metadata": {
                "NumSteps": self.num_steps,
                **kwargs,
                "DateCreated": datetime.now().isoformat(),
                "GridFireVersion": gridfire.__version__,
                "Author": "Emily M. Boudreaux",
                "OS": os.uname().sysname,
                # Best-effort compiler provenance; empty string if unavailable.
                "ClangVersion": os.popen("clang --version").read().strip(),
                "GccVersion": os.popen("gcc --version").read().strip(),
                "PythonVersion": sys.version,
            },
            "Steps": [
                {
                    LogEntries.Step.value: record[LogEntries.Step],
                    LogEntries.t.value: record[LogEntries.t],
                    LogEntries.dt.value: record[LogEntries.dt],
                    LogEntries.eps.value: record[LogEntries.eps],
                    LogEntries.Composition.value: record[LogEntries.Composition],
                }
                for record in self.steps
            ],
        }
        with open(filename, 'w') as f:
            json.dump(payload, f, indent=4)

    def summary(self) -> Dict[str, Any]:
        """Return totals and the final logged state, or {} if nothing was logged."""
        if not self.steps:
            return {}
        last = self.steps[-1]
        return {
            "TotalSteps": self.num_steps,
            "FinalTime": last[LogEntries.t],
            "FinalComposition": last[LogEntries.Composition],
        }

View File

@@ -0,0 +1,351 @@
import argparse
import json
import os
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.integrate import cumulative_trapezoid
from astropy import constants as const
from astropy import units as u
from enum import Enum
from typing import List, Dict, Any, Tuple
from fourdst.atomic import species as spdict
# Relative path to the publication Matplotlib style sheet applied when the
# --use-external-style flag is given (see plot_data).
EXTERNAL_STYLE_PATH = "../ManuscriptFigures/utils/pub.mplstyle"
class PlotVariable(Enum):
    """Quantities the plotter can render; values are the CLI-facing names."""
    COMPOSITION = "composition"  # per-species mass fractions over time
    EPS = "eps"                  # energy generation (cumulative for pynucastro comparison)
    DT = "dt"                    # solver timestep size over time
class OutputFormat(Enum):
    """Plot output targets; non-interactive values double as savefig formats."""
    INTERACTIVE = "interactive"  # show a window instead of writing a file
    PDF = "pdf"
    PNG = "png"
    JPEG = "jpeg"
def discover_runs(base_dir: str) -> List[str]:
    """Return the sorted names of runs that produced a successful GridFire output.

    A run named ``X`` is discovered from ``<base_dir>/GridFire/Ok/X_OKAY.json``;
    a missing directory yields an empty list.
    """
    ok_dir = os.path.join(base_dir, "GridFire", "Ok")
    if not os.path.exists(ok_dir):
        return []
    suffix = "_OKAY.json"
    found = {name.replace(suffix, "") for name in os.listdir(ok_dir) if name.endswith(suffix)}
    return sorted(found)
def load_run_data(base_dir: str, run_name: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Load the GridFire (Ok, else Err) and pynucastro JSON records for *run_name*.

    Either returned dict is empty when the corresponding file does not exist.
    """
    def _read_json(path: str) -> Dict[str, Any]:
        with open(path, 'r') as fh:
            return json.load(fh)

    ok_path = os.path.join(base_dir, "GridFire", "Ok", f"{run_name}_OKAY.json")
    err_path = os.path.join(base_dir, "GridFire", "Err", f"{run_name}_FAIL.json")
    pynuc_path = os.path.join(base_dir, "pynucastro", f"{run_name}_pynucastro.json")

    gf_data: Dict[str, Any] = {}
    if os.path.exists(ok_path):
        gf_data = _read_json(ok_path)
    elif os.path.exists(err_path):
        gf_data = _read_json(err_path)

    pynuc_data: Dict[str, Any] = {}
    if os.path.exists(pynuc_path):
        pynuc_data = _read_json(pynuc_path)

    return gf_data, pynuc_data
def get_pynuc_eps(steps: List[Dict[str, Any]]) -> Tuple[np.ndarray, np.ndarray]:
    """Estimate an energy-generation rate history from pynucastro composition steps.

    Finite-differences abundances between consecutive steps and converts the
    total mass-change rate to an energy rate via ``rate = -dm/dt * N_A * c^2``
    (mass deficit releases energy). Assumes the per-step ``Composition`` values
    are molar abundances — TODO confirm against the writer in TestSuite.

    Returns:
        (times, rates) as numpy arrays; the first rate entry is 0.0 since no
        previous step exists to difference against.
    """
    c_sq = (const.c.cgs.value)**2
    Na = const.N_A.cgs.value
    amu_to_g = const.u.cgs.value
    eps_history = []
    time_history = []
    last_Y = {}
    last_t = None
    for step in steps:
        t = step["t"]
        current_Y = step["Composition"]
        if last_t is None:
            # First step: no derivative available yet; record a zero rate.
            last_t = t
            last_Y = current_Y.copy()
            eps_history.append(0.0)
            time_history.append(t)
            continue
        dt = t - last_t
        # Steps with non-increasing time are skipped in the output (but still
        # advance last_t/last_Y below).
        if dt > 0:
            dm_dt = 0.0
            # Union of species from both steps: species may appear or vanish.
            all_species = set(current_Y.keys()) | set(last_Y.keys())
            for sp in all_species:
                y_curr = current_Y.get(sp, 0.0)
                y_prev = last_Y.get(sp, 0.0)
                dy = y_curr - y_prev
                # Unknown species names (not in the fourdst species table) are
                # silently ignored.
                if sp in spdict:
                    mass_g = spdict[sp].mass() * amu_to_g
                    dm_dt += mass_g * (dy / dt)
            # Negative net mass change (deficit) yields a positive energy rate.
            rate = -dm_dt * Na * c_sq
            eps_history.append(rate)
            time_history.append(t)
        last_t = t
        last_Y = current_Y.copy()
    return np.array(time_history), np.array(eps_history)
def _setup_axes(ax_main: plt.Axes, ax_diff: plt.Axes, var: PlotVariable, fig_opts: dict):
    """Apply shared labels and scales to a main/residual axes pair.

    Both axes get the configured x-scale; only the main axis gets the y-scale.
    """
    ax_diff.set_xlabel("Time (s)")
    for axis in (ax_main, ax_diff):
        axis.set_xscale(fig_opts['x_scale'])
    ax_main.set_yscale(fig_opts['y_scale'])
    # Variable-specific y label on the main axis.
    ylabels = {
        PlotVariable.EPS: "Cumulative Energy (eps)",
        PlotVariable.DT: "Timestep Size (dt)",
        PlotVariable.COMPOSITION: "Mass Fraction (X_i)",
    }
    if var in ylabels:
        ax_main.set_ylabel(ylabels[var])
    ax_diff.set_ylabel(r"$\Delta \log_{10}$")
def _plot_single_run(ax_main: plt.Axes, ax_diff: plt.Axes, run_name: str, var: PlotVariable,
                     base_dir: str, compare_pynuc: bool):
    """Plot one run's GridFire history (and optional pynucastro residuals).

    The main axis gets the time series; the residual axis gets log10
    differences between GridFire and the interpolated pynucastro data.
    Failed or empty runs are skipped silently.
    """
    gf_data, pynuc_data = load_run_data(base_dir, run_name)
    if not gf_data or gf_data.get("Metadata", {}).get("Status") == "Error":
        return
    gf_steps = gf_data.get("Steps", [])
    if not gf_steps:
        return
    if compare_pynuc and not pynuc_data:
        print(f"Warning: PyNucastro comparison requested but data not found for '{run_name}'.")
    t_gf = np.array([s["t"] for s in gf_steps])
    if var == PlotVariable.COMPOSITION:
        # Only the three species most abundant at the final step are plotted.
        final_comp = gf_steps[-1]["Composition"]
        top_species = sorted(final_comp, key=final_comp.get, reverse=True)[:3]
        for spec in top_species:
            # Missing species default to 1e-30 so log-scale plots stay finite.
            y_gf = np.array([s["Composition"].get(spec, 1e-30) for s in gf_steps])
            line, = ax_main.plot(t_gf, y_gf, label=f"{run_name} {spec} (GF)")
            if compare_pynuc and pynuc_data:
                pynuc_steps = pynuc_data.get("Steps", [])
                if not pynuc_steps:
                    continue
                t_pynuc = np.array([s["t"] for s in pynuc_steps])
                y_pynuc = np.array([s["Composition"].get(spec, 1e-30) for s in pynuc_steps])
                ax_main.plot(t_pynuc, y_pynuc, '--', color=line.get_color(), label=f"{run_name} {spec} (PyNuc)")
                if len(t_pynuc) > 1:
                    # Interpolate pynucastro onto the GridFire time grid and
                    # plot |Δ log10| as the residual.
                    f_interp = interp1d(t_pynuc, y_pynuc, kind='linear', bounds_error=False, fill_value=(y_pynuc[0], y_pynuc[-1]))
                    y_pynuc_interp = f_interp(t_gf)
                    log_diff = np.abs(np.log10(np.maximum(y_gf, 1e-30)) - np.log10(np.maximum(y_pynuc_interp, 1e-30)))
                    ax_diff.plot(t_gf, log_diff, color=line.get_color(), linestyle=':', label=f"Δ {spec}")
    elif var == PlotVariable.EPS:
        y_gf = np.array([s["eps"] for s in gf_steps])
        line, = ax_main.plot(t_gf, y_gf, label=f"{run_name} (GF)")
        if compare_pynuc and pynuc_data:
            pynuc_steps = pynuc_data.get("Steps", [])
            if pynuc_steps:
                # pynucastro eps is reconstructed from composition differences,
                # then integrated to compare against GridFire's cumulative value.
                s_t, s_e = get_pynuc_eps(pynuc_steps)
                if len(s_t) > 1:
                    s_cumE = cumulative_trapezoid(s_e, s_t, initial=0)
                    ax_main.plot(s_t, s_cumE, '--', color=line.get_color(), label=f"{run_name} (PyNuc)")
                    f_pynuc_interp = interp1d(s_t[np.isfinite(s_cumE)], s_cumE[np.isfinite(s_cumE)])
                    f_gf_interp = interp1d(t_gf, y_gf)
                    # NOTE(review): the residual grid's lower bound is
                    # hard-coded to 1e8 s — confirm this suits all runs.
                    t_safe = np.logspace(
                        8,
                        np.log10(min(s_t.max(), t_gf.max())),
                        1000
                    )
                    y_pynuc_interp = f_pynuc_interp(t_safe)
                    y_gf_interp = f_gf_interp(t_safe)
                    # Clamp magnitudes away from zero before taking logs.
                    pynuc_safe = np.maximum(np.abs(y_pynuc_interp), 1e-30)
                    gf_safe = np.maximum(np.abs(y_gf_interp), 1e-30)
                    log_diff = np.log10(gf_safe) - np.log10(pynuc_safe)
                    ax_diff.plot(t_safe, log_diff, color=line.get_color(), linestyle=':', label=f"Δ eps")
        # Sets only the LEFT x limit (right limit stays automatic).
        ax_main.set_xlim(1e8)
    elif var == PlotVariable.DT:
        y_gf = np.array([s["dt"] for s in gf_steps])
        ax_main.plot(t_gf, y_gf, label=f"{run_name} (GF)")
def _finalize_plot(fig: plt.Figure, ax_main: plt.Axes, ax_diff: plt.Axes, format_opt: OutputFormat, filename_base: str, is_subfigure: bool = False):
    """Attach legends, then lay out and save/close the figure.

    Subfigures return early (the parent figure handles layout and saving);
    interactive output leaves the figure open for plt.show().
    """
    ax_main.legend(loc='best', fontsize='small')
    # Only add a residual legend when something was actually drawn there.
    if len(ax_diff.lines) > 0:
        ax_diff.legend(loc='best', fontsize='x-small')
    if is_subfigure:
        return
    fig.tight_layout()
    if format_opt == OutputFormat.INTERACTIVE:
        return
    out_name = f"{filename_base}.{format_opt.value}"
    fig.savefig(out_name, format=format_opt.value, bbox_inches='tight')
    print(f"Saved figure: {out_name}")
    plt.close(fig)
def plot_data(runs: List[str], plot_vars: List[PlotVariable], base_dir: str,
              compare_pynuc: bool, format_opt: OutputFormat, fig_opts: dict):
    """Render each requested variable for the given runs.

    Produces one figure per variable: either a single merged figure
    (fig_opts['merge_runs']) or a near-square grid of subfigures, one per run.
    fig_opts keys used: 'use_ext_style', 'style', 'figsize', 'dpi',
    'merge_runs', 'x_scale', 'y_scale'.
    """
    if not runs:
        print("No valid runs to plot.")
        return
    # Style selection: the external sheet wins over a named builtin style.
    if fig_opts['use_ext_style']:
        try:
            plt.style.use(EXTERNAL_STYLE_PATH)
            print(f"Using external style sheet: {EXTERNAL_STYLE_PATH}")
        except Exception as e:
            print(f"Warning: Failed to load external style sheet. Error: {e}")
    elif fig_opts['style']:
        try:
            plt.style.use(fig_opts['style'])
        except OSError:
            print(f"Warning: Style '{fig_opts['style']}' not found. Using default.")
    plt.rcParams["figure.figsize"] = fig_opts['figsize']
    plt.rcParams["figure.dpi"] = fig_opts['dpi']
    for var in plot_vars:
        if fig_opts['merge_runs']:
            # Single figure: all runs overlaid on one main/residual axes pair.
            fig, (ax_main, ax_diff) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
            ax_main.set_title(f"Comparison of {var.value.upper()} (Merged Runs)")
            _setup_axes(ax_main, ax_diff, var, fig_opts)
            for run_name in runs:
                _plot_single_run(ax_main, ax_diff, run_name, var, base_dir, compare_pynuc)
            _finalize_plot(fig, ax_main, ax_diff, format_opt, f"ValidationPlot_Merged_{var.value}")
        else:
            # Grid layout: one subfigure per run, figsize scales with the grid.
            num_runs = len(runs)
            cols = math.ceil(math.sqrt(num_runs))
            rows = math.ceil(num_runs / cols)
            base_w, base_h = fig_opts['figsize']
            fig = plt.figure(figsize=(base_w * cols, base_h * rows), layout='constrained')
            fig.suptitle(f"{var.value.upper()} Comparison", fontsize=16, fontweight='bold')
            subfigs_raw = fig.subfigures(rows, cols)
            # subfigures() returns a bare SubFigure (no .flatten) for a 1x1 grid.
            if hasattr(subfigs_raw, 'flatten'):
                subfigs = subfigs_raw.flatten()
            else:
                subfigs = [subfigs_raw]
            for i, run_name in enumerate(runs):
                subfig = subfigs[i]
                subfig.suptitle(f"{run_name}", fontsize=12)
                axes = subfig.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
                ax_main, ax_diff = axes[0], axes[1]
                _setup_axes(ax_main, ax_diff, var, fig_opts)
                _plot_single_run(ax_main, ax_diff, run_name, var, base_dir, compare_pynuc)
                _finalize_plot(fig, ax_main, ax_diff, format_opt, "", is_subfigure=True)
            # Hide any unused grid cells.
            for j in range(num_runs, len(subfigs)):
                subfigs[j].set_visible(False)
            if format_opt != OutputFormat.INTERACTIVE:
                out_name = f"ValidationPlot_Grid_{var.value}.{format_opt.value}"
                fig.savefig(out_name, format=format_opt.value, bbox_inches='tight')
                print(f"Saved grid figure: {out_name}")
                plt.close(fig)
    if format_opt == OutputFormat.INTERACTIVE:
        plt.show()
def main():
    """CLI entry point: discover runs, optionally list them, and plot variables."""
    parser = argparse.ArgumentParser(description="GridFire Validation Suite Output Parser and Plotter")
    parser.add_argument("-d", "--data-dir", type=str, default="GF_Validation_Output",
                        help="Path to the directory containing the JSON output folders.")
    parser.add_argument("--runs", nargs="+", type=str, required=False,
                        help="Which validation runs to analyze. Use 'all' to process all available runs.")
    parser.add_argument("--plot", nargs="*", type=lambda x: PlotVariable[x.upper()], choices=list(PlotVariable), default=[],
                        help="Variables to plot. Leave empty to skip plotting.")
    parser.add_argument("--compare-pynucastro", action="store_true",
                        help="Include pynucastro data and calculate log residuals.")
    parser.add_argument("--merge-runs", action="store_true",
                        help="Merge all specified runs onto a single figure per variable. (Default: Grid layout of subfigures)")
    parser.add_argument("--x-scale", type=str, choices=["log", "linear"], default="log",
                        help="Scale for the x-axis (time). Default is 'log'.")
    parser.add_argument("--y-scale", type=str, choices=["log", "linear"], default="log",
                        help="Scale for the y-axis (main plots). Default is 'log'.")
    parser.add_argument("--format", type=lambda x: OutputFormat[x.upper()], choices=list(OutputFormat), default=OutputFormat.INTERACTIVE,
                        help="Output format for the plots. Default is interactive window.")
    parser.add_argument("--use-external-style", action="store_true",
                        help="Load the custom style sheet defined in EXTERNAL_STYLE_PATH.")
    parser.add_argument("--style", type=str, default=None,
                        help="Built-in Matplotlib stylesheet name (e.g., 'seaborn-v0_8-whitegrid'). Ignored if --use-external-style is set.")
    parser.add_argument("--figsize", nargs=2, type=float, default=[8.0, 6.0],
                        metavar=("WIDTH", "HEIGHT"), help="Base figure size in inches per subfigure (e.g., --figsize 10 8).")
    parser.add_argument("--dpi", type=int, default=150, help="DPI resolution for saved figures.")
    parser.add_argument("--list", action="store_true", default=False, help="list available runs")
    args = parser.parse_args()
    available_runs = discover_runs(args.data_dir)
    if not available_runs:
        print(f"Error: No successful run data found in {args.data_dir} (Checked GridFire/Ok/).")
        sys.exit(1)
    if args.list:
        for run in available_runs:
            print(f"==> {run}")
        sys.exit(0)
    # --runs is optional in argparse but required past this point; previously
    # omitting it crashed with a TypeError when iterating None.
    if not args.runs:
        parser.error("--runs is required unless --list is given")
    if "all" in [r.lower() for r in args.runs]:
        target_runs = available_runs
    else:
        target_runs = [r for r in args.runs if r in available_runs]
        missing = [r for r in args.runs if r.lower() != "all" and r not in target_runs]
        if missing:
            print(f"Warning: The following runs were skipped because they failed or weren't found: {', '.join(missing)}")
    if args.plot and target_runs:
        fig_opts = {
            "figsize": tuple(args.figsize),
            "dpi": args.dpi,
            "style": args.style,
            "use_ext_style": args.use_external_style,
            "merge_runs": args.merge_runs,
            "x_scale": args.x_scale,
            "y_scale": args.y_scale
        }
        plot_data(target_runs, args.plot, args.data_dir, args.compare_pynucastro, args.format, fig_opts)
    elif args.plot:
        print("Error: No valid runs matched your selection.")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,22 @@
# Pynucastro test suite
Test suite to auto generate pynucastro comparisons to GridFire by copying the GridFire topology to pynucastro.
Note that this may take a while to run, as each pynucastro network must go through JIT compilation. The JIT time is not counted toward the
pynucastro evaluation time.
To run, use:
```bash
python GridFireValidationSuite.py --suite HotStar_No_QSE CoolStar_No_QSE --pynucastro-compare --pync-engine="GraphEngine"
```
to see all options
```bash
python GridFireValidationSuite.py --help
```
Results will be saved in a directory as json files which you may parse to analyze

View File

@@ -0,0 +1,299 @@
import shutil
from abc import ABC, abstractmethod
import fourdst.atomic
import scipy.integrate
import gridfire
from fourdst.composition import Composition
from gridfire.engine import DynamicEngine, GraphEngine, AdaptiveEngineView, MultiscalePartitioningEngineView
from gridfire.engine import EngineTypes
from gridfire.policy import MainSequencePolicy
from gridfire.type import NetIn, NetOut
from gridfire.exceptions import GridFireError
from gridfire.solver import PointSolver, PointSolverContext
from logger import StepLogger
from typing import List
import re
from typing import Dict, Tuple, Any, Union
from datetime import datetime
import pynucastro as pyna
import os
import importlib.util
import sys
import numpy as np
import json
import time
# Maps each gridfire EngineTypes enum member to the concrete engine class
# implementing it (used when rebuilding engines for pynucastro comparisons).
EngineTypeLookup : Dict[EngineTypes, Any] = {
    EngineTypes.ADAPTIVE_ENGINE_VIEW: AdaptiveEngineView,
    EngineTypes.MULTISCALE_PARTITIONING_ENGINE_VIEW: MultiscalePartitioningEngineView,
    EngineTypes.GRAPH_ENGINE: GraphEngine
}
def load_network_module(filepath):
    """Import and return the Python module at *filepath* under a fresh name.

    Any previously imported module with the same name is evicted from
    ``sys.modules`` first, so repeated loads pick up regenerated files.

    Args:
        filepath: Path to a ``.py`` file (e.g. a generated pynucastro network).

    Raises:
        FileNotFoundError: If no import spec can be created for *filepath*.
    """
    # splitext strips only the trailing extension; the previous
    # .replace(".py", "") removed every ".py" substring in the filename.
    module_name = os.path.splitext(os.path.basename(filepath))[0]
    if module_name in sys.modules:  # clear any existing module with the same name
        del sys.modules[module_name]
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    if spec is None:
        raise FileNotFoundError(f"Could not find module at {filepath}")
    network_module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = network_module
    spec.loader.exec_module(network_module)
    return network_module
def get_pyna_rate(my_rate_str, library):
    """Resolve a GridFire rate id like ``c12(p,g)n13`` to a pynucastro rate.

    Parses target/projectile/ejectiles/product out of the id, expands
    multiplicities (``2p`` -> two protons), maps ``a`` to ``he4``, and skips
    photons (``g``), which are not nuclei.

    Args:
        my_rate_str: Rate id in ``target(projectile,ejectiles)product`` form.
        library: A pynucastro library exposing ``get_rate_by_nuclei``.

    Returns:
        The first matching rate, or None when parsing or lookup fails.
    """
    match = re.match(r"([a-zA-Z0-9]+)\(([^,]+),([^)]*)\)(.*)", my_rate_str)
    if not match:
        print(f"Could not parse string format: {my_rate_str}")
        return None
    target = match.group(1)
    projectile = match.group(2)
    ejectiles = match.group(3)
    product = match.group(4)

    def expand_species(s_str):
        if not s_str or s_str.strip() == "":
            return []
        # Split by space (handling "p a" or "2p a")
        parts = s_str.split()
        expanded = []
        for p in parts:
            # Skip photons: passing 'g' to pyna.Nucleus raises, which
            # previously made every radiative-capture rate look missing.
            if p == 'g':
                continue
            # Check for multipliers like 2p, 3a (anchored so the whole token
            # must be <digits><species>)
            mult_match = re.match(r"^(\d+)([a-zA-Z0-9]+)$", p)
            if mult_match:
                count = int(mult_match.group(1))
                spec = mult_match.group(2)
            else:
                count = 1
                spec = p
            if spec == 'g':
                continue
            # Map common aliases if necessary (though pyna handles most)
            if spec == 'a': spec = 'he4'
            expanded.extend([spec] * count)
        return expanded

    reactants_str = [target] + expand_species(projectile)
    products_str = expand_species(ejectiles) + [product]
    # Convert strings to pyna.Nucleus objects
    try:
        r_nuc = [pyna.Nucleus(r) for r in reactants_str]
        p_nuc = [pyna.Nucleus(p) for p in products_str]
    except Exception as e:
        print(f"Error converting nuclei for {my_rate_str}: {e}")
        return None
    rates = library.get_rate_by_nuclei(r_nuc, p_nuc)
    if rates:
        if isinstance(rates, list):
            return rates[0]  # Return the first match
        return rates
    else:
        return None
class TestSuite(ABC):
    """Base class for one GridFire validation scenario.

    Subclasses fix the thermodynamic conditions in ``__init__`` and implement
    ``__call__`` to construct an engine/context and invoke :meth:`evolve`.
    """
    def __init__(self, name: str, description: str, temp: float, density: float, tMax: float, composition: Composition, notes: str = ""):
        """Store the scenario parameters.

        Args:
            name: Identifier used in output filenames.
            description: Human-readable description written to output metadata.
            temp: Temperature (units per GridFire convention — confirm).
            density: Density (units per GridFire convention — confirm).
            tMax: Maximum integration time.
            composition: Initial composition of the network.
            notes: Free-form notes written to output metadata.
        """
        self.name : str = name
        self.description : str = description
        self.temperature : float = temp
        self.density : float = density
        self.tMax : float = tMax
        self.composition : Composition = composition
        self.notes : str = notes
    def evolve_pynucastro(self, engine: DynamicEngine, ctx: PointSolverContext, output: str = "pynucastro"):
        """Run an equivalent pynucastro network and dump its history to JSON.

        Mirrors the reaction set of *engine* in pynucastro, integrates the
        generated network twice (the first run includes JIT compilation
        overhead, so both timings are recorded), and writes the composition
        history plus metadata to ``<output>/<name>_pynucastro.json``.
        Side effects: also writes ``<name>_rate_names_pynuc.txt`` and
        ``<name>_pynucastro_network.py`` into the current working directory.
        """
        print("Evolution complete. Now building equivalent pynucastro network...")
        # Build equivalent pynucastro network for comparison
        reaclib_library : pyna.ReacLibLibrary = pyna.ReacLibLibrary()
        # Normalize GridFire rate ids (drop e+/e-, tighten comma spacing) so
        # they can be matched against ReacLib names.
        rate_names = [r.id().replace("e+","").replace("e-","").replace(", ", ",") for r in engine.getNetworkReactions(ctx.engine_ctx)]
        with open(f"{self.name}_rate_names_pynuc.txt", "w") as f:
            for r_name in rate_names:
                f.write(f"{r_name}\n")
        goodRates : List[pyna.rates.reaclib_rate.ReacLibRate] = []
        missingRates = []
        for r_str in rate_names:
            # Try the exact name match first (fastest)
            try:
                pyna_rate = reaclib_library.get_rate_by_name(r_str)
                if isinstance(pyna_rate, list):
                    goodRates.append(pyna_rate[0])
                else:
                    goodRates.append(pyna_rate)
            except:
                # Fallback to the smart parser
                pyna_rate = get_pyna_rate(r_str, reaclib_library)
                if pyna_rate:
                    goodRates.append(pyna_rate)
                else:
                    missingRates.append(r_str)
        pynet : pyna.PythonNetwork = pyna.PythonNetwork(rates=goodRates)
        pynet.write_network(f"{self.name}_pynucastro_network.py")
        net = load_network_module(f"{self.name}_pynucastro_network.py")
        # Seed the pynucastro state vector from the suite's initial composition.
        # NOTE(review): assumes the generated network exposes exactly these
        # eight species indices (jp, jhe3, ...) — confirm for other networks.
        Y0 = np.zeros(net.nnuc)
        Y0[net.jp] = self.composition.getMolarAbundance("H-1")
        Y0[net.jhe3] = self.composition.getMolarAbundance("He-3")
        Y0[net.jhe4] = self.composition.getMolarAbundance("He-4")
        Y0[net.jc12] = self.composition.getMolarAbundance("C-12")
        Y0[net.jn14] = self.composition.getMolarAbundance("N-14")
        Y0[net.jo16] = self.composition.getMolarAbundance("O-16")
        Y0[net.jne20] = self.composition.getMolarAbundance("Ne-20")
        Y0[net.jmg24] = self.composition.getMolarAbundance("Mg-24")
        print("Starting pynucastro integration...")
        startTime = time.time()
        sol = scipy.integrate.solve_ivp(
            net.rhs,
            [0, self.tMax],
            Y0,
            args=(self.density, self.temperature),
            method="BDF",
            jac=net.jacobian,
            rtol=1e-5,
            atol=1e-8
        )
        endTime = time.time()
        initial_duration = endTime - startTime
        print("Pynucastro integration complete. Writing results to JSON...")
        print("Running pynucastro a second time to account for any JIT compilation overhead...")
        # Second, identical integration: its timing excludes JIT warm-up.
        startTime = time.time()
        sol = scipy.integrate.solve_ivp(
            net.rhs,
            [0, self.tMax],
            Y0,
            args=(self.density, self.temperature),
            method="BDF",
            jac=net.jacobian,
            rtol=1e-5,
            atol=1e-8
        )
        endTime = time.time()
        final_duration = endTime - startTime
        print(f"Pynucastro second integration complete. Initial run time: {initial_duration: .4f} s, Second run time: {final_duration: .4f} s")
        # Convert the solver history into per-step composition records keyed
        # by species name.
        data: List[Dict[str, Union[float, Dict[str, float]]]] = []
        for time_step, t in enumerate(sol.t):
            data.append({"t": t, "Composition": {}})
            for j in range(net.nnuc):
                A = net.A[j]
                Z = net.Z[j]
                species: str
                try:
                    species = fourdst.atomic.az_to_species(A, Z).name()
                except:
                    # Fall back to a synthetic name when (A, Z) is unknown.
                    species = f"SP-A_{A}_Z_{Z}"
                data[-1]["Composition"][species] = sol.y[j, time_step]
        pynucastro_json : Dict[str, Any] = {
            "Metadata": {
                "Name": f"{self.name}_pynucastro",
                "Description": f"pynucastro simulation equivalent to GridFire validation suite: {self.description}",
                "Status": "Success",
                "Notes": self.notes,
                "Temperature": self.temperature,
                "Density": self.density,
                "tMax": self.tMax,
                "RunTime0": initial_duration,
                "RunTime1": final_duration,
                "DateCreated": datetime.now().isoformat(),
                "NumSpecies": net.nnuc
            },
            "Steps": data
        }
        filename: str = f"{self.name}_pynucastro.json"
        filepath: str = os.path.join(output, filename)
        with open(filepath, "w") as f:
            json.dump(pynucastro_json, f, indent=4)
    def evolve(self, engine: DynamicEngine, solver_ctx: PointSolverContext, netIn: NetIn, pynucastro_compare: bool = True, engine_type: EngineTypes | None = None, output: str = "output"):
        """Integrate the network with GridFire's PointSolver, logging every step.

        On success, results are written to
        ``<output>/GridFire/Ok/<name>_OKAY.json``; on GridFireError, partial
        results go to ``<output>/GridFire/Err/<name>_FAIL.json``. When
        *pynucastro_compare* is set, an equivalent pynucastro run is produced
        for the given *engine_type* (GraphEngine or
        MultiscalePartitioningEngineView only).
        """
        solver : PointSolver = PointSolver(engine)
        stepLogger : StepLogger = StepLogger()
        # Record every solver timestep via the callback hook.
        solver_ctx.callback = lambda ctx: stepLogger.log_step(ctx)
        # (startTime is re-set just below; this first assignment is redundant.)
        startTime = time.time()
        subdir: str = os.path.join(output, "GridFire")
        os.makedirs(subdir, exist_ok=True)
        try:
            startTime = time.time()
            netOut : NetOut = solver.evaluate(solver_ctx, netIn)
            endTime = time.time()
            filename: str = f"{self.name}_OKAY.json"
            subdir2: str = os.path.join(subdir, "Ok")
            os.makedirs(subdir2, exist_ok=True)
            filepath: str = os.path.join(subdir2, filename)
            stepLogger.to_json(
                filepath,
                Name = f"{self.name}_Success",
                Description=self.description,
                Status="Success",
                Notes=self.notes,
                Temperature=netIn.temperature,
                Density=netIn.density,
                tMax=netIn.tMax,
                FinalEps = netOut.energy,
                FinaldEpsdT = netOut.dEps_dT,
                FinaldEpsdRho = netOut.dEps_dRho,
                ElapsedTime = endTime - startTime,
                NumSpecies = engine.getNetworkSpecies(solver_ctx.engine_ctx).__len__(),
                NumReactions = engine.getNetworkReactions(solver_ctx.engine_ctx).__len__()
            )
        except GridFireError as e:
            # Persist whatever steps were logged before the failure.
            endTime = time.time()
            filename : str = f"{self.name}_FAIL.json"
            subdir2 : str = os.path.join(subdir, "Err")
            os.makedirs(subdir2, exist_ok=True)
            filepath = os.path.join(subdir2, filename)
            stepLogger.to_json(
                filepath,
                Name = f"{self.name}_Failure",
                Description=self.description,
                Status=f"Error",
                ErrorMessage=str(e),
                Notes=self.notes,
                Temperature=netIn.temperature,
                Density=netIn.density,
                tMax=netIn.tMax,
                ElapsedTime = endTime - startTime
            )
        if pynucastro_compare:
            pynuc_dir = os.path.join(output, "pynucastro")
            os.makedirs(pynuc_dir, exist_ok=True)
            if engine_type is not None:
                # A fresh depth-3 GraphEngine is built here regardless of the
                # depth used for the GridFire run itself.
                if engine_type == EngineTypes.MULTISCALE_PARTITIONING_ENGINE_VIEW:
                    print("Pynucastro comparison using MultiscalePartitioningEngineView...")
                    graphEngine : GraphEngine = GraphEngine(self.composition, depth=3)
                    multiScaleEngine : MultiscalePartitioningEngineView = MultiscalePartitioningEngineView(graphEngine)
                    self.evolve_pynucastro(multiScaleEngine, solver_ctx, pynuc_dir)
                elif engine_type == EngineTypes.GRAPH_ENGINE:
                    print("Pynucastro comparison using GraphEngine...")
                    graphEngine : GraphEngine = GraphEngine(self.composition, depth=3)
                    self.evolve_pynucastro(graphEngine, solver_ctx, pynuc_dir)
                else:
                    print(f"Pynucastro comparison not implemented for engine type: {engine_type}")
    @abstractmethod
    def __call__(self, pynucastro_compare: bool = False, pync_engine: str = "GraphEngine", output: str = "output"):
        """Run the scenario; implemented by each concrete suite."""
        pass

View File

@@ -0,0 +1,56 @@
from fourdst.composition import Composition
from fourdst.composition import CanonicalComposition
from fourdst.atomic import Species
from gridfire.type import NetIn
def rescale_composition(comp_ref : Composition, ZZs : float, Y_primordial : float = 0.248) -> Composition:
    """Rescale a reference composition to a new metallicity.

    The new metal mass fraction is ``Z_new = Z_ref * 10**ZZs``; helium follows
    a linear enrichment law dY/dZ anchored at the primordial helium abundance,
    and hydrogen absorbs the remainder. Relative abundances *within* each of
    the H, He, and metal groups are preserved.

    Args:
        comp_ref: Reference composition to rescale.
        ZZs: Metallicity offset in dex relative to the reference.
        Y_primordial: Primordial helium mass fraction used to anchor dY/dZ.

    Returns:
        A new Composition with rescaled molar abundances.

    Raises:
        ValueError: If the requested ZZs leaves no room for hydrogen (X < 0).
    """
    CC : CanonicalComposition = comp_ref.getCanonicalComposition()
    # Helium enrichment slope anchored at the primordial helium abundance.
    dY_dZ = (CC.Y - Y_primordial) / CC.Z
    Z_new = CC.Z * (10**ZZs)
    Y_bulk_new = Y_primordial + (dY_dZ * Z_new)
    X_new = 1.0 - Z_new - Y_bulk_new
    if X_new < 0:
        raise ValueError(f"ZZs={ZZs} yields unphysical composition (X < 0)")
    # Per-group scaling factors (guard against division by zero groups).
    ratio_H = X_new / CC.X if CC.X > 0 else 0
    ratio_He = Y_bulk_new / CC.Y if CC.Y > 0 else 0
    ratio_Z = Z_new / CC.Z if CC.Z > 0 else 0
    newComp : Composition = Composition()
    s: Species
    for s in comp_ref.getRegisteredSpecies():
        Xi_ref = comp_ref.getMassFraction(s)
        if s.el() == "H":
            Xi_new = Xi_ref * ratio_H
        elif s.el() == "He":
            Xi_new = Xi_ref * ratio_He
        else:
            Xi_new = Xi_ref * ratio_Z
        Y = Xi_new / s.mass()  # mass fraction -> molar abundance
        newComp.registerSpecies(s)
        newComp.setMolarAbundance(s, Y)
    return newComp
def init_composition(ZZs : float = 0) -> Composition:
    """Build the eight-species solar-like baseline composition, rescaled by *ZZs* dex."""
    species_names = ["H-1", "He-3", "He-4", "C-12", "N-14", "O-16", "Ne-20", "Mg-24"]
    solar_values = [7.0262E-01, 9.7479E-06, 6.8955E-02, 2.5000E-04, 7.8554E-05, 6.0144E-04, 8.1031E-05, 2.1513E-05]
    baseline = Composition(species_names, solar_values)
    return rescale_composition(baseline, ZZs)
def init_netIn(temp: float, rho: float, time: float, comp: Composition) -> NetIn:
    """Assemble a NetIn carrying the given thermodynamic state and composition."""
    net_in: NetIn = NetIn()
    net_in.composition = comp
    net_in.temperature = temp
    net_in.density = rho
    net_in.tMax = time
    # Very small starting step; presumably the solver adapts upward from here.
    net_in.dt0 = 1e-12
    return net_in
def years_to_seconds(years: float) -> float:
    """Convert *years* to seconds using a 365-day year (3.1536e7 s/yr)."""
    seconds_per_year = 365.0 * 24.0 * 3600.0  # == 3.1536e7 exactly
    return years * seconds_per_year