Merge pull request #10 from tboudreaux/perf/openMP

Performance + openMP support
This commit is contained in:
2025-12-06 13:52:55 -05:00
committed by GitHub
49 changed files with 1156 additions and 422 deletions

7
.gitignore vendored
View File

@@ -82,6 +82,7 @@ subprojects/cvode-*/
subprojects/kinsol-*/
subprojects/CLI11-*/
subprojects/openssl-*/
subprojects/tomlplusplus-*/
*.fbundle
*.wraplock
@@ -98,6 +99,8 @@ liblogging.wrap
libplugin.wrap
minizip-ng.wrap
openssl.wrap
glaze.wrap
tomlplusplus.wrap
.vscode/
@@ -119,3 +122,7 @@ meson-boost-test/
*.json
*.xml
*_pynucastro_network.py
cross/python_includes
*.whl

View File

@@ -48,7 +48,7 @@ PROJECT_NAME = GridFire
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = v0.7.3_rc2
PROJECT_NUMBER = v0.7.4_rc2
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewers a

View File

View File

@@ -0,0 +1,37 @@
cppc = meson.get_compiler('cpp')
if cppc.get_id() == 'clang'
message('disabling bitwise-instead-of-logical warnings for clang')
add_project_arguments('-Wno-bitwise-instead-of-logical', language: 'cpp')
endif
if cppc.get_id() == 'gcc'
message('disabling psabi warnings for gcc')
add_project_arguments('-Wno-psabi', language: 'cpp')
if (cppc.version().version_compare('<14.0'))
error('g++ version must be at least 14.0, found ' + cppc.version())
endif
endif
if not cppc.has_header('print')
error('C++ standard library header <print> not found. Please ensure your compiler and standard library supports C++23. We have already validated your compiler version so this is likely an issue with your standard library installation.')
endif
if not cppc.has_header('format')
error('C++ standard library header <format> not found. Please ensure your compiler and standard library supports C++23. We have already validated your compiler version so this is likely an issue with your standard library installation.')
endif
# For Eigen
add_project_arguments('-Wno-deprecated-declarations', language: 'cpp')
if get_option('build_python')
message('enabling hidden visibility for C++ symbols when building Python extension. This reduces the size of the resulting shared library.')
add_project_arguments('-fvisibility=hidden', language: 'cpp')
else
message('enabling default visibility for C++ symbols')
add_project_arguments('-fvisibility=default', language: 'cpp')
endif
if get_option('openmp_support')
add_project_arguments('-DGRIDFIRE_USE_OPENMP', language: 'cpp')
endif

View File

@@ -0,0 +1,15 @@
if get_option('build_fortran')
add_languages('fortran', native: true)
message('Found FORTRAN compiler: ' + meson.get_compiler('fortran').get_id())
message('Fortran standard set to: ' + get_option('fortran_std'))
message('Building fortran module (gridfire_mod.mod)')
fc = meson.get_compiler('fortran')
if not get_option('unsafe_fortran')
if fc.get_id() != 'gcc'
            error('The only supported fortran compiler for GridFire is gfortran (version >= 14.0), found ' + fc.get_id() + '. GridFire has not been tested with any other compilers. You can disable this check with the -Dunsafe_fortran=true flag to try other compilers')
endif
endif
if (fc.version().version_compare('<14.0'))
error('gfortran version must be at least 14.0, found ' + fc.version())
endif
endif

15
build-check/meson.build Normal file
View File

@@ -0,0 +1,15 @@
message('Found CXX compiler: ' + meson.get_compiler('cpp').get_id())
message('C++ standard set to: ' + get_option('cpp_std'))
cc = meson.get_compiler('c')
ignore_unused_args = '-Wno-unused-command-line-argument'
add_global_arguments(ignore_unused_args, language: 'cpp')
add_global_arguments(ignore_unused_args, language: 'c')
subdir('CPPC')
subdir('FC')

View File

@@ -1,9 +1,21 @@
# bring in all of the fourdst utility repositories
fourdst_build_lib_all = true
if not get_option('plugin_support')
fourdst_build_lib_all=false
message('Disabling fourdst plugin support as per user request.')
endif
fourdst_sp = subproject('fourdst',
default_options:
['build-tests=' + get_option('build-tests').to_string(),
'build-python=' + get_option('build-python').to_string()
['build_tests=' + get_option('build_tests').to_string(),
'build_python=' + get_option('build_python').to_string(),
'build_lib_all=' + fourdst_build_lib_all.to_string(),
'build_lib_comp=true',
'build_lib_config=true',
'build_lib_log=true',
'build_lib_const=true',
'pkg_config=' + get_option('pkg_config').to_string(),
]
)
@@ -11,10 +23,16 @@ composition_dep = fourdst_sp.get_variable('composition_dep')
log_dep = fourdst_sp.get_variable('log_dep')
const_dep = fourdst_sp.get_variable('const_dep')
config_dep = fourdst_sp.get_variable('config_dep')
plugin_dep = fourdst_sp.get_variable('plugin_dep')
if get_option('plugin_support')
warning('Including plugin library from fourdst. Note this will bring in minizip-ng and openssl, which can cause build issues with cross compilation due to their complexity.')
plugin_dep = fourdst_sp.get_variable('plugin_dep')
endif
libcomposition = fourdst_sp.get_variable('libcomposition')
libconst = fourdst_sp.get_variable('libconst')
libconfig = fourdst_sp.get_variable('libconfig')
liblogging = fourdst_sp.get_variable('liblogging')
libplugin = fourdst_sp.get_variable('libplugin')
if get_option('plugin_support')
warning('Including plugin library from fourdst. Note this will bring in minizip-ng and openssl, which can cause build issues with cross compilation due to their complexity.')
libplugin = fourdst_sp.get_variable('libplugin')
endif

View File

@@ -1,7 +1,10 @@
cmake = import('cmake')
if get_option('build_python')
subdir('python')
subdir('pybind')
endif
subdir('fourdst')
subdir('sundials')
subdir('cppad')
@@ -10,6 +13,5 @@ subdir('eigen')
subdir('json')
subdir('pybind')
subdir('CLI11')

View File

@@ -1,3 +1,3 @@
pybind11_proj = subproject('pybind11')
pybind11_dep = pybind11_proj.get_variable('pybind11_dep')
python3_dep = dependency('python3')
python3_dep = dependency('python3')

View File

@@ -0,0 +1,16 @@
py_installation = import('python').find_installation('python3', pure: false)
if meson.is_cross_build() and host_machine.system() == 'darwin'
    py_ver = get_option('python_target_version')
message('Cross build on Darwin, using python version ' + py_ver)
py_inc_dir = include_directories('../../cross/python_includes/python-' + py_ver + '/include/python' + py_ver)
py_dep = declare_dependency(include_directories: py_inc_dir)
py_module_prefix = ''
py_module_suffic = 'so'
meson.override_dependency('python3', py_dep)
else
py_dep = py_installation.dependency()
py_module_prefix = ''
py_module_suffic = 'so'
endif

View File

@@ -4,8 +4,10 @@ cvode_cmake_options = cmake.subproject_options()
cvode_cmake_options.add_cmake_defines({
'CMAKE_CXX_FLAGS' : '-Wno-deprecated-declarations',
'CMAKE_C_FLAGS' : '-Wno-deprecated-declarations',
'BUILD_SHARED_LIBS' : 'ON',
'BUILD_STATIC_LIBS' : 'OFF',
'BUILD_SHARED_LIBS' : 'OFF',
'BUILD_STATIC_LIBS' : 'ON',
'EXAMPLES_ENABLE_C' : 'OFF',
'CMAKE_POSITION_INDEPENDENT_CODE': true
})
@@ -14,34 +16,70 @@ cvode_cmake_options.add_cmake_defines({
'CMAKE_INSTALL_INCLUDEDIR': get_option('includedir')
})
if meson.is_cross_build() and host_machine.system() == 'emscripten'
cvode_cmake_options.add_cmake_defines({
'CMAKE_C_FLAGS': '-s MEMORY64=1 -s ALLOW_MEMORY_GROWTH=1',
'CMAKE_CXX_FLAGS': '-s MEMORY64=1 -s ALLOW_MEMORY_GROWTH=1',
'CMAKE_SHARED_LINKER_FLAGS': '-s MEMORY64=1 -s ALLOW_MEMORY_GROWTH=1',
'CMAKE_EXE_LINKER_FLAGS': '-s MEMORY64=1 -s ALLOW_MEMORY_GROWTH=1'
})
endif
cvode_sp = cmake.subproject(
'cvode',
options: cvode_cmake_options,
)
# For the core SUNDIALS library (SUNContext, etc.)
sundials_core_dep = cvode_sp.dependency('sundials_core_shared')
sundials_core_tgt = cvode_sp.target('sundials_core_static')
sundials_cvode_tgt = cvode_sp.target('sundials_cvode_static')
sundials_nvecserial_tgt = cvode_sp.target('sundials_nvecserial_static')
sundials_sunmatrixdense_tgt = cvode_sp.target('sundials_sunmatrixdense_static')
sundials_sunlinsoldense_tgt = cvode_sp.target('sundials_sunlinsoldense_static')
# For the CVODE integrator library
sundials_cvode_dep = cvode_sp.dependency('sundials_cvode_shared')
cvode_objs = [
sundials_core_tgt.extract_all_objects(recursive: true),
sundials_cvode_tgt.extract_all_objects(recursive: true),
sundials_nvecserial_tgt.extract_all_objects(recursive: true),
sundials_sunmatrixdense_tgt.extract_all_objects(recursive: true),
sundials_sunlinsoldense_tgt.extract_all_objects(recursive: true),
]
# For the serial NVector library
sundials_nvecserial_dep = cvode_sp.dependency('sundials_nvecserial_shared')
sundials_core_includes = cvode_sp.include_directories('sundials_core_static')
sundials_cvode_includes = cvode_sp.include_directories('sundials_cvode_static')
sundials_nvecserial_includes = cvode_sp.include_directories('sundials_nvecserial_static')
sundials_sunmatrixdense_includes = cvode_sp.include_directories('sundials_sunmatrixdense_static')
sundials_sunlinsoldense_includes = cvode_sp.include_directories('sundials_sunlinsoldense_static')
# For the dense matrix library
sundials_sunmatrixdense_dep = cvode_sp.dependency('sundials_sunmatrixdense_shared')
cvode_includes = [
sundials_core_includes,
sundials_cvode_includes,
sundials_nvecserial_includes,
sundials_sunmatrixdense_includes,
sundials_sunlinsoldense_includes
]
# For the dense linear solver library
sundials_sunlinsoldense_dep = cvode_sp.dependency('sundials_sunlinsoldense_shared')
cvode_dep = declare_dependency(
dependencies: [
sundials_core_dep,
sundials_cvode_dep,
sundials_nvecserial_dep,
sundials_sunmatrixdense_dep,
sundials_sunlinsoldense_dep,
],
empty_cvode_file = configure_file(
output: 'cvode_dummy_ar.cpp',
command: ['echo'],
capture: true
)
libcvode_static = static_library(
'cvode-static',
empty_cvode_file,
objects: cvode_objs,
include_directories: cvode_includes,
pic: true,
install: false
)
cvode_dep = declare_dependency(
link_with: libcvode_static,
include_directories: cvode_includes,
)

View File

@@ -5,8 +5,10 @@ kinsol_cmake_options = cmake.subproject_options()
kinsol_cmake_options.add_cmake_defines({
'CMAKE_CXX_FLAGS' : '-Wno-deprecated-declarations',
'CMAKE_C_FLAGS' : '-Wno-deprecated-declarations',
'BUILD_SHARED_LIBS' : 'ON',
'BUILD_STATIC_LIBS' : 'OFF',
'BUILD_SHARED_LIBS' : 'OFF',
'BUILD_STATIC_LIBS' : 'ON',
'EXAMPLES_ENABLE_C' : 'OFF',
'CMAKE_POSITION_INDEPENDENT_CODE': true
})
kinsol_cmake_options.add_cmake_defines({
@@ -19,11 +21,31 @@ kinsol_sp = cmake.subproject(
options: kinsol_cmake_options,
)
sundials_kinsol_shared = kinsol_sp.dependency('sundials_kinsol_shared')
sundials_kinsol_static_tgt = kinsol_sp.target('sundials_kinsol_obj_static')
kinsol_includes = kinsol_sp.include_directories('sundials_kinsol_obj_static')
kinsol_dep = declare_dependency(
dependencies: [
sundials_kinsol_shared,
]
kinsol_objs = [sundials_kinsol_static_tgt.extract_all_objects(recursive: false)]
empty_kinsol_file = configure_file(
output: 'kinsol_dummy_ar.cpp',
command: ['echo'],
capture: true
)
libkinsol_static = static_library(
'kinsol_static',
empty_kinsol_file,
objects: kinsol_objs,
include_directories: kinsol_includes,
pic: true,
install: false
)
kinsol_dep = declare_dependency(
link_with: libkinsol_static,
include_directories: kinsol_includes
)

View File

@@ -6,4 +6,4 @@ sundials_dep = declare_dependency(
cvode_dep,
kinsol_dep,
],
)
)

View File

@@ -1,4 +1,4 @@
xxhash_dep = declare_dependency(
include_directories: include_directories('include')
)
)

View File

@@ -0,0 +1,32 @@
llevel = get_option('log_level')
logbase='QUILL_COMPILE_ACTIVE_LOG_LEVEL_'
if (llevel == 'traceL3')
message('Setting log level to TRACE_L3')
log_argument = logbase + 'TRACE_L3'
elif (llevel == 'traceL2')
message('Setting log level to TRACE_L2')
log_argument = logbase + 'TRACE_L2'
elif (llevel == 'traceL1')
message('Setting log level to TRACE_L1')
log_argument = logbase + 'TRACE_L1'
elif (llevel == 'debug')
message('Setting log level to DEBUG')
log_argument = logbase + 'DEBUG'
elif (llevel == 'info')
message('Setting log level to INFO')
log_argument = logbase + 'INFO'
elif (llevel == 'warning')
message('Setting log level to WARNING')
log_argument = logbase + 'WARNING'
elif (llevel == 'error')
message('Setting log level to ERROR')
log_argument = logbase + 'ERROR'
elif (llevel == 'critical')
message('Setting log level to CRITICAL')
log_argument = logbase + 'CRITICAL'
endif
log_argument = '-DQUILL_COMPILE_ACTIVE_LOG_LEVEL=' + log_argument
add_project_arguments(log_argument, language: 'cpp')

View File

@@ -0,0 +1,18 @@
if get_option('pkg_config')
message('Generating pkg-config file for GridFire...')
pkg = import('pkgconfig')
pkg.generate(
name: 'gridfire',
description: 'GridFire nuclear reaction network solver',
version: meson.project_version(),
libraries: [
libgridfire,
libcomposition,
libconst,
liblogging
],
subdirs: ['gridfire'],
filebase: 'gridfire',
install_dir: join_paths(get_option('libdir'), 'pkgconfig')
)
endif

View File

@@ -78,7 +78,7 @@ def fix_rpaths(binary_path):
def main():
if len(sys.argv) != 2:
print(f"--- Error: Expected one argument (path to .so file), got {sys.argv}", file=sys.stderr)
print(f"--- Error: Expected one argument (path to .dylib/.so file), got {sys.argv}", file=sys.stderr)
sys.exit(1)
# Get the file path directly from the command line argument

View File

@@ -1,76 +1,95 @@
# --- Python Extension Setup ---
py_installation = import('python').find_installation('python3', pure: false)
if get_option('build_python')
message('Building Python bindings...')
gridfire_py_deps = [
pybind11_dep,
const_dep,
config_dep,
composition_dep,
gridfire_dep
]
gridfire_py_deps = [
py_dep,
pybind11_dep,
const_dep,
config_dep,
composition_dep,
gridfire_dep
]
py_mod = py_installation.extension_module(
'_gridfire', # Name of the generated .so/.pyd file (without extension)
sources: [
meson.project_source_root() + '/src/python/bindings.cpp',
meson.project_source_root() + '/src/python/types/bindings.cpp',
meson.project_source_root() + '/src/python/partition/bindings.cpp',
meson.project_source_root() + '/src/python/partition/trampoline/py_partition.cpp',
meson.project_source_root() + '/src/python/reaction/bindings.cpp',
meson.project_source_root() + '/src/python/screening/bindings.cpp',
meson.project_source_root() + '/src/python/screening/trampoline/py_screening.cpp',
meson.project_source_root() + '/src/python/io/bindings.cpp',
meson.project_source_root() + '/src/python/io/trampoline/py_io.cpp',
meson.project_source_root() + '/src/python/exceptions/bindings.cpp',
meson.project_source_root() + '/src/python/engine/bindings.cpp',
meson.project_source_root() + '/src/python/engine/trampoline/py_engine.cpp',
meson.project_source_root() + '/src/python/solver/bindings.cpp',
meson.project_source_root() + '/src/python/solver/trampoline/py_solver.cpp',
meson.project_source_root() + '/src/python/policy/bindings.cpp',
meson.project_source_root() + '/src/python/policy/trampoline/py_policy.cpp',
meson.project_source_root() + '/src/python/utils/bindings.cpp',
],
dependencies : gridfire_py_deps,
install : true,
subdir: 'gridfire',
)
py_sources = [
meson.project_source_root() + '/src/python/bindings.cpp',
meson.project_source_root() + '/src/python/types/bindings.cpp',
meson.project_source_root() + '/src/python/partition/bindings.cpp',
meson.project_source_root() + '/src/python/partition/trampoline/py_partition.cpp',
meson.project_source_root() + '/src/python/reaction/bindings.cpp',
meson.project_source_root() + '/src/python/screening/bindings.cpp',
meson.project_source_root() + '/src/python/screening/trampoline/py_screening.cpp',
meson.project_source_root() + '/src/python/io/bindings.cpp',
meson.project_source_root() + '/src/python/io/trampoline/py_io.cpp',
meson.project_source_root() + '/src/python/exceptions/bindings.cpp',
meson.project_source_root() + '/src/python/engine/bindings.cpp',
meson.project_source_root() + '/src/python/engine/trampoline/py_engine.cpp',
meson.project_source_root() + '/src/python/solver/bindings.cpp',
meson.project_source_root() + '/src/python/solver/trampoline/py_solver.cpp',
meson.project_source_root() + '/src/python/policy/bindings.cpp',
meson.project_source_root() + '/src/python/policy/trampoline/py_policy.cpp',
meson.project_source_root() + '/src/python/utils/bindings.cpp',
]
py_installation.install_sources(
files(
meson.project_source_root() + '/src/python/gridfire/__init__.py',
meson.project_source_root() + '/stubs/gridfire/_gridfire/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/exceptions.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/partition.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/reaction.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/screening.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/io.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/solver.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/policy.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/type.pyi'
),
subdir: 'gridfire',
)
if meson.is_cross_build() and host_machine.system() == 'darwin'
py_mod = shared_module(
'_gridfire',
sources: py_sources,
dependencies: gridfire_py_deps,
name_prefix: '',
name_suffix: 'so',
install: true,
install_dir: py_installation.get_install_dir() + '/gridfire'
)
else
py_mod = py_installation.extension_module(
'_gridfire', # Name of the generated .so/.pyd file (without extension)
sources: py_sources,
dependencies : gridfire_py_deps,
install : true,
subdir: 'gridfire',
)
endif
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/engine/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/engine/diagnostics.pyi',
),
subdir: 'gridfire/engine',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/__init__.pyi',
),
subdir: 'gridfire/utils',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/src/python/gridfire/__init__.py',
meson.project_source_root() + '/stubs/gridfire/_gridfire/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/exceptions.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/partition.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/reaction.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/screening.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/io.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/solver.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/policy.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/type.pyi'
),
subdir: 'gridfire',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/hashing/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/hashing/reaction.pyi',
),
subdir: 'gridfire/utils/hashing',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/engine/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/engine/diagnostics.pyi',
),
subdir: 'gridfire/engine',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/__init__.pyi',
),
subdir: 'gridfire/utils',
)
py_installation.install_sources(
files(
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/hashing/__init__.pyi',
meson.project_source_root() + '/stubs/gridfire/_gridfire/utils/hashing/reaction.pyi',
),
subdir: 'gridfire/utils/hashing',
)
else
message('Python bindings disabled')
endif

19
cross/macos_arm64.ini Normal file
View File

@@ -0,0 +1,19 @@
[binaries]
c = 'arm64-apple-darwin25-clang'
cpp = 'arm64-apple-darwin25-clang++'
ar = 'arm64-apple-darwin25-ar'
strip = 'arm64-apple-darwin25-strip'
pkg-config = 'pkg-config'
ranlib = '/usr/bin/true'
[host_machine]
system = 'darwin'
cpu_family = 'aarch64'
cpu = 'arm64'
endian = 'little'
[built-in options]
c_args = ['-mmacosx-version-min=15.0']
cpp_args = ['-mmacosx-version-min=15.0']
c_link_args = ['-mmacosx-version-min=15.0']
cpp_link_args = ['-mmacosx-version-min=15.0']

23
cross/wasm.ini Normal file
View File

@@ -0,0 +1,23 @@
[binaries]
c = 'emcc'
cpp = 'em++'
ar = 'emar'
strip = 'emstrip'
exec_wrapper = 'node'
[built-in options]
c_args = ['-Dpkg_config=false', '-Dbuild_tests=false', '-Dbuild_examples=true', '-Dbuild_fortran=false', '-Dplugin_support=false', '-s', 'MEMORY64=1', '-pthread', '-DQUILL_NO_THREAD_NAME_SUPPORT', '-DQUILL_IMMEDIATE_FLUSH']
cpp_args = ['-Dpkg_config=false', '-Dbuild_tests=false', '-Dbuild_examples=true', '-Dbuild_fortran=false', '-Dplugin_support=false', '-s', 'MEMORY64=1', '-pthread', '-DQUILL_NO_THREAD_NAME_SUPPORT', '-DQUILL_IMMEDIATE_FLUSH']
c_link_args = ['-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MEMORY64=1', '-fwasm-exceptions', '-pthread', '-s', 'EXPORTED_RUNTIME_METHODS=["FS", "callMain"]', '-s', 'STACK_SIZE=10485760']
cpp_link_args = ['-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MEMORY64=1', '-fwasm-exceptions', '-pthread', '-s', 'EXPORTED_RUNTIME_METHODS=["FS", "callMain"]', '-s', 'STACK_SIZE=10485760']
[host_machine]
system = 'emscripten'
cpu_family = 'wasm64'
cpu = 'wasm64'
endian = 'little'
[properties]
cmake_toolchain_file = '/home/tboudreaux/Programming/emsdk/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake'

View File

@@ -18,126 +18,29 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# *********************************************************************** #
project('GridFire', ['c', 'cpp', 'fortran'], version: 'v0.7.3_rc2', default_options: ['cpp_std=c++23'], meson_version: '>=1.5.0')
project('GridFire', ['c', 'cpp'], version: 'v0.7.4_rc2', default_options: ['cpp_std=c++23'], meson_version: '>=1.5.0')
# Add default visibility for all C++ targets
add_project_arguments('-fvisibility=default', language: 'cpp')
# Start by running the code which validates the build environment
subdir('build-check')
message('Found CXX compiler: ' + meson.get_compiler('cpp').get_id())
message('Found FORTRAN compiler: ' + meson.get_compiler('fortran').get_id())
message('C++ standard set to: ' + get_option('cpp_std'))
message('Fortran standard set to: ' + get_option('fortran_std'))
if meson.get_compiler('cpp').get_id() == 'clang'
# We disable these because of CppAD
message('disabling bitwise-instead-of-logical warnings for clang')
add_project_arguments('-Wno-bitwise-instead-of-logical', language: 'cpp')
endif
if meson.get_compiler('cpp').get_id() == 'gcc'
# We disable these because of boost notes about abi changes from C++10 -> C++17 make the build too noisey
message('disabling psabi warnings for gcc')
add_project_arguments('-Wno-psabi', language: 'cpp')
if (meson.get_compiler('cpp').version().version_compare('<14.0'))
error('g++ version must be at least 14.0, found ' + meson.get_compiler('cpp').version())
endif
endif
build_fortran = get_option('build-fortran')
if (build_fortran)
add_languages('fortran', native: true)
message('Building fortran module (gridfire_mod.mod)')
fc = meson.get_compiler('fortran')
if not get_option('unsafe-fortran')
if fc.get_id() != 'gcc'
error('The only supported fortran compiler for GridFire is gfortran (version >= 14.0), found ' + fc + '. GridFire has not been tested with any other compilers. You can disable this check with the -Dunsafe-fortran=true flag to try other compilers')
endif
endif
if (meson.get_compiler('fortran').version().version_compare('<14.0'))
error('gfortran version must be at least 14.0, found ' + meson.get_compiler('fortran').version())
endif
endif
# For Eigen
add_project_arguments('-Wno-deprecated-declarations', language: 'cpp')
llevel = get_option('log-level')
logbase='QUILL_COMPILE_ACTIVE_LOG_LEVEL_'
if (llevel == 'traceL3')
message('Setting log level to TRACE_L3')
log_argument = logbase + 'TRACE_L3'
elif (llevel == 'traceL2')
message('Setting log level to TRACE_L2')
log_argument = logbase + 'TRACE_L2'
elif (llevel == 'traceL1')
message('Setting log level to TRACE_L1')
log_argument = logbase + 'TRACE_L1'
elif (llevel == 'debug')
message('Setting log level to DEBUG')
log_argument = logbase + 'DEBUG'
elif (llevel == 'info')
message('Setting log level to INFO')
log_argument = logbase + 'INFO'
elif (llevel == 'warning')
message('Setting log level to WARNING')
log_argument = logbase + 'WARNING'
elif (llevel == 'error')
message('Setting log level to ERROR')
log_argument = logbase + 'ERROR'
elif (llevel == 'critical')
message('Setting log level to CRITICAL')
log_argument = logbase + 'CRITICAL'
endif
log_argument = '-DQUILL_COMPILE_ACTIVE_LOG_LEVEL=' + log_argument
add_project_arguments(log_argument, language: 'cpp')
cpp = meson.get_compiler('cpp')
# Configure the logging level
subdir('build-extra/log-level')
# Then build the external dependencies
subdir('build-config')
# Build the main source code
subdir('src')
if get_option('build-python')
message('Configuring Python bindings...')
subdir('build-python')
else
message('Skipping Python bindings...')
endif
# Build the Python bindings
subdir('build-python')
if get_option('build-tests')
message('Setting up tests for GridFire...')
subdir('tests')
else
message('Skipping tests for GridFire...')
endif
if get_option('pkg-config')
message('Generating pkg-config file for GridFire...')
pkg = import('pkgconfig')
pkg.generate(
name: 'gridfire',
description: 'GridFire nuclear reaction network solver',
version: meson.project_version(),
libraries: [
libgridfire,
libcomposition,
libconfig,
libconst,
liblogging
],
subdirs: ['gridfire'],
filebase: 'gridfire',
install_dir: join_paths(get_option('libdir'), 'pkgconfig')
)
endif
# Build the test suite
subdir('tests')
subdir('tools')
# Build the pkg-config file
subdir('build-extra/pkg-config')

View File

@@ -1,6 +1,12 @@
option('log-level', type: 'combo', choices: ['traceL3', 'traceL2', 'traceL1', 'debug', 'info', 'warning', 'error', 'critial'], value: 'info', description: 'Set the log level for the GridFire library')
option('pkg-config', type: 'boolean', value: true, description: 'generate pkg-config file for GridFire (gridfire.pc)')
option('build-python', type: 'boolean', value: false, description: 'build the python bindings so you can use GridFire from python')
option('build-tests', type: 'boolean', value: true, description: 'build the test suite')
option('build-fortran', type: 'boolean', value: false, description: 'build fortran module support')
option('unsafe-fortran', type: 'boolean', value: false, description: 'Allow untested fortran compilers (compilers other than gfortran)')
option('log_level', type: 'combo', choices: ['traceL3', 'traceL2', 'traceL1', 'debug', 'info', 'warning', 'error', 'critical'], value: 'info', description: 'Set the log level for the GridFire library')
option('pkg_config', type: 'boolean', value: true, description: 'generate pkg-config file for GridFire (gridfire.pc)')
option('build_python', type: 'boolean', value: false, description: 'build the python bindings so you can use GridFire from python')
option('build_tests', type: 'boolean', value: true, description: 'build the test suite')
option('build_examples', type: 'boolean', value: true, description: 'build example code')
option('build_fortran', type: 'boolean', value: false, description: 'build fortran module support')
option('unsafe_fortran', type: 'boolean', value: false, description: 'Allow untested fortran compilers (compilers other than gfortran)')
option('plugin_support', type: 'boolean', value: false, description: 'Enable support for libplugin plugins')
option('python_target_version', type: 'string', value: '3.13', description: 'Target version for python compilation, only used for cross compilation')
option('build_c_api', type: 'boolean', value: true, description: 'compile the C API')
option('build_tools', type: 'boolean', value: true, description: 'build the GridFire command line tools')
option('openmp_support', type: 'boolean', value: false, description: 'Enable OpenMP support for parallelization')

View File

@@ -8,7 +8,7 @@ build-backend = "mesonpy"
[project]
name = "gridfire" # Choose your Python package name
version = "0.7.3_rc2" # Your project's version
version = "0.7.4_rc2" # Your project's version
description = "Python interface to the GridFire nuclear network code"
readme = "README.md"
license = { file = "LICENSE.txt" } # Reference your license file [cite: 2]

View File

@@ -23,7 +23,7 @@ gridfire_extern_dep = declare_dependency(
install_subdir('include/gridfire', install_dir: get_option('includedir'))
if get_option('build-fortran')
if get_option('build_fortran')
message('Configuring Fortran bindings...')
subdir('fortran')
endif

View File

@@ -0,0 +1,35 @@
#pragma once
#include "fourdst/config/config.h"
namespace gridfire::config {
struct CVODESolverConfig {
double absTol = 1.0e-8;
double relTol = 1.0e-5;
};
struct SolverConfig {
CVODESolverConfig cvode;
};
struct AdaptiveEngineViewConfig {
double relativeCullingThreshold = 1.0e-75;
};
struct EngineViewConfig {
AdaptiveEngineViewConfig adaptiveEngineView;
};
struct EngineConfig {
EngineViewConfig views;
};
struct GridFireConfig {
SolverConfig solver;
EngineConfig engine;
};
}

View File

@@ -53,7 +53,7 @@ namespace gridfire::engine {
struct StepDerivatives {
std::map<fourdst::atomic::Species, T> dydt{}; ///< Derivatives of abundances (dY/dt for each species).
T nuclearEnergyGenerationRate = T(0.0); ///< Specific energy generation rate (e.g., erg/g/s).
std::map<fourdst::atomic::Species, std::unordered_map<std::string, T>> reactionContributions{};
std::optional<std::map<fourdst::atomic::Species, std::unordered_map<std::string, T>>> reactionContributions = std::nullopt;
T neutrinoEnergyLossRate = T(0.0); // (erg/g/s)
T totalNeutrinoFlux = T(0.0); // (neutrinos/g/s)

View File

@@ -12,6 +12,7 @@
#include "gridfire/screening/screening_types.h"
#include "gridfire/partition/partition_abstract.h"
#include "gridfire/engine/procedures/construction.h"
#include "gridfire/config/config.h"
#include <string>
#include <unordered_map>
@@ -96,7 +97,7 @@ namespace gridfire::engine {
*
* @see engine_abstract.h
*/
class GraphEngine final : public DynamicEngine{
class GraphEngine final : public DynamicEngine {
public:
/**
* @brief Constructs a GraphEngine from a composition.
@@ -753,6 +754,14 @@ namespace gridfire::engine {
[[nodiscard]]
SpeciesStatus getSpeciesStatus(const fourdst::atomic::Species &species) const override;
[[nodiscard]] bool get_store_intermediate_reaction_contributions() const {
return m_store_intermediate_reaction_contributions;
}
void set_store_intermediate_reaction_contributions(const bool value) {
m_store_intermediate_reaction_contributions = value;
}
private:
struct PrecomputedReaction {
@@ -846,8 +855,14 @@ namespace gridfire::engine {
const reaction::Reaction& m_reaction;
const GraphEngine& m_engine;
};
struct PrecomputationKernelResults {
std::vector<double> dydt_vector;
double total_neutrino_energy_loss_rate{0.0};
double total_neutrino_flux{0.0};
};
private:
Config& m_config = Config::getInstance();
Config<config::GridFireConfig> m_config;
quill::Logger* m_logger = LogManager::getInstance().getLogger("log");
constants m_constants;
@@ -866,6 +881,7 @@ namespace gridfire::engine {
mutable CppAD::ADFun<double> m_rhsADFun; ///< CppAD function for the right-hand side of the ODE.
mutable CppAD::ADFun<double> m_epsADFun; ///< CppAD function for the energy generation rate.
mutable CppAD::sparse_jac_work m_jac_work; ///< Work object for sparse Jacobian calculations.
mutable std::vector<double> m_local_abundance_cache;
bool m_has_been_primed = false; ///< Flag indicating if the engine has been primed.
@@ -879,6 +895,7 @@ namespace gridfire::engine {
bool m_usePrecomputation = true; ///< Flag to enable or disable using precomputed reactions for efficiency. Mathematically, this should not change the results. Generally end users should not need to change this.
bool m_useReverseReactions = true; ///< Flag to enable or disable reverse reactions. If false, only forward reactions are considered.
bool m_store_intermediate_reaction_contributions = false; ///< Flag to enable or disable storing intermediate reaction contributions for debugging.
BuildDepthType m_depth;
@@ -948,6 +965,42 @@ namespace gridfire::engine {
*/
[[nodiscard]] bool validateConservation() const;
double compute_reaction_flow(
const std::vector<double> &local_abundances,
const std::vector<double> &screening_factors,
const std::vector<double> &bare_rates,
const std::vector<double> &bare_reverse_rates,
double rho,
size_t reactionCounter,
const reaction::Reaction &reaction,
size_t reactionIndex,
const PrecomputedReaction &precomputedReaction
) const;
std::pair<double, double> compute_neutrino_fluxes(
double netFlow,
const reaction::Reaction &reaction) const;
PrecomputationKernelResults accumulate_flows_serial(
const std::vector<double>& local_abundances,
const std::vector<double>& screening_factors,
const std::vector<double>& bare_rates,
const std::vector<double>& bare_reverse_rates,
double rho,
const reaction::ReactionSet& activeReactions
) const;
#ifdef GRIDFIRE_USE_OPENMP
PrecomputationKernelResults accumulate_flows_parallel(
const std::vector<double>& local_abundances,
const std::vector<double>& screening_factors,
const std::vector<double>& bare_rates,
const std::vector<double>& bare_reverse_rates,
double rho,
const reaction::ReactionSet& activeReactions
) const;
#endif
[[nodiscard]] StepDerivatives<double> calculateAllDerivativesUsingPrecomputation(
const fourdst::composition::CompositionAbstract &comp,
@@ -1207,7 +1260,10 @@ namespace gridfire::engine {
const T nu_ij = static_cast<T>(reaction.stoichiometry(species));
const T dydt_increment = threshold_flag * molarReactionFlow * nu_ij;
dydt_vec[speciesIdx] += dydt_increment;
result.reactionContributions[species][std::string(reaction.id())] = dydt_increment;
if (m_store_intermediate_reaction_contributions) {
result.reactionContributions.value()[species][std::string(reaction.id())] = dydt_increment;
}
}
}

View File

@@ -4,6 +4,7 @@
#include "gridfire/screening/screening_abstract.h"
#include "gridfire/screening/screening_types.h"
#include "gridfire/types/types.h"
#include "gridfire/config/config.h"
#include "fourdst/atomic/atomicSpecies.h"
#include "fourdst/config/config.h"
@@ -386,10 +387,10 @@ namespace gridfire::engine {
*/
[[nodiscard]] SpeciesStatus getSpeciesStatus(const fourdst::atomic::Species &species) const override;
private:
using Config = fourdst::config::Config;
using LogManager = fourdst::logging::LogManager;
/** @brief A reference to the singleton Config instance, used for retrieving configuration parameters. */
Config& m_config = Config::getInstance();
fourdst::config::Config<config::GridFireConfig> m_config;
/** @brief A pointer to the logger instance, used for logging messages. */
quill::Logger* m_logger = LogManager::getInstance().getLogger("log");

View File

@@ -6,6 +6,8 @@
#include "gridfire/io/network_file.h"
#include "gridfire/types/types.h"
#include "gridfire/config/config.h"
#include "fourdst/config/config.h"
#include "fourdst/logging/logging.h"
@@ -365,9 +367,9 @@ namespace gridfire::engine {
[[nodiscard]] std::string getNetworkFile() const { return m_fileName; }
[[nodiscard]] const io::NetworkFileParser& getParser() const { return m_parser; }
private:
using Config = fourdst::config::Config;
using LogManager = fourdst::logging::LogManager;
Config& m_config = Config::getInstance();
using LogManager = LogManager;
Config<config::GridFireConfig> m_config;
quill::Logger* m_logger = LogManager::getInstance().getLogger("log");
std::string m_fileName;
///< Parser for the network file.

View File

@@ -2,6 +2,7 @@
#include "fourdst/config/config.h"
#include "fourdst/logging/logging.h"
#include "gridfire/config/config.h"
#include "quill/Logger.h"
@@ -101,9 +102,8 @@ namespace gridfire::io {
*/
[[nodiscard]] ParsedNetworkData parse(const std::string& filename) const override;
private:
using Config = fourdst::config::Config;
using LogManager = fourdst::logging::LogManager;
Config& m_config = Config::getInstance();
fourdst::config::Config<config::GridFireConfig> m_config;
quill::Logger* m_logger = LogManager::getInstance().getLogger("log");
};
@@ -141,9 +141,8 @@ namespace gridfire::io {
*/
[[nodiscard]] ParsedNetworkData parse(const std::string& filename) const override;
private:
using Config = fourdst::config::Config;
using LogManager = fourdst::logging::LogManager;
Config& m_config = Config::getInstance();
fourdst::config::Config<config::GridFireConfig> m_config;
quill::Logger* m_logger = LogManager::getInstance().getLogger("log");
std::string m_filename;

View File

@@ -809,6 +809,8 @@ namespace gridfire::reaction {
std::vector<RateCoefficientSet> m_rates; ///< List of rate coefficient sets from each source.
bool m_weak = false;
mutable std::unordered_map<double, double> m_cached_rates;
private:
/**
* @brief Template implementation for calculating the total reaction rate.
@@ -876,6 +878,8 @@ namespace gridfire::reaction {
[[nodiscard]] std::optional<std::unique_ptr<Reaction>> get(const std::string_view& id) const;
[[nodiscard]] std::unique_ptr<Reaction> get(size_t index) const;
/**
* @brief Removes a reaction from the set.
* @param reaction The Reaction to remove.

View File

@@ -4,6 +4,7 @@
#include "gridfire/engine/engine_abstract.h"
#include "gridfire/types/types.h"
#include "gridfire/exceptions/exceptions.h"
#include "gridfire/config/config.h"
#include "fourdst/atomic/atomicSpecies.h"
#include "fourdst/config/config.h"
@@ -237,13 +238,13 @@ namespace gridfire::solver {
};
struct CVODERHSOutputData {
std::map<fourdst::atomic::Species, std::unordered_map<std::string, double>> reaction_contribution_map;
std::optional<std::map<fourdst::atomic::Species, std::unordered_map<std::string, double>>> reaction_contribution_map;
double neutrino_energy_loss_rate;
double total_neutrino_flux;
};
private:
fourdst::config::Config& m_config = fourdst::config::Config::getInstance();
fourdst::config::Config<config::GridFireConfig> m_config;
quill::Logger* m_logger = fourdst::logging::LogManager::getInstance().getLogger("log");
/**
* @brief CVODE RHS C-wrapper that delegates to calculate_rhs and captures exceptions.

View File

@@ -28,6 +28,10 @@
#include "cppad/utility/sparse_rc.hpp"
#include "cppad/utility/sparse_rcv.hpp"
#ifdef GRIDFIRE_USE_OPENMP
#include <omp.h>
#endif
namespace {
enum class REACLIB_WEAK_TYPES {
@@ -403,6 +407,167 @@ namespace gridfire::engine {
return true; // All reactions passed the conservation check
}
/**
 * @brief Computes the net molar reaction flow (forward minus reverse) for a single reaction.
 *
 * @param local_abundances Molar abundances indexed by network-species index.
 * @param screening_factors Screening factors, indexed by the active-reaction
 *        iteration order (reactionCounter), not by the precomputed-table index.
 * @param bare_rates Unscreened forward rates, same ordering as screening_factors.
 * @param bare_reverse_rates Unscreened reverse rates, same ordering as screening_factors.
 * @param rho Mass density (presumably g/cm^3 — TODO confirm units against callers).
 * @param reactionCounter Index into screening_factors / bare_rates / bare_reverse_rates.
 * @param reaction The reaction being evaluated (used for logging its id).
 * @param reactionIndex Index into m_reactions / the precomputed-reaction table.
 * @param precomputedReaction Cached reactant/product indices, powers, and symmetry factors.
 * @return Net molar reaction flow: forward flow minus reverse flow.
 * @throws exceptions::BadRHSEngineError if a non-finite factor or flow is produced.
 */
double GraphEngine::compute_reaction_flow(
    const std::vector<double> &local_abundances,
    const std::vector<double> &screening_factors,
    const std::vector<double> &bare_rates,
    const std::vector<double> &bare_reverse_rates,
    const double rho,
    const size_t reactionCounter,
    const reaction::Reaction &reaction,
    const size_t reactionIndex,
    const PrecomputedReaction &precomputedReaction
) const {
    // Product of reactant abundances raised to their stoichiometric powers.
    double forwardAbundanceProduct = 1.0;
    for (size_t i = 0; i < precomputedReaction.unique_reactant_indices.size(); ++i) {
        const size_t reactantIndex = precomputedReaction.unique_reactant_indices[i];
        const int power = precomputedReaction.reactant_powers[i];
        const double abundance = local_abundances[reactantIndex];
        double factor;
        // Fast paths for the common powers 1 and 2 avoid std::pow in the hot loop.
        if (power == 1) { factor = abundance; }
        else if (power == 2) { factor = abundance * abundance; }
        else { factor = std::pow(abundance, power); }
        if (!std::isfinite(factor)) {
            LOG_CRITICAL(m_logger, "Non-finite factor encountered in forward abundance product for reaction '{}'. Check input abundances for validity.", reaction.id());
            throw exceptions::BadRHSEngineError("Non-finite factor encountered in forward abundance product.");
        }
        forwardAbundanceProduct *= factor;
    }

    // Rates are ordered by active-reaction iteration; .at() guards against a
    // counter/ordering mismatch for the forward rate.
    const double bare_rate = bare_rates.at(reactionCounter);
    const double screeningFactor = screening_factors[reactionCounter];

    const size_t numReactants = m_reactions[reactionIndex].reactants().size();
    const size_t numProducts = m_reactions[reactionIndex].products().size();

    // Forward flow: rho^(N-1) converts the N-body rate to a molar flow
    // (no density factor for single-reactant reactions, e.g. decays).
    const double forwardMolarReactionFlow = screeningFactor *
        bare_rate *
        precomputedReaction.symmetry_factor *
        forwardAbundanceProduct *
        std::pow(rho, numReactants > 1 ? static_cast<double>(numReactants) - 1 : 0.0);

    if (!std::isfinite(forwardMolarReactionFlow)) {
        LOG_CRITICAL(m_logger, "Non-finite forward molar reaction flow computed for reaction '{}'. Check input abundances and rates for validity.", reaction.id());
        throw exceptions::BadRHSEngineError("Non-finite forward molar reaction flow computed.");
    }

    // Reverse flow is only computed for reversible reactions (non-zero reverse
    // symmetry factor) and only when reverse reactions are enabled.
    double reverseMolarReactionFlow = 0.0;
    if (precomputedReaction.reverse_symmetry_factor != 0.0 and m_useReverseReactions) {
        const double bare_reverse_rate = bare_reverse_rates.at(reactionCounter);
        double reverseAbundanceProduct = 1.0;
        for (size_t i = 0; i < precomputedReaction.unique_product_indices.size(); ++i) {
            const size_t productIndex = precomputedReaction.unique_product_indices[i];
            reverseAbundanceProduct *= std::pow(local_abundances[productIndex], precomputedReaction.product_powers[i]);
        }
        // Mirrors the forward-flow formula with product-side quantities.
        reverseMolarReactionFlow = screeningFactor *
            bare_reverse_rate *
            precomputedReaction.reverse_symmetry_factor *
            reverseAbundanceProduct *
            std::pow(rho, numProducts > 1 ? static_cast<double>(numProducts) - 1 : 0.0);
    }

    return forwardMolarReactionFlow - reverseMolarReactionFlow;
}
/**
 * @brief Computes the neutrino energy-loss rate and neutrino number flux for one reaction.
 *
 * Only REACLIB weak reactions produce neutrinos here; every other reaction type
 * contributes {0, 0}. Beta decays are approximated as losing 50% of |Q| to
 * neutrinos; electron/positron captures lose 100%.
 *
 * @param netFlow Net molar reaction flow for this reaction.
 * @param reaction The reaction whose neutrino contribution is evaluated.
 * @return Pair of {neutrino energy-loss rate, neutrino number flux}.
 */
std::pair<double, double> GraphEngine::compute_neutrino_fluxes(
    const double netFlow,
    const reaction::Reaction &reaction
) const {
    // Non-weak reactions carry no neutrinos at all.
    if (reaction.type() != reaction::ReactionType::REACLIB_WEAK) {
        return {0.0, 0.0};
    }

    const REACLIB_WEAK_TYPES weakKind = get_weak_reaclib_reaction_type(reaction);

    // Fraction of |Q| carried away by neutrinos for this weak channel.
    double lossFraction = 0.0;
    if (weakKind == REACLIB_WEAK_TYPES::BETA_PLUS_DECAY ||
        weakKind == REACLIB_WEAK_TYPES::BETA_MINUS_DECAY) {
        lossFraction = 0.5; // Approximate 50% energy loss to neutrinos for beta decays
    } else if (weakKind == REACLIB_WEAK_TYPES::ELECTRON_CAPTURE ||
               weakKind == REACLIB_WEAK_TYPES::POSITRON_CAPTURE) {
        lossFraction = 1.0;
    }

    const double qAbs = std::abs(reaction.qValue());
    const double energyLossRate = netFlow * qAbs * lossFraction * m_constants.Na * m_constants.MeV_to_erg;
    const double numberFlux = netFlow * m_constants.Na;
    return {energyLossRate, numberFlux};
}
/**
 * @brief Serial kernel: accumulates per-species dY/dt and neutrino totals over all active reactions.
 *
 * Two passes: the first computes the net molar flow and neutrino contribution
 * of each active reaction; the second scatters each flow into the per-species
 * derivative vector using the precomputed stoichiometric coefficients.
 *
 * @param local_abundances Molar abundances indexed by network-species index.
 * @param screening_factors / bare_rates / bare_reverse_rates Rate inputs,
 *        indexed by active-reaction iteration order (the same order this
 *        function iterates activeReactions).
 * @param rho Mass density.
 * @param activeReactions The set of reactions to accumulate.
 * @return dY/dt per species plus total neutrino energy-loss rate and flux.
 */
GraphEngine::PrecomputationKernelResults GraphEngine::accumulate_flows_serial(
    const std::vector<double> &local_abundances,
    const std::vector<double> &screening_factors,
    const std::vector<double> &bare_rates,
    const std::vector<double> &bare_reverse_rates,
    const double rho,
    const reaction::ReactionSet &activeReactions
) const {
    PrecomputationKernelResults results;
    results.dydt_vector.resize(m_networkSpecies.size(), 0.0);
    std::vector<double> molarReactionFlows;
    molarReactionFlows.reserve(m_precomputedReactions.size());
    // Pass 1: net flow + neutrino contribution per active reaction.
    // reactionCounter tracks iteration order and indexes the rate vectors.
    size_t reactionCounter = 0;
    for (const auto& reaction : activeReactions) {
        uint64_t reactionHash = utils::hash_reaction(*reaction);
        const size_t reactionIndex = m_precomputedReactionIndexMap.at(reactionHash);
        const PrecomputedReaction& precomputedReaction = m_precomputedReactions[reactionIndex];
        double netFlow = compute_reaction_flow(
            local_abundances,
            screening_factors,
            bare_rates,
            bare_reverse_rates,
            rho,
            reactionCounter,
            *reaction,
            reactionIndex,
            precomputedReaction);
        molarReactionFlows.push_back(netFlow);
        auto [local_neutrino_loss, local_neutrino_flux] = compute_neutrino_fluxes(netFlow, *reaction);
        results.total_neutrino_energy_loss_rate += local_neutrino_loss;
        results.total_neutrino_flux += local_neutrino_flux;
        reactionCounter++;
    }
    LOG_TRACE_L3(m_logger, "Computed {} molar reaction flows for active reactions. Assembling these into RHS", molarReactionFlows.size());
    // Pass 2: scatter each flow into the species derivatives
    // (dY_i/dt += nu_i * R_j for every species affected by reaction j).
    reactionCounter = 0;
    for (const auto& reaction: activeReactions) {
        const size_t j = m_precomputedReactionIndexMap.at(utils::hash_reaction(*reaction));
        const auto& precomp = m_precomputedReactions[j];
        const double R_j = molarReactionFlows[reactionCounter];
        for (size_t i = 0; i < precomp.affected_species_indices.size(); ++i) {
            const size_t speciesIndex = precomp.affected_species_indices[i];
            const int stoichiometricCoefficient = precomp.stoichiometric_coefficients[i];
            const double dydt_increment = static_cast<double>(stoichiometricCoefficient) * R_j;
            results.dydt_vector[speciesIndex] += dydt_increment;
        }
        reactionCounter++;
    }
    return results;
}
double GraphEngine::calculateReverseRate(
const reaction::Reaction &reaction,
const double T9,
@@ -655,6 +820,7 @@ namespace gridfire::engine {
}
StepDerivatives<double> GraphEngine::calculateAllDerivativesUsingPrecomputation(
const fourdst::composition::CompositionAbstract &comp,
const std::vector<double> &bare_rates,
@@ -672,132 +838,43 @@ namespace gridfire::engine {
T9,
rho
);
m_local_abundance_cache.clear();
for (const auto& species: m_networkSpecies) {
m_local_abundance_cache.push_back(comp.contains(species) ? comp.getMolarAbundance(species) : 0.0);
}
StepDerivatives<double> result;
std::vector<double> dydt_scratch(m_networkSpecies.size(), 0.0);
// --- Optimized loop ---
std::vector<double> molarReactionFlows;
molarReactionFlows.reserve(m_precomputedReactions.size());
#ifndef GRIDFIRE_USE_OPENMP
const auto [dydt_vector, total_neutrino_energy_loss_rate, total_neutrino_flux] = accumulate_flows_serial(
m_local_abundance_cache,
screeningFactors,
bare_rates,
bare_reverse_rates,
rho,
activeReactions
);
dydt_scratch = dydt_vector;
result.neutrinoEnergyLossRate = total_neutrino_energy_loss_rate;
result.totalNeutrinoFlux = total_neutrino_flux;
#else
const auto [dydt_vector, total_neutrino_energy_loss_rate, total_neutrino_flux] = accumulate_flows_parallel(
m_local_abundance_cache,
screeningFactors,
bare_rates,
bare_reverse_rates,
rho,
activeReactions
);
dydt_scratch = dydt_vector;
result.neutrinoEnergyLossRate = total_neutrino_energy_loss_rate;
result.totalNeutrinoFlux = total_neutrino_flux;
#endif
size_t reactionCounter = 0;
for (const auto& reaction : activeReactions) {
// --- Efficient lookup of only the active reactions ---
uint64_t reactionHash = utils::hash_reaction(*reaction);
const size_t reactionIndex = m_precomputedReactionIndexMap.at(reactionHash);
PrecomputedReaction precomputedReaction = m_precomputedReactions[reactionIndex];
// --- Forward abundance product ---
double forwardAbundanceProduct = 1.0;
for (size_t i = 0; i < precomputedReaction.unique_reactant_indices.size(); ++i) {
const size_t reactantIndex = precomputedReaction.unique_reactant_indices[i];
const fourdst::atomic::Species& reactant = m_networkSpecies[reactantIndex];
const int power = precomputedReaction.reactant_powers[i];
if (!comp.contains(reactant)) {
forwardAbundanceProduct = 0.0;
break; // No need to continue if one of the reactants has zero abundance
}
double factor = std::pow(comp.getMolarAbundance(reactant), power);
if (!std::isfinite(factor)) {
LOG_CRITICAL(m_logger, "Non-finite factor encountered in forward abundance product for reaction '{}'. Check input abundances for validity.", reaction->id());
throw exceptions::BadRHSEngineError("Non-finite factor encountered in forward abundance product.");
}
forwardAbundanceProduct *= std::pow(comp.getMolarAbundance(reactant), power);
}
const double bare_rate = bare_rates.at(reactionCounter);
const double screeningFactor = screeningFactors[reactionCounter];
const size_t numReactants = m_reactions[reactionIndex].reactants().size();
const size_t numProducts = m_reactions[reactionIndex].products().size();
// --- Forward reaction flow ---
const double forwardMolarReactionFlow =
screeningFactor *
bare_rate *
precomputedReaction.symmetry_factor *
forwardAbundanceProduct *
std::pow(rho, numReactants > 1 ? static_cast<double>(numReactants) - 1 : 0.0);
if (!std::isfinite(forwardMolarReactionFlow)) {
LOG_CRITICAL(m_logger, "Non-finite forward molar reaction flow computed for reaction '{}'. Check input abundances and rates for validity.", reaction->id());
throw exceptions::BadRHSEngineError("Non-finite forward molar reaction flow computed.");
}
// --- Reverse reaction flow ---
// Only do this is the reaction has a non-zero reverse symmetry factor (i.e. is reversible)
double reverseMolarReactionFlow = 0.0;
if (precomputedReaction.reverse_symmetry_factor != 0.0 and m_useReverseReactions) {
const double bare_reverse_rate = bare_reverse_rates.at(reactionCounter);
double reverseAbundanceProduct = 1.0;
for (size_t i = 0; i < precomputedReaction.unique_product_indices.size(); ++i) {
const size_t productIndex = precomputedReaction.unique_product_indices[i];
const fourdst::atomic::Species& product = m_networkSpecies[productIndex];
reverseAbundanceProduct *= std::pow(comp.getMolarAbundance(product), precomputedReaction.product_powers[i]);
}
reverseMolarReactionFlow = screeningFactor *
bare_reverse_rate *
precomputedReaction.reverse_symmetry_factor *
reverseAbundanceProduct *
std::pow(rho, numProducts > 1 ? static_cast<double>(numProducts) - 1 : 0.0);
}
molarReactionFlows.push_back(forwardMolarReactionFlow - reverseMolarReactionFlow);
if (reaction->type() == reaction::ReactionType::REACLIB_WEAK) {
double q_abs = std::abs(reaction->qValue());
REACLIB_WEAK_TYPES weakType = get_weak_reaclib_reaction_type(*reaction);
double neutrino_loss_fraction = 0.0;
switch (weakType) {
case REACLIB_WEAK_TYPES::BETA_PLUS_DECAY:
[[fallthrough]];
case REACLIB_WEAK_TYPES::BETA_MINUS_DECAY:
neutrino_loss_fraction = 0.5; // Approximate 50% energy loss to neutrinos for beta decays
break;
case REACLIB_WEAK_TYPES::ELECTRON_CAPTURE:
[[fallthrough]];
case REACLIB_WEAK_TYPES::POSITRON_CAPTURE:
neutrino_loss_fraction = 1.0;
break;
default: ;
}
double local_neutrino_loss = molarReactionFlows.back() * q_abs * neutrino_loss_fraction * m_constants.Na * m_constants.MeV_to_erg;
double local_neutrino_flux = molarReactionFlows.back() * m_constants.Na;
result.totalNeutrinoFlux += local_neutrino_flux;
result.neutrinoEnergyLossRate += local_neutrino_loss;
}
reactionCounter++;
}
LOG_TRACE_L3(m_logger, "Computed {} molar reaction flows for active reactions. Assembling these into RHS", molarReactionFlows.size());
// --- Assemble molar abundance derivatives ---
for (const auto& species: m_networkSpecies) {
result.dydt[species] = 0.0; // Initialize the change in abundance for each network species to 0
}
reactionCounter = 0;
for (const auto& reaction: activeReactions) {
size_t j = m_precomputedReactionIndexMap.at(utils::hash_reaction(*reaction));
const auto& precomp = m_precomputedReactions[j];
const double R_j = molarReactionFlows[reactionCounter];
for (size_t i = 0; i < precomp.affected_species_indices.size(); ++i) {
const size_t speciesIndex = precomp.affected_species_indices[i];
const fourdst::atomic::Species& species = m_networkSpecies[speciesIndex];
const int stoichiometricCoefficient = precomp.stoichiometric_coefficients[i];
// Update the derivative for this species
double dydt_increment = static_cast<double>(stoichiometricCoefficient) * R_j;
result.dydt.at(species) += dydt_increment;
result.reactionContributions[species][std::string(reaction->id())] = dydt_increment;
}
reactionCounter++;
// load scratch into result.dydt
for (size_t i = 0; i < m_networkSpecies.size(); ++i) {
result.dydt[m_networkSpecies[i]] = dydt_scratch[i];
}
// --- Calculate the nuclear energy generation rate ---
@@ -1502,4 +1579,69 @@ namespace gridfire::engine {
return true;
}
#ifdef GRIDFIRE_USE_OPENMP
/**
 * @brief OpenMP-parallel kernel: accumulates per-species dY/dt and neutrino totals.
 *
 * Must produce the same result as accumulate_flows_serial. Each thread scatters
 * into its own scratch dydt buffer (avoiding atomics on the hot path); the
 * buffers are reduced into the final vector afterwards. Scalar neutrino totals
 * use an OpenMP reduction.
 *
 * @param local_abundances Molar abundances indexed by network-species index.
 * @param screening_factors / bare_rates / bare_reverse_rates Rate inputs,
 *        indexed by active-reaction iteration order (i.e. by k below).
 * @param rho Mass density.
 * @param activeReactions The set of reactions to accumulate.
 * @return dY/dt per species plus total neutrino energy-loss rate and flux.
 *
 * NOTE(review): compute_reaction_flow can throw on non-finite values; an
 * exception escaping an OpenMP region is undefined behavior (std::terminate
 * in practice) — consider capturing and rethrowing after the region.
 */
GraphEngine::PrecomputationKernelResults GraphEngine::accumulate_flows_parallel(
    const std::vector<double> &local_abundances,
    const std::vector<double> &screening_factors,
    const std::vector<double> &bare_rates,
    const std::vector<double> &bare_reverse_rates,
    const double rho,
    const reaction::ReactionSet &activeReactions
) const {
    const int n_threads = omp_get_max_threads();
    // One scratch derivative vector per thread so the scatter loop is race-free.
    std::vector<std::vector<double>> thread_local_dydt(n_threads, std::vector<double>(m_networkSpecies.size(), 0.0));
    double total_neutrino_energy_loss_rate = 0.0;
    double total_neutrino_flux = 0.0;
    #pragma omp parallel for schedule(static) reduction(+:total_neutrino_energy_loss_rate, total_neutrino_flux)
    for (size_t k = 0; k < activeReactions.size(); ++k) {
        const int t_id = omp_get_thread_num();
        const auto& reaction = activeReactions[k];
        const size_t reactionIndex = m_precomputedReactionIndexMap.at(utils::hash_reaction(reaction));
        const PrecomputedReaction& precomputedReaction = m_precomputedReactions[reactionIndex];
        // BUGFIX: the rate/screening vectors are ordered by active-reaction
        // iteration order, so they must be indexed with k (matching the serial
        // path's reactionCounter) — NOT with reactionIndex, which indexes the
        // full precomputed table and mismatches whenever the active set is a
        // subset (wrong rates, or std::out_of_range from .at()).
        const double netFlow = compute_reaction_flow(
            local_abundances,
            screening_factors,
            bare_rates,
            bare_reverse_rates,
            rho,
            k,
            reaction,
            reactionIndex,
            precomputedReaction
        );
        auto [neutrinoEnergyLossRate, neutrinoFlux] = compute_neutrino_fluxes(
            netFlow,
            reaction
        );
        total_neutrino_energy_loss_rate += neutrinoEnergyLossRate;
        total_neutrino_flux += neutrinoFlux;
        // Scatter into this thread's private buffer.
        for (size_t i = 0; i < precomputedReaction.affected_species_indices.size(); ++i) {
            thread_local_dydt[t_id][precomputedReaction.affected_species_indices[i]] +=
                netFlow * precomputedReaction.stoichiometric_coefficients[i];
        }
    }
    PrecomputationKernelResults results;
    results.total_neutrino_energy_loss_rate = total_neutrino_energy_loss_rate;
    results.total_neutrino_flux = total_neutrino_flux;
    results.dydt_vector.resize(m_networkSpecies.size(), 0.0);
    // Reduce the per-thread buffers species-by-species (parallel over species,
    // so each output element is written by exactly one thread).
    #pragma omp parallel for schedule(static)
    for (size_t i = 0; i < m_networkSpecies.size(); ++i) {
        double sum = 0.0;
        for (int t = 0; t < n_threads; ++t) sum += thread_local_dydt[t][i];
        results.dydt_vector[i] = sum;
    }
    return results;
}
#endif
}

View File

@@ -394,7 +394,9 @@ namespace gridfire::engine {
const double maxFlow
) const {
LOG_TRACE_L1(m_logger, "Culling reactions based on flow rates...");
const auto relative_culling_threshold = m_config.get<double>("gridfire:AdaptiveEngineView:RelativeCullingThreshold", 1e-75);
const auto relative_culling_threshold = m_config->engine.views.adaptiveEngineView.relativeCullingThreshold;
double absoluteCullingThreshold = relative_culling_threshold * maxFlow;
LOG_DEBUG(m_logger, "Relative culling threshold: {:7.3E} ({:7.3E})", relative_culling_threshold, absoluteCullingThreshold);
std::vector<const reaction::Reaction*> culledReactions;

View File

@@ -278,7 +278,12 @@ namespace gridfire::reaction {
double Ye,
double mue, const std::vector<double> &Y, const std::unordered_map<size_t, Species>& index_to_species_map
) const {
return calculate_rate<double>(T9);
if (m_cached_rates.contains(T9)) {
return m_cached_rates.at(T9);
}
const double rate = calculate_rate<double>(T9);
m_cached_rates[T9] = rate;
return rate;
}
double LogicalReaclibReaction::calculate_log_rate_partial_deriv_wrt_T9(
@@ -455,6 +460,10 @@ namespace gridfire::reaction {
return std::make_optional(m_reactions[m_reactionNameMap.at(std::string(id))]->clone());
}
std::unique_ptr<Reaction> ReactionSet::get(size_t index) const {
return m_reactions.at(index)->clone();
}
void ReactionSet::remove_reaction(const Reaction& reaction) {
const size_t rh = reaction.hash(0);
if (!m_reactionHashes.contains(rh)) {

View File

@@ -112,8 +112,8 @@ namespace gridfire::solver {
// 2. If the user has set tolerances in code, those override the config
// 3. If the user has not set tolerances in code and the config does not have them, use hardcoded defaults
auto absTol = m_config.get<double>("gridfire:solver:CVODESolverStrategy:absTol", 1.0e-8);
auto relTol = m_config.get<double>("gridfire:solver:CVODESolverStrategy:relTol", 1.0e-5);
auto absTol = m_config->solver.cvode.absTol;
auto relTol = m_config->solver.cvode.relTol;
if (m_absTol) {
absTol = *m_absTol;
@@ -935,8 +935,8 @@ namespace gridfire::solver {
sunrealtype *y_data = N_VGetArrayPointer(m_Y);
sunrealtype *y_err_data = N_VGetArrayPointer(m_YErr);
const auto absTol = m_config.get<double>("gridfire:solver:CVODESolverStrategy:absTol", 1.0e-8);
const auto relTol = m_config.get<double>("gridfire:solver:CVODESolverStrategy:relTol", 1.0e-8);
const auto absTol = m_config->solver.cvode.absTol;
const auto relTol = m_config->solver.cvode.relTol;
std::vector<double> err_ratios;
const size_t num_components = N_VGetLength(m_Y);

View File

@@ -38,16 +38,25 @@ gridfire_build_dependencies = [
log_dep,
xxhash_dep,
eigen_dep,
plugin_dep,
sundials_dep,
json_dep,
]
if get_option('plugin_support')
gridfire_build_dependencies += [plugin_dep]
endif
if get_option('openmp_support')
openmp_dep = dependency('openmp', required: true)
gridfire_build_dependencies += [openmp_dep]
endif
# Define the libnetwork library so it can be linked against by other parts of the build system
libgridfire = library('gridfire',
gridfire_sources,
include_directories: include_directories('include'),
dependencies: gridfire_build_dependencies,
objects: [cvode_objs, kinsol_objs],
install : true)
gridfire_dep = declare_dependency(
@@ -59,12 +68,11 @@ gridfire_dep = declare_dependency(
install_subdir('include/gridfire', install_dir: get_option('includedir'))
message('Configuring C API...')
subdir('extern')
if get_option('build-python')
message('Configuring Python bindings...')
subdir('python')
else
message('Skipping Python bindings...')
if not get_option('build_c_api') and get_option('build_fortran')
error('Cannot build fortran without C API. Set -Dbuild-c-api=true and -Dbuild-fortran=true')
endif
if get_option('build_c_api')
message('Configuring C API...')
subdir('extern')
endif

View File

@@ -16,5 +16,5 @@ sys.modules['gridfire.io'] = io
__all__ = ['type', 'utils', 'engine', 'solver', 'exceptions', 'partition', 'reaction', 'screening', 'io', 'policy']
__version__ = "v0.7.3_rc2"
__version__ = "v0.7.4_rc2"

View File

@@ -1,4 +1,4 @@
[wrap-git]
url = https://github.com/4D-STAR/fourdst
revision = v0.9.10
revision = v0.9.14
depth = 1

View File

@@ -1,2 +1,7 @@
subdir('C')
subdir('fortran')
if get_option('build_c_api')
subdir('C')
endif
if get_option('build_fortran')
subdir('fortran')
endif

View File

@@ -4,7 +4,6 @@
#include "gridfire/gridfire.h"
#include "fourdst/composition/composition.h"
#include "fourdst/plugin/bundle/bundle.h"
#include "fourdst/logging/logging.h"
#include "fourdst/atomic/species.h"
#include "fourdst/composition/utils.h"

View File

@@ -1,5 +1,5 @@
executable(
'graphnet_sandbox',
'main.cpp',
dependencies: [gridfire_dep, composition_dep, plugin_dep, cli11_dep],
dependencies: [gridfire_dep, cli11_dep],
)

View File

@@ -1,7 +1,7 @@
# Google Test dependency
gtest_dep = dependency('gtest', main: true, required : true)
gtest_main = dependency('gtest_main', required: true)
gtest_nomain_dep = dependency('gtest', main: false, required : true)
#gtest_dep = dependency('gtest', main: true, required : true)
#gtest_main = dependency('gtest_main', required: true)
#gtest_nomain_dep = dependency('gtest', main: false, required : true)
# Subdirectories for unit and integration tests
subdir('graphnet_sandbox')

View File

@@ -0,0 +1,48 @@
#include "fourdst/config/config.h"
#include "gridfire/config/config.h"

#include <filesystem>
#include <iostream>
#include <source_location>
#include <string>
#include <string_view>

#include "CLI/CLI.hpp"
// Strip any leading namespace qualification from a fully qualified type name,
// returning the text after the final "::" (or the input unchanged if there is
// no "::" separator). Evaluated entirely at compile time.
consteval std::string_view strip_namespaces(const std::string_view fullName) {
    const auto separator = fullName.rfind("::");
    return separator == std::string_view::npos
        ? fullName
        : fullName.substr(separator + 2);
}
// Return the unmangled name of T by parsing the compiler-generated signature
// string from std::source_location::current().function_name().
// NOTE(review): this relies on the "T = <name>" / trailing ']' layout that
// GCC and Clang emit; MSVC formats the signature differently, so on an
// unrecognized compiler this falls back to returning the whole signature
// string — confirm against the supported toolchains (meson requires g++>=14
// or clang per the build setup).
template <typename T>
consteval std::string_view get_type_name() {
    constexpr std::string_view name = std::source_location::current().function_name();
    // Locate the "T = " marker inserted by the compiler for the template argument.
    const auto pos = name.find("T = ");
    if (pos == std::string_view::npos) return name;  // unknown format: return as-is
    const auto start = pos + 4;
    // The type name runs up to the closing ']' of the template-argument list.
    const auto end = name.rfind(']');
    return name.substr(start, end - start);
}
// Entry point for the gf_generate_config_file tool: writes GridFire's default
// TOML configuration file and its JSON schema into a user-chosen directory.
int main(int argc, char** argv) {
    CLI::App app{"GridFire Sandbox Application."};

    std::string outputPath = ".";
    app.add_option("-p,--path", outputPath, "path to save generated config files (default: current directory)");

    CLI11_PARSE(app, argc, argv);

    // The target directory must already exist; we do not create it.
    const std::filesystem::path outPath(outputPath);
    if (!std::filesystem::exists(outPath)) {
        std::cerr << "Error: The specified path does not exist: " << outputPath << std::endl;
        return 1;
    }

    // Default-constructed Config carries the built-in default values, which
    // save() serializes as the template config file.
    fourdst::config::Config<gridfire::config::GridFireConfig> configConfig;

    // Output files are named after the unqualified config struct name,
    // e.g. GridFireConfig.toml and GridFireConfig.schema.json.
    const std::string_view name = strip_namespaces(get_type_name<gridfire::config::GridFireConfig>());
    const std::string defaultConfigFilePath = (outPath / (std::string(name) + ".toml")).string();
    const std::string schemaFilePath = (outPath / (std::string(name) + ".schema.json")).string();

    configConfig.save(defaultConfigFilePath);
    configConfig.save_schema(schemaFilePath);
}

1
tools/config/meson.build Normal file
View File

@@ -0,0 +1 @@
executable('gf_generate_config_file', 'generate_config_files.cpp', dependencies: [gridfire_dep, cli11_dep], install: true)

3
tools/meson.build Normal file
View File

@@ -0,0 +1,3 @@
if get_option('build_tools')
subdir('config')
endif

View File

@@ -0,0 +1,132 @@
#!/bin/bash
# --- Configuration ---
PYTHON_VERSIONS=("3.8.10" "3.9.13" "3.10.11" "3.11.9" "3.12.3" "3.13.0" "3.14.0")
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BASE_OUTPUT_DIR="$SCRIPT_DIR/../../cross/python_includes"
# --- OS Detection ---
OS="$(uname -s)"
echo "Detected OS: $OS"
# --- Dependency Check ---
# Verify that the external tools needed for package extraction are installed.
# Exits with status 1 if anything is missing. Reads the script-global $OS.
check_dependencies() {
    # cpio unpacks the .pkg Payload on BOTH platforms (the macOS branch of
    # extract_pkg pipes through cpio too), so check it unconditionally.
    if ! command -v cpio &> /dev/null; then
        echo "Error: 'cpio' is required."
        exit 1
    fi
    if [ "$OS" == "Linux" ]; then
        # Linux has no pkgutil; 7z expands the .pkg instead.
        if ! command -v 7z &> /dev/null; then
            echo "Error: '7z' (p7zip-full) is required on Linux."
            exit 1
        fi
    fi
}
# --- Extraction Logic (OS Specific) ---
# Expand a python.org .pkg installer and extract only the CPython headers.
#   $1: path to the downloaded .pkg file
#   $2: scratch directory to expand into
#   $3: major.minor Python version (e.g. 3.11)
# Returns 1 if the expected Payload archive is not found.
extract_pkg() {
    local pkg_file="$1"
    local extract_root="$2"
    local major_ver="$3" # e.g., 3.11

    echo " -> Extracting..."

    # Only the expansion step differs per OS; the payload handling below is
    # shared (previously duplicated verbatim in both branches).
    if [ "$OS" == "Darwin" ]; then
        pkgutil --expand "$pkg_file" "$extract_root/expanded"
    else
        7z x "$pkg_file" -o"$extract_root/expanded" -y > /dev/null
    fi

    local payload_path="$extract_root/expanded/Python_Framework.pkg/Payload"
    if [ ! -f "$payload_path" ]; then
        echo " -> Error: Could not find Payload in package."
        return 1
    fi

    mkdir -p "$extract_root/root"
    pushd "$extract_root/root" > /dev/null
    # The Payload is a gzipped cpio archive; extract only the include tree.
    # (gunzip -c replaces the useless "cat | gunzip" pipeline.)
    gunzip -c "$payload_path" | cpio -id "*include/python${major_ver}/*" 2>/dev/null
    popd > /dev/null
}
check_dependencies
mkdir -p "$BASE_OUTPUT_DIR"

# Download each requested CPython release, extract its headers, and install
# them under $BASE_OUTPUT_DIR/python-<major.minor>/include/python<major.minor>.
for FULL_VER in "${PYTHON_VERSIONS[@]}"; do
    MAJOR_VER=$(echo "$FULL_VER" | cut -d. -f1,2)
    TARGET_DIR="$BASE_OUTPUT_DIR/python-$MAJOR_VER"
    TEMP_DIR="$BASE_OUTPUT_DIR/tmp_$FULL_VER"

    # 3.8 installers were published with a different (macosx10.9) suffix.
    PKG_NAME="python-${FULL_VER}-macos11.pkg"
    if [[ "$MAJOR_VER" == "3.8" ]]; then
        PKG_NAME="python-${FULL_VER}-macosx10.9.pkg"
    fi
    DOWNLOAD_URL="https://www.python.org/ftp/python/${FULL_VER}/$PKG_NAME"

    echo "Processing Python $FULL_VER..."

    # Skip versions already extracted (quote $TARGET_DIR: unquoted it breaks
    # on paths containing spaces).
    if [ -d "$TARGET_DIR" ] && [ "$(ls -A "$TARGET_DIR")" ]; then
        echo " -> Headers already exist in $TARGET_DIR. Skipping."
        continue
    fi

    mkdir -p "$TEMP_DIR"

    echo " -> Downloading from $DOWNLOAD_URL"
    # BUGFIX: -f makes curl exit non-zero on HTTP errors (404 etc.). Without
    # it, curl -s -o succeeds, saves the error page as python.pkg, and the
    # failure branch below could never fire.
    if ! curl -fL -s -o "$TEMP_DIR/python.pkg" "$DOWNLOAD_URL"; then
        echo " -> Download failed! Check version number or internet connection."
        rm -rf "$TEMP_DIR"
        continue
    fi

    # 2. Extract
    extract_pkg "$TEMP_DIR/python.pkg" "$TEMP_DIR" "$MAJOR_VER"

    # 3. Move Headers to Final Location
    # The cpio extraction usually results in: ./Versions/X.Y/include/pythonX.Y
    # We want to move that specific include folder to our target dir
    FOUND_HEADERS=$(find "$TEMP_DIR/root" -type d -path "*/include/python${MAJOR_VER}" | head -n 1)

    if [ -n "$FOUND_HEADERS" ]; then
        echo " -> Found headers at: $FOUND_HEADERS"
        # Move the content to the final destination
        # We want the folder to be .../python-3.11/include/python3.11
        mkdir -p "$TARGET_DIR/include"
        mv "$FOUND_HEADERS" "$TARGET_DIR/include/"

        # Verify pyconfig.h exists (sanity check)
        if [ -f "$TARGET_DIR/include/python${MAJOR_VER}/pyconfig.h" ]; then
            echo " -> Success: Headers installed to $TARGET_DIR"
        else
            echo " -> Warning: Header move seemed successful, but pyconfig.h is missing."
        fi
    else
        echo " -> Error: Could not locate header files after extraction."
    fi

    # 4. Cleanup
    rm -rf "$TEMP_DIR"
    echo "---------------------------------------------------"
done

echo "Done. All headers stored in $BASE_OUTPUT_DIR"

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env bash
# Build Python wheels for macOS arm64 across many CPython/PyPy versions
# managed by pyenv, sanitize RPATHs, then repair the wheels with delocate.
#
# NOTE(review): this chunk is unified-diff residue, not a clean script — the
# "@@ ..." hunk headers and several adjacent old/new line pairs below
# (PYTHON_VERSIONS, the pyenv-version check, the pip-wheel invocation, the
# delocate step) are BOTH sides of a diff pasted together. Reconcile against
# the real file before executing.
set -euo pipefail
# 1. Validation
if [[ $(uname -m) != "arm64" ]]; then
echo "Error: This script is intended to run on an Apple Silicon (arm64) Mac."
exit 1
# (diff hunk header — not shell)
@@ -11,11 +12,12 @@ if [[ $# -ne 1 ]]; then
exit 1
fi
# --- Initial Setup ---
# 2. Setup Directories
REPO_URL="$1"
WORK_DIR="$(pwd)"
WHEEL_DIR="${WORK_DIR}/wheels_macos_aarch64_tmp"
FINAL_WHEEL_DIR="${WORK_DIR}/wheels_macos_aarch64"
RPATH_SCRIPT="${WORK_DIR}/../../build-python/fix_rpaths.py" # Assumes script is in this location relative to execution
echo "➤ Creating wheel output directories"
mkdir -p "${WHEEL_DIR}"
# (diff hunk header — not shell)
@@ -26,10 +28,22 @@ echo "➤ Cloning ${REPO_URL} → ${TMPDIR}/project"
git clone --depth 1 "${REPO_URL}" "${TMPDIR}/project"
cd "${TMPDIR}/project"
# --- macOS Build Configuration ---
# 3. Build Configuration
export MACOSX_DEPLOYMENT_TARGET=15.0
# Meson options passed to pip via config-settings
# Note: We use an array to keep the command clean
MESON_ARGS=(
"-Csetup-args=-Dunity=off"
"-Csetup-args=-Dbuild-python=true"
"-Csetup-args=-Dbuild-fortran=false"
"-Csetup-args=-Dbuild-tests=false"
"-Csetup-args=-Dpkg-config=false"
"-Csetup-args=-Dunity-safe=true"
)
# NOTE(review): old/new diff pair — the second assignment (3.8.20 dropped)
# is the current one.
PYTHON_VERSIONS=("3.8.20" "3.9.23" "3.10.18" "3.11.13" "3.12.11" "3.13.5" "3.13.5t" "3.14.0rc1" "3.14.0rc1t" 'pypy3.10-7.3.19' "pypy3.11-7.3.20")
PYTHON_VERSIONS=("3.9.23" "3.10.18" "3.11.13" "3.12.11" "3.13.5" "3.13.5t" "3.14.0rc1" "3.14.0rc1t" 'pypy3.10-7.3.19' "pypy3.11-7.3.20")
if ! command -v pyenv &> /dev/null; then
echo "Error: pyenv not found. Please install it to manage Python versions."
# (diff hunk header — not shell)
@@ -37,55 +51,48 @@ if ! command -v pyenv &> /dev/null; then
fi
eval "$(pyenv init -)"
# 4. Build Loop
# Each iteration runs in a subshell so `pyenv shell` and failures stay local.
for PY_VERSION in "${PYTHON_VERSIONS[@]}"; do
(
set -e
# NOTE(review): old/new diff pair — the filter WITHOUT the trailing "." is
# the current version of this check.
if ! pyenv versions --bare --filter="${PY_VERSION}." &>/dev/null; then
echo "⚠️ Python version matching '${PY_VERSION}.*' not found by pyenv. Skipping."
# Check if version exists in pyenv
if ! pyenv versions --bare --filter="${PY_VERSION}" &>/dev/null; then
echo "⚠️ Python version matching '${PY_VERSION}' not found by pyenv. Skipping."
continue
fi
pyenv shell "${PY_VERSION}"
PY="$(pyenv which python)"
# NOTE(review): old/new diff pair of progress banners.
echo "➤ Building for $($PY --version) on macOS arm64 (target: ${MACOSX_DEPLOYMENT_TARGET})"
echo "----------------------------------------------------------------"
echo "➤ Building for $($PY --version) on macOS arm64"
echo "----------------------------------------------------------------"
# Install build deps explicitly so we can skip build isolation
"$PY" -m pip install --upgrade pip setuptools wheel meson meson-python delocate
# NOTE(review): old/new diff pair — the ccache + --no-build-isolation
# invocation below supersedes this plain clang one.
CC=clang CXX=clang++ "$PY" -m pip wheel . \
-w "${WHEEL_DIR}" -vv
# PERF: --no-build-isolation prevents creating a fresh venv and reinstalling meson/ninja
# for every single build, saving significant I/O and network time.
CC="ccache clang" CXX="ccache clang++" "$PY" -m pip wheel . \
--no-build-isolation \
"${MESON_ARGS[@]}" \
-w "${WHEEL_DIR}" -vv
echo "➤ Sanitizing RPATHs before delocation..."
# We expect exactly one new wheel in the tmp dir per iteration
CURRENT_WHEEL=$(find "${WHEEL_DIR}" -name "*.whl" | head -n 1)
if [ -f "$CURRENT_WHEEL" ]; then
"$PY" -m wheel unpack "$CURRENT_WHEEL" -d "${WHEEL_DIR}/unpacked"
UNPACKED_ROOT=$(find "${WHEEL_DIR}/unpacked" -mindepth 1 -maxdepth 1 -type d)
find "$UNPACKED_ROOT" -name "*.so" | while read -r SO_FILE; do
echo " Processing: $SO_FILE"
"$PY" "../../build-python/fix_rpaths.py" "$SO_FILE"
done
"$PY" -m wheel pack "$UNPACKED_ROOT" -d "${WHEEL_DIR}"
rm -rf "${WHEEL_DIR}/unpacked"
else
echo "Error: No wheel found to sanitize!"
exit 1
fi
# NOTE(review): old/new diff pair — the per-wheel delocate below replaces
# this glob-based variant.
echo "➤ Repairing wheel(s) with delocate"
delocate-wheel -w "${FINAL_WHEEL_DIR}" "${WHEEL_DIR}"/*.whl
rm "${WHEEL_DIR}"/*.whl
echo "➤ Repairing wheel with delocate"
# Delocate moves the repaired wheel to FINAL_WHEEL_DIR
delocate-wheel -w "${FINAL_WHEEL_DIR}" "$CURRENT_WHEEL"
# Clean up the intermediate wheel from this iteration so it doesn't confuse the next
rm "$CURRENT_WHEEL"
)
done
# Cleanup
rm -rf "${TMPDIR}"
rm -rf "${WHEEL_DIR}"
echo "✅ All builds complete. Artifacts in ${FINAL_WHEEL_DIR}"

View File

@@ -0,0 +1,90 @@
#!/bin/zsh
# Temporary wheel-repair helper: strips duplicate RPATH entries from the
# native extension modules inside built macOS wheels, re-packs the wheels,
# and delocates them. See the banner printed below for the full flow.
set -e
# Color codes for output
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
function fix_file_rpaths() {
    # Strip duplicate RPATH entries from a single Mach-O file by delegating
    # to the Python helper at $FIX_RPATH_SCRIPT (set at script top level).
    #   $1 - path to the shared library to repair
    # Exits the whole script with status 1 on failure.
    local file_path="$1"
    echo -e "${YELLOW}Fixing RPATHs in file: $file_path...${NC}"
    # BUG FIX: with `set -e` active, a failing python3 invocation aborted the
    # script before the old `if [ $? -ne 0 ]` check could run, so the error
    # message was unreachable. Testing the command directly keeps the error
    # path live.
    if ! python3 "$FIX_RPATH_SCRIPT" "$file_path"; then
        echo -e "${RED}Error: RPATH fix script failed for file: $file_path${NC}"
        exit 1
    fi
    echo -e "${GREEN}RPATHs fixed for file: $file_path${NC}"
}
# NOTE(review): `export -f` is a bashism — zsh does not export shell
# functions to the environment. Harmless here since fix_file_rpaths is only
# called from this same shell, but verify it does not error under `set -e`
# on your zsh version.
export -f fix_file_rpaths
echo -e "${YELLOW}"
echo "========================================================================="
echo " TEMPORARY WHEEL REPAIR WORKAROUND"
echo "========================================================================="
echo -e "${NC}"
echo ""
echo -e "${YELLOW}WARNING:${NC} This script applies a temporary patch to fix"
echo "a known issue with meson-python that causes duplicate RPATH entries in"
echo "built Python wheels on macOS, preventing module imports."
echo ""
echo "This workaround will:"
echo " 1. Unzip the wheel file"
echo " 2. Locate the extension modules"
echo " 3. Remove duplicate RPATH entries using install_name_tool"
echo " 4. Resign the wheel if necessary"
echo " 5. Repackage the wheel file"
echo ""
# Path to the Python helper that rewrites RPATHs; relative to the current
# working directory, so the script must be launched from its own directory.
FIX_RPATH_SCRIPT="../../build-python/fix_rpaths.py"
# get the wheel directory to scan through
WHEEL_DIR="$1"
if [ -z "$WHEEL_DIR" ]; then
echo -e "${RED}Error: No wheel directory specified.${NC}"
echo "Usage: $0 /path/to/wheel_directory"
exit 1
fi
REPAIRED_WHEELS_DIR="repaired_wheels"
mkdir -p "$REPAIRED_WHEELS_DIR"
# Created implicitly by delocate-wheel's -w option in the loop below.
REPAIRED_DELOCATED_WHEELS_DIR="${REPAIRED_WHEELS_DIR}/delocated"
# Scan all files ending in .whl and not starting with a dot
# Repair loop: unzip each wheel, fix RPATHs in every extension module,
# repackage, then delocate (bundle external dylibs into the wheel).
# PERF: install delocate once up front instead of on every loop iteration.
pip install delocate
# BUG FIX: under zsh's default NOMATCH behaviour an unmatched "*.whl" glob
# aborts the script (with `set -e`) before the loop even starts, so the old
# in-loop "no wheels" guard was unreachable. The (N) qualifier yields an
# empty array instead, which we test explicitly.
WHEEL_FILES=("$WHEEL_DIR"/*.whl(N))
if [ ${#WHEEL_FILES[@]} -eq 0 ]; then
    echo -e "${YELLOW}No wheel files found in directory: $WHEEL_DIR${NC}"
    exit 0
fi
for WHEEL_PATH in "${WHEEL_FILES[@]}"; do
    echo ""
    echo -e "${GREEN}Processing wheel: $WHEEL_PATH${NC}"
    WHEEL_NAME=$(basename "$WHEEL_PATH")
    TEMP_DIR=$(mktemp -d)
    echo -e "${GREEN}Step 1: Unzipping wheel...${NC}"
    python -m wheel unpack "$WHEEL_PATH" -d "$TEMP_DIR"
    echo -e "${GREEN}Step 2: Locating extension modules...${NC}"
    # fix_file_rpaths (defined above) removes duplicate RPATH entries.
    while IFS= read -r -d '' so_file; do
        echo "Found library: $so_file"
        fix_file_rpaths "$so_file"
    done < <(find "$TEMP_DIR" -name "*.so" -print0)
    echo -e "${GREEN}Step 4: Repackaging wheel...${NC}"
    # BUG FIX: discover the unpacked wheel root instead of hard-coding the
    # project version (previously "gridfire-0.7.4rc2", which broke for any
    # other release). `wheel unpack` creates exactly one top-level directory.
    UNPACKED_ROOT=$(find "$TEMP_DIR" -mindepth 1 -maxdepth 1 -type d | head -n 1)
    python -m wheel pack "$UNPACKED_ROOT" -d "$REPAIRED_WHEELS_DIR"
    REPAIRED_WHEEL_PATH="${REPAIRED_WHEELS_DIR}/${WHEEL_NAME}"
    echo -e "${GREEN}Step 5: Delocating wheel...${NC}"
    delocate-wheel -w "$REPAIRED_DELOCATED_WHEELS_DIR" "$REPAIRED_WHEEL_PATH"
    echo -e "${GREEN}Repaired wheel saved to: ${REPAIRED_DELOCATED_WHEELS_DIR}/${WHEEL_NAME}${NC}"
    # Clean up temporary directory
    rm -rf "$TEMP_DIR"
done