Kitchen sink
Here, a full example of the configuration file is provided, with its fields and capabilities.
# ============================================================
# Nassu kitchensink configuration
# This file demonstrates every available configuration field.
# Use it as a reference when building your own .nassu.yaml.
# ============================================================
# Load parent configurations from other files before this one is parsed.
# Simulations defined in those files can be used as parents here.
# The path must be relative to your shell's current working directory,
# not to this file's location.
dependencies:
# sim_cfg_files: ["path/to/dependent/simulation_config.yaml"]
sim_cfg_files: [] # Empty list - no external dependencies
# Named values used throughout this file.
# Reference them with ${var_name} inside !sub and !math tags.
# Nested keys are accessed with dot notation: ${cfg_time.cst_step}
variables:
# Special built-in variables injected by Nassu at load time:
version: ${NASSU_VERSION} # x.y.z - running Nassu version string
filepath: ${NASSU_FILE_PATH} # docs/source/user_guide/02_config/file/kitchensink.nassu.yaml
foldername: ${NASSU_FILE_FOLDER} # docs/source/user_guide/02_config/file - directory containing this file
base_folder: "examples/study_cases/high_rise_building" # Root output path
domain_size: 96 # Base domain dimension in lattice nodes
height_ref: 8 # Reference building height in lattice units
# Nested variable maps - keys are accessed as ${cfg_time.develop_steps} etc.
cfg_time:
develop_steps: 5000 # Steps for flow to develop before statistics collection
# !math with variable references: computes one convective time unit (H/U)
cst_step: !math ${height_ref}/${u_ref[1]}
cst_run: 100 # Number of convective time units to run after development
# Multi-line !math expression - references other nested variables
total_steps:
!math ${cfg_time.develop_steps} + ${cfg_time.cst_step} * ${cfg_time.cst_run}
# !math applied to a list - each element is evaluated independently
u_ref: !math [0.05, 0.05*1.1] # Two reference velocities in lattice units
angle_use: !math cos(pi*45/180) # 45-degree wind angle in radians
nested_angle: !math cos(acos(pi/4)) # Nested trigonometric expression
min_max: !math min(cos(acos(pi/4)), acos(pi/4)) # min() across two expressions
# YAML anchor for a macroscopic rescale block - define once and reuse with *MACRS_RESCALE.
# Converts lattice values to physical units for output: result = value * mul + cte
macrs_rescale: &MACRS_RESCALE
rho: { mul: 1e3, cte: 100 } # Multiply density by 1000 (lattice rho to kg/m3)
ux: { mul: 100, cte: 0 } # Scale x-velocity to physical units
uy: { mul: 100, cte: 0 } # Scale y-velocity to physical units
uz: { mul: 100, cte: 0 } # Scale z-velocity to physical units
simulations:
- name: highRiseBuildingZero # Unique simulation name within this file
# False to skip this simulation without removing it from the file.
# Defaults to true
run_simul: true
# Total number of LBM time steps to advance.
n_steps: 50000
# !sub substitutes ${var_name} references - use it for any string field
# that needs variable interpolation.
save_path: !sub "${base_folder}/results/high_rise_building"
# Controls how often Nassu prints simulation progress to stdout.
report:
start_step: 0 # Step at which reporting begins (0 = from the start)
end_step: 0 # Step at which reporting stops (0 = until the end)
frequency: 1000 # Report simulation progress every 1000 steps
# ----------------------------------------------------------------
# Domain
# ----------------------------------------------------------------
domain:
# Lattice node counts for the full domain box.
# All dimensions must be divisible by block_size.
domain_size:
# !unroll expands this simulation into multiple variants, one per list
# element. All !unroll lists in the same simulation block must have the
# same length. Here two variants are produced: 96^3 and 192^3 domains.
x: !unroll [!math "${domain_size}", !math "${domain_size}*2"]
y: !unroll [!math "${domain_size}", !math "${domain_size}*2"]
z: !unroll [!math "${domain_size}", !math "${domain_size}*2"]
# Affine rescaling applied to node coordinates for all output files.
# pos_rescaled = pos * scale + translation
# Useful for converting from lattice units to physical metres.
domain_rescale:
translation: [0, -10, 20] # Offset applied after scaling [x, y, z]
scale: [1, 1, 0.5] # Per-axis scale factors [x, y, z]
# Size of each lattice block in nodes per edge.
# Changing this affects GPU shared-memory usage - leave as 8 unless advised.
block_size: 8
# Static (non-adaptive) mesh refinement rules.
# Each named entry under `static` is a user-chosen group name that
# combines any subset of: volumes_refine, bodies, volume_refinement_limit,
# volumes_not_refine. The names below are illustrative.
refinement:
static:
# Refinement group: refine a rectangular volume and a body simultaneously
volumes_body_refine:
# List of axis-aligned boxes to refine to a given level.
volumes_refine:
- is_abs: true # true = coordinates in absolute lattice units; false = relative to domain size (0 to 1)
lvl: 1 # Target refinement level for blocks in this box
start: [0.0, 0.0, 0.0] # Lower corner [x, y, z]
# !math on a list computes each element independently
end: !math [32.0, 160.0, "${domain_size}/2"] # Upper corner [x, y, z]
# List of bodies whose bounding surfaces drive block refinement.
bodies:
- body_name: CAARC # Must match a key under domain.bodies
lvl: 4 # Refinement level to apply around this body
normal_offsets: [0] # Surface offset multipliers for refinement shell
# Optional per-body transformation applied before refinement lookup
transformation:
translation: [12.0, 0.0, 1.0] # Shift body for refinement only
# Refinement group: limit the spatial extent that any refinement can reach
limit_volume:
bodies:
- body_name: CAARC
lvl: 6
normal_offsets: [0]
# Clamp all refinement (volume and body) to this bounding box.
# No block outside this box is refined beyond level 0.
volume_refinement_limit:
is_abs: true
start: [0, 10, 0]
end: [40, 50, 150]
# Refinement group: suppress refinement inside specified volumes
ignore_volumes:
bodies:
- body_name: CAARC
lvl: 6
# !range generates a numeric list: np.arange(start, end, step)
# End is exclusive. Generates from -0.5 to 4.9, doesn't include 5
normal_offsets: !range [-0.5, 5, 0.2]
- { body_name: CAARC.surface_name, lvl: 4, normal_offsets: !math [1/4] }
# Blocks fully inside any of these volumes are not refined.
volumes_not_refine:
- is_abs: true # Absolute lattice coordinates
start: [0, 10, 0]
end: [40, 50, 150]
- is_abs: false # Relative coordinates (fraction of domain size)
start: [0, 0.15, 0.1]
end: [0.1, 0.2, 0.3]
# Refinement group: exhaustive example using all available sub-fields
full_example:
volumes_refine:
- is_abs: true
lvl: 1
start: [0.0, 0.0, 0.0]
end: [32.0, 160.0, 64.0]
- is_abs: true
lvl: 4
start: [10, 20, 30]
end: [32.0, 160.0, 64.0]
bodies:
- body_name: CAARC
lvl: 6
normal_offsets: [0]
volume_refinement_limit:
is_abs: true
start: [0, 10, 0]
end: [40, 50, 150]
volumes_not_refine:
- is_abs: true
start: [0, 10, 0]
end: [40, 50, 150]
- is_abs: false
start: [0, 0.15, 0.1]
end: [0.1, 0.2, 0.3]
# Point-cloud bodies defined by CSV files instead of .lnas geometry.
# The CSV must contain columns: x, y, z, nx, ny, nz, area
point_clouds:
tree: # User-chosen name for this point-cloud body
IBM:
# True to activate IBM for this point cloud
run: true
# IBM config name from models.IBM.body_cfgs
cfg_use: tree_cfg
# IBM force-spread order. Bodies with the same order are processed
# together; lower order runs first.
order: 1
# Interval in time steps during which IBM is active.
# end_step: 0 = run until the simulation ends; start_step: 0 = from step 0
interval_run:
end_step: 0
start_step: 0
csv_path: fixture/point_cloud/sphere_100.csv # Path to point-cloud CSV
# STL/LNAS bodies for IBM. Each key is a user-chosen body name.
# !not-inherit prevents this dict from merging with any parent simulation's
# bodies field - the child's bodies list replaces the parent's entirely.
bodies: !not-inherit
CAARC: # User-chosen body name; referenced elsewhere by this key
IBM:
run: true # True to activate IBM for this body
cfg_use: building_cfg # IBM config name from models.IBM.body_cfgs
# Bodies with the same order are processed together;
# lower order runs first
order: 1
# Interval in time steps during which IBM is active.
# 0 means no limit is applied
interval_run:
end_step: 0
start_step: 0
lnas_path: fixture/lnas/wind_tunnel/CAARC.lnas # Path to .lnas geometry file
# Offsets (in lattice nodes) at which to add copies of this geometry,
# displaced along surface normals. Produces layered IBM shells.
# !range [-5, 1, 0.5] -> [-5.0, -4.5, -4.0, ..., 0.5]
normal_offsets_add: !range [-5, 1, 0.5]
# Height attributed to each surface triangle for volumetric algorithms.
# Defaults to 1
height_assume: 1
# Whether the surface height scales with refinement level.
# height_at_lvl = height_assume * 2^lvl
# Defaults to false
height_scales_per_lvl: false
# How to handle triangles smaller than area.min.
# Options are: "add" (keep them), "ignore" (discard them), "error" (raise an exception)
# Defaults to "error"
small_triangles: "add" # | "ignore"
# Triangle area bounds (at level 0). Triangles above area.max are
# subdivided; triangles below area.min follow the small_triangles policy.
# max should be at least 4× min.
# Defaults to { min: 0.25, max: 1 }
area: { min: 0.25, max: 1 }
# Clip triangles outside these bounding boxes at each geometry stage.
# Defaults to no limit (no triangles removed).
volumes_limits:
# Applied to raw geometry coordinates, before any transformation
raw:
- start: [10, 20, 0]
end: [100, 100, 100]
# Applied after this body's own transformation
body_transformed:
- start: [60, 40, 0]
end: [100, 200, 100]
# Applied after all transformations (including global_transformations)
full_transformed:
- start: [60, 40, 0]
end: [100, 200, 100]
- start: [100, 40, 50]
end: [150, 140, 70]
# Affine transformation applied to this body before it enters the domain.
# Order: translate to fixed_point -> scale -> rotate -> translate -> revert.
# rotation values are in radians.
transformation:
fixed_point: [0, 0, 0] # Pivot point for scale and rotation
rotation: [0, 0, 0] # Rotation angles [rx, ry, rz] in radians
scale: [1.0, 1.0, 1.0] # Per-axis scale factors
translation: [12.0, 0.0, 1.0] # Final translation [x, y, z]
cube_not_run: # Second body - IBM disabled for this one
IBM:
run: false # IBM is not computed for this body
cfg_use: terrain_cfg # Config name is still declared (for reference)
lnas_path: fixture/lnas/basic/cube.lnas
transformation:
fixed_point: [0, 0, 0]
rotation: [0, 0, 0]
scale: [1.0, 1.0, 1.0]
translation: [12.0, 0.0, 1.0]
# Clip all IBM nodes (bodies and point clouds) to this bounding box.
# Nodes outside this volume are discarded from the IBM kernel.
# Defaults to the full domain.
bodies_domain_limits:
start: [8.0, 4.0, 0.0]
end: [448.0, 156.0, 48.0]
is_abs: true # true = absolute lattice coordinates
# Apply the same affine transformation to multiple bodies, point clouds,
# and/or probe sets in one step. Transformations are applied after any
# body-specific ones and in list order.
global_transformations:
- transformation:
translation: [0, 0.5, 0] # [x, y, z] shift
fixed_point: [608.90625, 64, 0] # Pivot for rotation and scale
rotation: [0, 0, 3.141592654] # Rotate 180° around z-axis
scale: [1, 1, 1]
point_clouds_apply: [] # Point-cloud names to transform
bodies_apply: ["CAARC"] # Body names to transform
# Probe paths to transform. Syntax:
# "series.<series_name>" - all probes in a series
# "series.<series_name>.lines" - all lines in a series
# "series.<series_name>.points.<point_name>" - a specific point
# "spectrum.<point_name>" - a spectrum-analysis point
probes_apply:
[
"series.series1.lines",
"series.series1.points.point1",
"series.another_series",
]
# True to also apply this transformation to body-type probes
# (probes defined via a body_name) within the listed series.
# Defaults to false
apply_to_bodies_probes: false
- transformation:
translation: [50, -10, 0]
point_clouds_apply: []
bodies_apply: []
probes_apply: ["spectrum.downstream", "spectrum.upstream"]
# True to apply this transformation to bodies in probes series as well
apply_to_bodies_probes: true
# ----------------------------------------------------------------
# Checkpoint
# ----------------------------------------------------------------
checkpoint:
export:
# Interval at which to write checkpoint files to disk.
# No export when this block is absent.
interval: { end_step: 30000, frequency: 5000, start_step: 10000 }
# True to write a final checkpoint when the simulation ends.
# Defaults to false
finish_save: true
# True to delete all but the most recent checkpoint on disk,
# saving storage at the cost of losing earlier restart points.
# Defaults to false
keep_only_last_checkpoint: false
load:
# True to resume from a saved checkpoint at startup.
# Defaults to false
checkpoint_start: true
# True to reset the time counter to 0 after loading the checkpoint fields.
# Useful for branching a new run from an existing flow state.
# Defaults to false
reset_time_step: false
# Path to the folder containing the checkpoint to load.
# When null, Nassu looks for the latest checkpoint in save_path.
# Defaults to null
folderpath: "path/to/checkpoint/folder"
# ----------------------------------------------------------------
# Data export
# ----------------------------------------------------------------
data:
# Check for NaN values (simulation divergence) at this interval.
# end_step: 0 means check until the simulation ends.
divergence: { end_step: 0, frequency: 50, start_step: 0 }
# Global field monitors - compute scalar statistics (min, max, mean)
# over the domain and write them to CSV at each interval.
monitors:
fields:
# monitor_name:
# ... specs
# Monitor 1: track the maximum density and its position
rho_max:
macrs: [rho] # Macroscopics to monitor (rho, u, S, etc.)
# Statistics to compute. Options: min, max, mean, pos
# pos reports the grid coordinate of the max/min value
stats: [max, pos]
interval: { start_step: 500, end_step: 10000, frequency: 50 }
macrs_rescale: *MACRS_RESCALE # Converts lattice values to physical units
# Rename macroscopic output columns for readability.
# Keys are lattice names (ux, uy, rho ...), values are output column names.
macrs_rename: { rho: pressure, ux: vel_x, uy: vel_y }
# Multiply the time-step index by this factor for the output time column.
# Use the lattice-to-physical time ratio to convert to seconds.
# Defaults to 1
time_rescale: 1
# Monitor 2: min/max/mean of multiple macroscopics over a sub-volume
macrs_stats:
# u expands to ux/uy/uz; S expands to all Sij components
macrs: [rho, u, S]
stats: [min, max, mean]
interval: { start_step: 0, end_step: 0, frequency: 500 }
macrs_rescale: *MACRS_RESCALE
macrs_rename: { rho: pressure, ux: vel_x, uy: vel_y }
time_rescale: 2.45
# Restrict monitoring to blocks that overlap these boxes.
# If omitted, the full domain is monitored.
volumes_monitor:
- start: [10, 20, 0]
end: [100, 100, 100]
is_abs: true # Absolute lattice coordinates
- start: [0.1, 0.1, 0.1]
end: [0.9, 0.9, 0.9]
is_abs: false # Relative coordinates (fraction of domain size)
# Exclude blocks fully inside these boxes from the monitor.
volumes_ignore:
- start: [50, 60, 5]
end: [70, 120, 10]
is_abs: true
# Time-series probes - record macroscopic values at specific positions
# or on body surfaces at every sampled step.
probes:
# historic_series: probes that accumulate a time history.
# Each named entry writes an HDF5 and optionally CSV/XDMF output.
historic_series:
series1:
# Output formats for this series.
# Options are: csv, hdf, xdmf
# Defaults to ["hdf", "xdmf"]
formats_export: [csv, hdf, xdmf]
macrs: [rho, u] # Macroscopics to sample
interval:
start_step: 78 # First step to sample
end_step: 0 # 0 = sample until the simulation ends
frequency: 10 # Sample every 10 steps
# Refinement level whose time step governs the frequency counter.
# Use the highest level present for the finest time resolution.
lvl: 4
macrs_rescale: *MACRS_RESCALE
# Rename output columns; keys are lattice names, values are output names
macrs_rename: { rho: pressure }
# Multiply the time-step counter by this factor for the output time axis.
# Defaults to 1
time_rescale: 10
# Group this many time steps into a single HDF5 table.
# Smaller values reduce memory at the cost of more tables per file.
# Defaults to 1000
interval_group: 1000
# Flush the HDF5 buffer to disk every N sampled steps to prevent
# data loss on unexpected termination.
# Defaults to 250
interval_flush: 250
# Probe lines - uniformly spaced points along a straight segment
lines:
line1:
start_pos: [200.46875, 79.4285, 2.905] # Line start [x, y, z]
end_pos: [200.46875, 80.5715, 2.905] # Line end [x, y, z]
dist: 0.28575 # Spacing between sample points along the line
# Single probe points
points:
point1:
pos: [200.46875, 79.4285, 2.905] # Point position [x, y, z]
# Body-surface probes - sample at triangle centroids or vertices
# of a named body at a given normal offset.
bodies:
my_CAARC:
body_name: "CAARC" # Must match a key in domain.bodies
normal_offset: 0.03125 # Offset from the surface along the normal
# "cell" samples at triangle centroids; "vertex" at triangle vertices
element_type: "cell" # or "vertex"
my_surface:
# Reference a named sub-surface using "body_name.surface_name" syntax
body_name: "CAARC.surface_name"
normal_offset: -0.03125 # Negative offset = inside the body
element_type: "cell" # or "vertex"
# CSV-defined probe points - read positions from a file.
# The CSV must have a header row with columns x, y, z (comma-separated).
csvs:
my_csv:
filename: "my_filename.csv"
my_other_csv:
filename: "another csv"
# Minimal series example - only required fields
another_series:
macrs: [u, S]
time_rescale: 2.3
macrs_rescale:
ux:
mul: 51
cte: 0
interval:
frequency: 10 # start_step and end_step default to 0 (full run)
lvl: 4
lines:
line1:
start_pos: [200.46875, 79.4285, 2.905]
end_pos: [200.46875, 80.5715, 2.905]
dist: 0.28575
# spectrum_analysis: records every single time step without spatial
# interpolation - essential for frequency-domain (FFT) analysis.
# Only point probes are supported (no interpolation that could alias the signal).
spectrum_analysis:
macrs: [rho, u] # Macroscopics to record at each time step
time_rescale: 10.3 # Physical time per step - scales the output time axis
macrs_rescale: *MACRS_RESCALE
macrs_rename: { rho: pressure }
# Individual probe points. If the position does not coincide with a node,
# the nearest lattice node is used (no interpolation).
points:
upstream:
pos: [200.46875, 80.0, 4.81]
downstream:
pos: [201.48375, 80.0, 4.81]
# Running statistics - accumulate mean (1st order) and mean-of-square
# (2nd order) fields in memory during the simulation.
statistics:
# Interval over which to accumulate statistical samples.
interval:
start_step: 78 # Begin accumulating at this step
end_step: 0 # 0 = accumulate until the simulation ends
frequency: 10 # Sample every 10 steps
# Restrict accumulation to blocks overlapping this box.
# Blocks outside are exported as NaN.
volume_stats:
start: [32, 0, 0]
end: [448, 128, 64]
is_abs: true
# Macroscopics for which to compute first-order statistics (time-mean).
macrs_1st_order: [rho, u]
# Macroscopics for which to compute second-order statistics (mean of square).
# Combine with macrs_1st_order to derive variance: <x^2> - <x>^2
macrs_2nd_order: [rho, u]
# Named export configurations - each writes a separate output file set.
exports:
default:
interval:
frequency: 5000 # Write statistics snapshot every 5000 steps
export_area2:
# Physical-units time multiplier for the output time axis.
# Defaults to 1
time_rescale: 5.67
# WARNING: apply macrs_rescale on statistics only if you know what
# you are doing - a linear rescale on 2nd-order fields distorts variance.
macrs_rescale: *MACRS_RESCALE
# Restrict this export to a sub-volume of the statistics domain.
volume_export:
start: [32, 0, 0]
end: [448, 128, 64]
is_abs: true
interval:
frequency: 50 # Export more frequently for this sub-volume
# Instantaneous field snapshots - write full volumetric fields at intervals.
# Each named entry produces an independent XDMF+HDF5 (or VTK .vtm) file set.
instantaneous:
default:
interval:
start_step: 78
end_step: 0 # 0 = export until the simulation ends
frequency: 10 # Write a snapshot every 10 steps
# Restrict the export to blocks that overlap this bounding box.
# Any block fully outside the box is omitted from the file.
volume_export:
start: [32, 0, 0]
end: [448, 128, 64]
is_abs: true
macrs: ["rho", "S"] # Macroscopics to include in each snapshot
# Multiply the time-step counter for the output time axis.
# Defaults to 1
time_rescale: 1
macrs_rescale: *MACRS_RESCALE
# Maximum refinement level to include. Coarser output reduces file size.
# Defaults to -1 (export all levels up to the maximum)
max_lvl: 3
# Output format.
# Options are: xdmf (XDMF+HDF5, default), vtm (legacy VTK multiblock)
format_export: xdmf
# Roll over to a new HDF5 file when the current one exceeds this size (GB).
# Only used when format_export is "xdmf".
# Defaults to 4.0
max_h5_size_gb: 4.0
# Second export configuration - different macroscopics, same interval
export_area2:
interval:
frequency: 10
macrs: ["u", "rho", "Sxx"] # Export individual stress component Sxx
macrs_rescale: *MACRS_RESCALE
time_rescale: 1
# Export IBM Lagrangian node data (position, interpolated velocity,
# spread force) to CSV files at the specified interval.
export_IBM_nodes:
export_caarc:
body_name: CAARC # Must match a key in domain.bodies or domain.point_clouds
start_step: 0 # 0 = start from the first step
end_step: 0 # 0 = export until the simulation ends
frequency: 0 # 0 = export at every step (use with care - large output)
# ----------------------------------------------------------------
# Models
# ----------------------------------------------------------------
models:
# GPU engine configuration
engine:
# Specific GPU device indices to use (e.g. [0, 1]).
# null lets Nassu pick devices automatically in ascending order.
# Defaults to null
devices_numbers: null
# Number of GPU devices to use. Multi-device is not supported yet.
# Defaults to 1
n_devices: 1
# Compute engine.
# Options are: CUDA
name: CUDA
# Floating-point precision settings
precision:
# Precision used inside CUDA kernels for intermediate calculations.
# Options are: single, double, default
# Defaults to default
calculations: default
# Base precision - must always be specified explicitly.
# Options are: single, double
default: single
# Precision used for macroscopic field storage (rho, u, pi_neq, ...).
# Options are: single, double, default
# Defaults to default
macroscopics: default
# Precision used for LBM populations in GPU shared memory.
# Options are: single, double, default
# Defaults to default
populations: default
# Multiblock grid communication settings
multiblock:
# Number of overlap nodes on the fine side of each Fine-to-Coarse interface.
# Defaults to 1
overlap_F2C: 2
# Per-level override for overlap_F2C. Key is the level number, value is the
# overlap count to use for that level's F2C communication.
# Defaults to {} (no per-level overrides)
custom_overlap_F2C:
1: 3 # Level 1 -> use overlap of 3
4: 2 # Level 4 -> use overlap of 2
# Spatial filter size (in nodes) for averaging the stress tensor
# at fine-to-coarse interfaces. 0 disables filtering.
# Defaults to 0
stress_filter_F2C: 1
# Mark coarse-side nodes inside F2C communication regions as unused
# to prevent undefined behaviour from uninitialised values.
# Defaults to true
mark_nodes_as_unused: true
# LES turbulence model
LES:
# Subgrid scale model to use.
# Options are: Smagorinsky
model: Smagorinsky
# Smagorinsky constant C_S. Typical CWE range: 0.10 to 0.17
sgs_cte: 0.17
# Lattice Boltzmann Method settings
LBM:
# Global body force vector applied uniformly across the domain [x, y, z].
# Non-zero values drive a pressure-gradient flow (e.g. channel flow).
F: { x: 0, y: 0, z: 0 }
# Collision operator.
# Options are: RBGK (2nd-order Hermite), RRBGK (3rd-order, default for LES),
# HRRBGK (hybrid regularised)
coll_oper: RRBGK
# Operator-specific parameters (only relevant for HRRBGK)
coll_oper_params:
# Blending constant sigma for HRRBGK. Valid range: 0.95 to 1.0
# Defaults to 0.99
sigma_hrrbgk: 0.99
# Blending mode for HRRBGK.
# Options are: dynamic, constant
# Defaults to constant
mode_hrrbgk: dynamic
# True to activate the thermal model (variable theta ≠ 0).
# The isothermal path (theta = 0) is the production default.
thermal_model: false
# Relaxation time tau = 1/omega. Controls kinematic viscosity:
# nu = cs^2 * (tau - 0.5) * dt. Must satisfy tau > 0.5 for stability.
# With RRBGK, values up to tau ≈ 1 remain stable at high Re.
tau: 0.5000008125
# Velocity set (lattice topology).
# Options are: D2Q9 (2D), D3Q15, D3Q19, D3Q27 (3D)
# D3Q27 is preferred for LES - complete 27-dimensional Hermite basis
vel_set: D3Q27
# Immersed Boundary Method settings
IBM:
# Discrete Dirac delta kernel width.
# Options are: 3_points (support ±1.5 Δx), 4_points (support ±2 Δx)
# Defaults to 3_points
dirac_delta: 3_points
# Minimum sum of Dirac delta weights for IBM to operate on a node.
# Use 0.99 to skip nodes near domain boundaries or multiblock transitions
# where the stencil is truncated. Lower values allow IBM everywhere.
# Defaults to 0.99
min_dirac_sum: 0.99
# Number of steps over which the IBM force ramps linearly from 0 to 100 %.
# Prevents a large transient impulse at startup.
# Defaults to 0 (no ramp)
forces_accomodate_time: 500
# Convergence threshold on the force change between IBM sub-iterations.
# A reasonable value for CWE is 1e-3 (≈ rho * u_ref^2).
# Defaults to 1000 (effectively no limit)
forces_spread_limit: 1e-3
# True to set IBM forces to zero at the start of each time step,
# so each step converges from scratch. False reuses the previous
# step's force as the initial guess (can speed up convergence).
# Defaults to true
reset_forces: false
# Named IBM configurations referenced by domain.bodies.IBM.cfg_use
body_cfgs:
# Built-in default config - empty means all sub-fields use their defaults
default: {}
# Configuration for a building body with an equilibrium TBL wall model
building_cfg:
# Number of IBM sub-iterations per time step.
# More iterations improve convergence at the cost of runtime.
# Defaults to 5
n_iterations: 5
# Multiply the computed IBM force by this factor before spreading.
# 1.0 = standard IBM; values < 1 damp the force (useful for porous media).
# Defaults to 1
forces_factor: 1
# True to use kinetic-energy-based force correction instead of velocity
# correction (for modelling porous drag coefficients).
# Incompatible with wall_model.
# Defaults to false
kinetic_energy_correction: false
# Equilibrium wall model - replaces conventional no-slip IBM near walls.
wall_model:
# Wall model type.
# Options are: EqTBL (turbulent boundary layer), EqLog (log-law)
name: EqTBL
# Distance (in lattice nodes) from the wall at which to interpolate
# the tangential velocity used by the wall model.
dist_ref: 2
# Thickness of the IBM spreading shell (in lattice nodes).
dist_shell: 0.25
# Step at which to switch from conventional IBM to the wall model.
# Conventional IBM is applied for steps < start_step.
# Defaults to 1000
start_step: 1000
# Parameters specific to the EqTBL model
params:
z0: 0.0001 # Roughness length in lattice units
TDMA_max_error: 5e-06 # TDMA solver convergence tolerance
TDMA_max_iters: 50 # Maximum TDMA iterations per node
# Number of TDMA grid divisions (must be an odd number)
TDMA_max_div: 25 # Maximum divisions
TDMA_min_div: 21 # Minimum divisions
# Target y+ for the first TDMA node nearest the wall
# Defaults to 0.2
TDMA_yp_target: 0.2
# True to apply viscous velocity correction at near-wall IBM nodes.
# Improves accuracy in the viscous sublayer.
visc_correction: true
# Configuration for a terrain body with a log-law wall model
terrain_cfg:
n_iterations: 3
forces_factor: 1
kinetic_energy_correction: false
wall_model:
# Equilibrium log-law wall model
name: EqLog
dist_ref: 2.5
dist_shell: 0.5
# Parameters specific to the EqLog model
params:
z0: 0.0001 # Aerodynamic roughness length in lattice units
visc_correction: false
# Configuration for a tree point-cloud body (porous canopy model)
tree_cfg:
n_iterations: 5
# forces_factor < 1 with kinetic_energy_correction = true models
# drag through a porous medium (e.g. a forest canopy)
forces_factor: 0.12
kinetic_energy_correction: true
# Configuration using a constant-force model instead of velocity matching
constant_body_cfg:
n_iterations: 1
forces_factor: 0.12
# Apply a fixed body force rather than iterating to enforce a velocity.
# Useful for modelling prescribed-drag elements (signage, screens, etc.)
constant_model:
# Target velocity magnitude imposed on the body normal direction.
constant_velocity: 1e-2
# Factor applied to the tangential component of the force.
# 0 = no tangential correction; 1 = drive tangential velocity to zero.
tangential_force_factor: 0
# True to use individual surface normals for force direction.
# False = use direction_apply for all nodes.
use_normal: false
# Global force direction when use_normal is false [x, y, z].
# The vector does not need to be normalised.
# Defaults to null
direction_apply: [0, 1, -1]
# Initial condition for the macroscopic fields at t = 0
initialization:
# Path to a .vtm file from a previous simulation to initialise from.
# Nassu uses linear interpolation to map the field onto the current mesh.
# The file must contain at least rho and u; S is reconstructed via finite
# differences on u.
# Mutually exclusive with equations. Defaults to null.
# vtm_filename: ./fixture/vtms/multiblock_load.vtm
# Equation-based initialization: SymPy expressions for rho, ux, uy, uz
# as functions of physical node coordinates x, y, z (lattice units).
# Allowed functions: sin, cos, tan, asin, acos, atan, atan2, log, log2, log10,
# exp, sqrt, Abs, ceil, floor, Min/min, Max/max.
# Defaults: rho="1", ux="0", uy="0", uz="0"
equations:
rho: "1"
ux: "0.05 * (z / 100.0) ** 0.25"
uy: "0"
uz: "0"
# SEM field initialization: prefill domain with mean SEM velocity profile ux(z).
# Requires models.BC.SEM to be configured.
# Defaults to false
sem_field: false
# Boundary conditions applied at each domain face
BC:
# Which domain faces use periodic BCs [x, y, z].
# A true entry means the face pair at x=0 and x=N_x are periodic.
periodic_dims: [false, false, false]
# Global TDMA parameters shared across all wall-model BCs that use TDMA.
# Individual IBM wall models can override these with their own params block.
WM_cfg:
TDMA_max_error: 5e-06 # Convergence tolerance for TDMA
TDMA_max_iters: 50 # Maximum iterations per TDMA solve
# Number of TDMA divisions (must be an odd number)
TDMA_max_div: 25
# Ordered list of BCs to apply. BCs with the same order are applied
# simultaneously; conflicts are resolved in list order (last wins).
# Available BC types:
# HWBB, RegularizedHWBB - no-slip walls
# VelocityBounceBack - moving wall
# UniformFlow - prescribed inlet velocity
# Neumann, RegularizedNeumannSlip - zero-gradient / free-slip
# RegularizedNeumannOutlet - zero-gradient outlet with fixed rho
BC_map:
- # Boundary condition name (see available types above)
BC: RegularizedNeumannOutlet # Zero-gradient outlet
# Order in which to apply the BC. 0 runs first, then 1, then 2.
order: 2
# Face position. Options: E (x=N), W (x=0), N (y=N), S (y=0),
# F (z=N), B (z=0). Combine for edges/corners: NF, SW, etc.
pos: E
# Outward normal direction of this face (points out of the domain).
# Combine directions for edge/corner normals the same way as pos.
wall_normal: E
# BC-specific kwarg: target density at the outlet
rho: 1.0
- BC: RegularizedNeumannSlip # Free-slip top face
order: 1
pos: F # z = N (top face)
wall_normal: F
- BC: RegularizedHWBB # No-slip ground (regularised halfway bounce-back)
order: 1
pos: B # z = 0 (bottom face)
wall_normal: B
- BC: RegularizedNeumannSlip # Free-slip north lateral face
order: 0
pos: N # y = N
wall_normal: N
- BC: RegularizedNeumannSlip # Free-slip south lateral face
order: 0
pos: S # y = 0
wall_normal: S
- BC: Neumann # Zero-gradient at top-north edge
order: 0
# Combined position: nodes at both z=N and y=N
pos: NF
# Wall normal points in the N direction for this combined edge
wall_normal: N
- BC: Neumann # Zero-gradient at top-south edge
order: 0
pos: SF
wall_normal: S
# Reset mean density to rho_norm on the specified face after each step.
# Prevents slow pressure drift caused by mass conservation error at outlets.
# Empty list by default (no normalisation).
rho_normalization:
- pos: W # Face to compute the mean over. Options: N, S, W, E, F, B
rho_norm: 1.0 # Target mean density. Defaults to 1
# Synthetic Eddy Method - generates turbulent inflow at the inlet (x = 0).
# Requires the inlet face (W) to NOT have a UniformFlow BC assigned.
SEM:
eddies:
# Integral length scale used for eddy size [x, y, z] in lattice units.
# Larger values produce bigger, lower-frequency structures.
lengthscale: { x: 14, y: 14, z: 14 }
# Volumetric eddy density: n_eddies = density × (SEM_volume / eddy_volume).
# Higher values improve turbulence isotropy at the cost of more compute.
eddies_vol_density: 10
# Seed for the random number generator that initialises eddy positions.
# 0 = non-reproducible; any other integer gives a reproducible field.
seed_rand: 0
# Bounding box [y, z] for the eddy generation volume at the inlet.
# x extent is set automatically to 2 × lengthscale.x.
domain_limits_yz:
start: [16, 0] # [y_min, z_min] in lattice units
end: [48, 96] # [y_max, z_max] in lattice units
profile:
# CSV file containing the mean velocity and Reynolds stress tensor
# profile as a function of height z.
csv_profile_data: "fixture/SEM/example/real_profile.csv"
# Constant offset added to all z values when reading the profile.
# Shifts the profile vertically relative to the domain.
z_offset: 0
# Turbulence intensity tuning constant K.
# 1.0 matches the target turbulence intensity exactly.
K: 1
# Optional CSV with z height values that vary along the y direction.
# Used when the terrain height is not uniform across the inlet width.
csv_y_height: "fixture/SEM/example/y_heights.csv"
# Scale factor applied to all z (height) coordinates in the profile CSV
# and in csv_y_height. Use to convert from metres to lattice units.
# Defaults to 1
length_mul: 1
# Scale factor applied to all velocity values in the profile CSV.
# Reynolds stresses are scaled by vel_mul^2.
# Defaults to 1
vel_mul: 1
# ----------------------------------------------------------------
# Debug
# ----------------------------------------------------------------
# Developer flags - leave all values at their defaults for production runs.
debug:
IBM: { no_force_spread: false, no_nodes_export: false }
# no_force_spread: true skips IBM force spreading (diagnostics only)
# no_nodes_export: true suppresses IBM node export even if configured
LBM: { collision_only: false, no_macrs_export: false, streaming_only: false }
# collision_only: true runs only the collision step (no streaming)
# no_macrs_export: true suppresses all macroscopic field output
# streaming_only: true runs only the streaming step (no collision)
code_generation: { load_generated_files: false }
# load_generated_files: true reuses previously generated CUDA source files
# instead of regenerating - never use in production
multiblock: { export_comm_vtk: false, run_communication: true }
# export_comm_vtk: true writes block-communication debug VTK files
# run_communication: false skips multiblock communication (diagnostics)
output_IBM_nodes: false # True to print IBM node data to stdout
output_only: false # True to run export routines without advancing the LBM
profile: false # True to enable CUDA profiling markers