Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-03-28 07:46:08

0001 #!/usr/bin/env python3
0002 
0003 # Copyright (c) 2025 ACTS-Project
0004 # This file is part of ACTS.
0005 # See LICENSE for details.
0006 
0007 import argparse
0008 import time
0009 from pathlib import Path
0010 
0011 import acts
0012 import matplotlib.pyplot as plt
0013 import numpy as np
0014 from acts import MagneticFieldContext
0015 from matplotlib.colors import LogNorm
0016 from mpl_toolkits.axes_grid1 import make_axes_locatable
0017 
0018 
def create_toroidal_field():
    """Construct an acts.ToroidField using its default Config."""
    return acts.ToroidField(acts.ToroidField.Config())
0023 
0024 
def benchmark_single_evaluations(field, num_evaluations=10000):
    """
    Benchmark single magnetic field evaluations at random points.

    Draws a reproducible random sample of positions spanning the detector
    volume, times individual field.getField() calls, and prints throughput
    statistics.

    Returns a (seconds_per_call, calls_per_second) tuple.
    """
    print("\n=== Single Field Evaluation Benchmark ===")
    print(f"Number of evaluations: {num_evaluations}")

    # Fixed seed so repeated runs sample identical positions.
    np.random.seed(42)

    # Sample cylindrical coordinates, then map to Cartesian.
    radii = np.random.uniform(0.1, 5.0, num_evaluations)  # 0.1 m .. 5 m radius
    angles = np.random.uniform(0, 2 * np.pi, num_evaluations)
    z_coords = np.random.uniform(-10.0, 10.0, num_evaluations)  # -10 m .. +10 m

    points = np.column_stack(
        (radii * np.cos(angles), radii * np.sin(angles), z_coords)
    )

    # Warm-up: a handful of calls so first-use costs don't skew the timing.
    cache = field.makeCache(MagneticFieldContext())
    for row in points[:10]:
        field.getField(acts.Vector3(row), cache)

    # Timed loop over every sampled position.
    tic = time.perf_counter()
    for row in points:
        field.getField(acts.Vector3(row), cache)
    toc = time.perf_counter()

    elapsed = toc - tic
    per_call = elapsed / num_evaluations
    rate = num_evaluations / elapsed

    print(f"Total time: {elapsed:.4f} seconds")
    print(f"Average time per evaluation: " f"{per_call*1e6:.2f} microseconds")
    print(f"Evaluations per second: {rate:.0f}")

    return per_call, rate
0074 
0075 
def benchmark_vectorized_evaluations(field, batch_sizes=None):
    """Benchmark magnetic field evaluations with different batch sizes.

    For each batch size a fresh random point set is drawn and the evaluation
    loop is repeated enough times to total roughly 1000 calls, so small
    batches still yield a measurable timing. Returns a list of dicts with
    per-batch-size statistics.
    """
    batch_sizes = [1, 10, 100, 1000, 10000] if batch_sizes is None else batch_sizes
    print("\n=== Vectorized Field Evaluation Benchmark ===")

    stats = []

    for size in batch_sizes:
        print(f"\nBatch size: {size}")

        # Reproducible cylindrical sample, converted to Cartesian coordinates.
        np.random.seed(42)
        radii = np.random.uniform(0.1, 5.0, size)
        angles = np.random.uniform(0, 2 * np.pi, size)
        z_coords = np.random.uniform(-10.0, 10.0, size)

        pts = np.column_stack(
            (radii * np.cos(angles), radii * np.sin(angles), z_coords)
        )

        # Fresh context/cache for each batch.
        cache = field.makeCache(MagneticFieldContext())

        # Warm up the field before timing.
        for row in pts[: min(10, size)]:
            field.getField(acts.Vector3(row), cache)

        # Repeat small batches so each measurement covers ~1000 calls.
        repeats = max(1, 1000 // size)

        tic = time.perf_counter()
        for _ in range(repeats):
            for row in pts:
                field.getField(acts.Vector3(row), cache)
        toc = time.perf_counter()

        n_calls = repeats * size
        elapsed = toc - tic
        per_call = elapsed / n_calls
        rate = n_calls / elapsed

        print(f"  Total evaluations: {n_calls}")
        print(f"  Total time: " f"{elapsed:.4f} seconds")
        print(
            f"  Average time per evaluation: "
            f"{per_call*1e6:.2f} microseconds"
        )
        print(f"  Evaluations per second: {rate:.0f}")

        stats.append(
            {
                "batch_size": size,
                "avg_time_us": per_call * 1e6,
                "eval_per_sec": rate,
                "total_evaluations": n_calls,
            }
        )

    return stats
0142 
0143 
def benchmark_spatial_distribution(field, grid_resolution=50):
    """Benchmark field evaluation across different spatial regions.

    Scans each named detector region on a grid_resolution x grid_resolution
    (r, z) grid at phi = 0, collecting per-call timings and |B| values.
    Returns a list of dicts with per-region statistics.
    """
    print("\n=== Spatial Distribution Benchmark ===")
    print(f"Grid resolution: {grid_resolution}x{grid_resolution} points")

    # Representative regions of a toroid-style detector geometry (meters).
    regions = [
        {"name": "Barrel Toroid", "r_range": (1.0, 3.0), "z_range": (-2.0, 2.0)},
        {"name": "Forward Endcap", "r_range": (0.5, 4.0), "z_range": (2.0, 8.0)},
        {"name": "Backward Endcap", "r_range": (0.5, 4.0), "z_range": (-8.0, -2.0)},
        {"name": "Central", "r_range": (0.1, 1.0), "z_range": (-1.0, 1.0)},
    ]

    summaries = []

    for spec in regions:
        print(f"\n--- {spec['name']} Region ---")

        r_lo, r_hi = spec["r_range"]
        z_lo, z_hi = spec["z_range"]

        r_grid = np.linspace(r_lo, r_hi, grid_resolution)
        z_grid = np.linspace(z_lo, z_hi, grid_resolution)

        # Fresh context/cache for this region.
        cache = field.makeCache(MagneticFieldContext())

        per_call_times = []
        magnitudes = []

        tic = time.perf_counter()
        for r_val in r_grid:
            for z_val in z_grid:
                # phi = 0 for consistency: position is (r, 0, z).
                point = acts.Vector3(r_val, 0.0, z_val)
                t0 = time.perf_counter()
                b = field.getField(point, cache)
                t1 = time.perf_counter()

                per_call_times.append(t1 - t0)
                magnitudes.append(np.sqrt(b[0] ** 2 + b[1] ** 2 + b[2] ** 2))
        toc = time.perf_counter()

        n_calls = len(per_call_times)
        elapsed = toc - tic
        mean_t = np.mean(per_call_times)
        sigma_t = np.std(per_call_times)
        mean_mag = np.mean(magnitudes)

        print(f"  Total evaluations: {n_calls}")
        print(f"  Total time: " f"{elapsed:.4f} seconds")
        print(
            f"  Average time per evaluation: "
            f"{mean_t*1e6:.2f} ± {sigma_t*1e6:.2f} microseconds"
        )
        print(f"  Average field magnitude: " f"{mean_mag:.4f} Tesla")
        print(f"  Evaluations per second: " f"{n_calls/elapsed:.0f}")

        summaries.append(
            {
                "region": spec["name"],
                "total_evaluations": n_calls,
                "avg_time_us": mean_t * 1e6,
                "std_time_us": sigma_t * 1e6,
                "avg_field_magnitude": mean_mag,
                "eval_per_sec": n_calls / elapsed,
            }
        )

    return summaries
0223 
0224 
def plot_benchmark_results(batch_results, region_results, output_dir):
    """Render batch-size and per-region performance plots as PNG files.

    Writes batch_performance.png and regional_performance.png into
    output_dir (created if missing).
    """
    out = Path(output_dir)
    out.mkdir(exist_ok=True)

    # --- Figure 1: performance vs. batch size ---
    sizes = [entry["batch_size"] for entry in batch_results]
    times_us = [entry["avg_time_us"] for entry in batch_results]
    rates = [entry["eval_per_sec"] for entry in batch_results]

    fig, (ax_time, ax_rate) = plt.subplots(1, 2, figsize=(12, 5))

    ax_time.semilogx(sizes, times_us, "o-", linewidth=2, markersize=8)
    ax_time.set_xlabel("Batch Size")
    ax_time.set_ylabel("Average Time per Evaluation (μs)")
    ax_time.set_title("Evaluation Time vs Batch Size")
    ax_time.grid(True, alpha=0.3)

    ax_rate.semilogx(sizes, rates, "s-", linewidth=2, markersize=8, color="orange")
    ax_rate.set_xlabel("Batch Size")
    ax_rate.set_ylabel("Evaluations per Second")
    ax_rate.set_title("Evaluation Rate vs Batch Size")
    ax_rate.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(out / "batch_performance.png", dpi=150, bbox_inches="tight")
    plt.close()

    # --- Figure 2: performance by spatial region ---
    names = [entry["region"] for entry in region_results]
    reg_times = [entry["avg_time_us"] for entry in region_results]
    reg_rates = [entry["eval_per_sec"] for entry in region_results]
    palette = ["skyblue", "lightgreen", "lightcoral", "gold"]

    fig, (ax_time, ax_rate) = plt.subplots(1, 2, figsize=(12, 5))

    time_bars = ax_time.bar(names, reg_times, color=palette)
    ax_time.set_ylabel("Average Time per Evaluation (μs)")
    ax_time.set_title("Evaluation Time by Region")
    ax_time.tick_params(axis="x", rotation=45)

    # Annotate each bar with its value, slightly above the bar top.
    for rect, value in zip(time_bars, reg_times):
        top = rect.get_height()
        ax_time.text(
            rect.get_x() + rect.get_width() / 2.0,
            top + top * 0.01,
            f"{value:.1f}μs",
            ha="center",
            va="bottom",
        )

    rate_bars = ax_rate.bar(names, reg_rates, color=palette)
    ax_rate.set_ylabel("Evaluations per Second")
    ax_rate.set_title("Evaluation Rate by Region")
    ax_rate.tick_params(axis="x", rotation=45)

    for rect, value in zip(rate_bars, reg_rates):
        top = rect.get_height()
        ax_rate.text(
            rect.get_x() + rect.get_width() / 2.0,
            top + top * 0.01,
            f"{value:.0f}/s",
            ha="center",
            va="bottom",
        )

    plt.tight_layout()
    plt.savefig(out / "regional_performance.png", dpi=150, bbox_inches="tight")
    plt.close()

    print(f"\nPlots saved to {out}/")
0303 
0304 
def _eval_field_batch(field, points):
    """Evaluate B-field for an array of points (N,3) -> (N,3)."""
    cache = field.makeCache(MagneticFieldContext())
    result = np.empty_like(points, dtype=np.float64)
    # One getField call per point; results copied component-wise.
    for idx, pt in enumerate(points):
        b = field.getField(acts.Vector3(pt), cache)
        result[idx, 0] = b[0]
        result[idx, 1] = b[1]
        result[idx, 2] = b[2]
    return result
0316 
0317 
def plot_field_maps(
    field,
    output_dir,
    # XY slice params
    xy_z_plane=0.20,
    xy_xlim=(-10.0, 10.0),
    xy_ylim=(-10.0, 10.0),
    xy_nx=520,
    xy_ny=520,
    # ZX slice params
    zx_y_plane=0.10,
    zx_zlim=(-22.0, 22.0),
    zx_xlim=(-11.0, 11.0),
    zx_nz=560,
    zx_nx=560,
    # Viz params
    log_vmin=1e-4,
    log_vmax=4.1,
    quiver_stride_xy=28,  # ~ how many arrows across
    quiver_stride_zx=28,
):
    """
    Produce two figures:
      1) XY slice at fixed z=xy_z_plane
      2) ZX slice at fixed y=zx_y_plane
    Saved to output_dir as field_xy.png and field_zx.png

    Each figure shows |B| on a log color scale plus a unit-vector quiver of
    the in-plane field direction. All lengths are in meters, field values in
    Tesla (per the axis/colorbar labels below).

    Parameters:
        field: field provider with makeCache()/getField() (used via
            _eval_field_batch).
        output_dir: target directory; created if missing.
        xy_z_plane, xy_xlim, xy_ylim, xy_nx, xy_ny: XY-slice plane position,
            axis limits, and grid counts (counts clamped to >= 60 below).
        zx_y_plane, zx_zlim, zx_xlim, zx_nz, zx_nx: same for the ZX slice.
        log_vmin, log_vmax: LogNorm color range for |B|.
        quiver_stride_xy, quiver_stride_zx: approximate number of arrows
            across the image; converted to an array stride below.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok=True)

    # ---------- XY slice ----------
    # Grid counts are clamped to a minimum of 60 samples per axis.
    x = np.linspace(xy_xlim[0], xy_xlim[1], int(max(60, xy_nx)), dtype=np.float64)
    y = np.linspace(xy_ylim[0], xy_ylim[1], int(max(60, xy_ny)), dtype=np.float64)
    X, Y = np.meshgrid(x, y, indexing="xy")
    Z = np.full_like(X, float(xy_z_plane), dtype=np.float64)

    # Flatten the grid to (N,3) points, evaluate, and reshape back to 2D+3.
    pts_xy = np.column_stack([X.ravel(), Y.ravel(), Z.ravel()])
    B_xy = _eval_field_batch(field, pts_xy).reshape(*X.shape, 3)
    Bx, By, Bz = B_xy[..., 0], B_xy[..., 1], B_xy[..., 2]
    Bmag = np.sqrt(Bx**2 + By**2 + Bz**2, dtype=np.float64)

    Bpos = np.clip(Bmag, log_vmin * 1e-2, None)  # avoid zeros on log scale
    norm = LogNorm(vmin=float(log_vmin), vmax=float(log_vmax))

    fig, ax = plt.subplots(figsize=(8.8, 8.8))
    im = ax.imshow(
        Bpos,
        extent=[x.min(), x.max(), y.min(), y.max()],
        origin="lower",
        aspect="equal",
        norm=norm,
        cmap="gnuplot2",
    )
    cbar = plt.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
    cbar.set_label("|B| [T] (log scale)")

    # Quiver (unit-length for direction)
    # Stride subsamples the grid so roughly quiver_stride_xy arrows span it.
    step = max(1, X.shape[1] // quiver_stride_xy)
    # Zero-magnitude cells divide by inf -> zero-length arrows (no div-by-0).
    Bsafe = np.where(Bmag > 0, Bmag, np.inf)
    Ux = Bx / Bsafe
    Uy = By / Bsafe
    ax.quiver(
        X[::step, ::step],
        Y[::step, ::step],
        Ux[::step, ::step],
        Uy[::step, ::step],
        scale=28,
        width=0.003,
    )

    ax.set_xlim(xy_xlim)
    ax.set_ylim(xy_ylim)
    ax.set_xlabel("x [m]")
    ax.set_ylabel("y [m]")
    ax.set_title(f"|B| in z = {float(xy_z_plane):.2f} m plane")
    plt.tight_layout()
    # Remove any stale image first; savefig then writes a fresh file.
    (output_dir / "field_xy.png").unlink(missing_ok=True)
    plt.savefig(output_dir / "field_xy.png", dpi=150, bbox_inches="tight")
    plt.close()

    # ---------- ZX slice (Z horizontal, X vertical) ----------
    z = np.linspace(zx_zlim[0], zx_zlim[1], int(max(60, zx_nz)), dtype=np.float64)
    x = np.linspace(zx_xlim[0], zx_xlim[1], int(max(60, zx_nx)), dtype=np.float64)
    Z, Xg = np.meshgrid(z, x, indexing="xy")
    Y = np.full_like(Xg, float(zx_y_plane), dtype=np.float64)

    pts_zx = np.column_stack([Xg.ravel(), Y.ravel(), Z.ravel()])
    B_zx = _eval_field_batch(field, pts_zx).reshape(*Xg.shape, 3)
    Bx, By, Bz = B_zx[..., 0], B_zx[..., 1], B_zx[..., 2]
    Bmag = np.sqrt(Bx**2 + By**2 + Bz**2, dtype=np.float64)

    Bpos = np.clip(Bmag, log_vmin * 1e-2, None)
    norm = LogNorm(vmin=float(log_vmin), vmax=float(log_vmax))

    fig, ax = plt.subplots(figsize=(10, 10))
    im = ax.imshow(
        Bpos,
        extent=[z.min(), z.max(), x.min(), x.max()],
        origin="lower",
        aspect="equal",
        norm=norm,
        cmap="gnuplot2",
    )
    # Separate colorbar axis keeps the square image from being squeezed.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.10)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label("|B| [T] (log scale)")

    # Quiver (projected in z–x plane)
    step = max(1, Z.shape[1] // quiver_stride_zx)
    Bsafe = np.where(Bmag > 0, Bmag, np.inf)
    Uz = Bz / Bsafe
    Ux = Bx / Bsafe
    ax.quiver(
        Z[::step, ::step],
        Xg[::step, ::step],
        Uz[::step, ::step],
        Ux[::step, ::step],
        scale=28,
        width=0.003,
    )

    ax.set_xlim(zx_zlim)
    ax.set_ylim(zx_xlim)
    ax.set_xlabel("z [m]")
    ax.set_ylabel("x [m]")
    ax.set_title(f"|B| in y = {float(zx_y_plane):.2f} m plane")
    plt.tight_layout()
    (output_dir / "field_zx.png").unlink(missing_ok=True)
    plt.savefig(output_dir / "field_zx.png", dpi=150, bbox_inches="tight")
    plt.close()

    print(
        f"\nSaved field maps to: {output_dir}/field_xy.png and {output_dir}/field_zx.png"
    )
0453 
0454 
def _build_parser():
    """Build the CLI parser: benchmark sizes, output options, and field-map knobs."""
    parser = argparse.ArgumentParser(
        description="Benchmark toroidal magnetic field performance"
    )
    parser.add_argument(
        "--num-evaluations",
        type=int,
        default=10000,
        help="Number of evaluations for single benchmark (default: 10000)",
    )
    parser.add_argument(
        "--batch-sizes",
        type=int,
        nargs="+",
        default=[1, 10, 100, 1000, 10000],
        help="Batch sizes to test (default: 1 10 100 1000 10000)",
    )
    parser.add_argument(
        "--grid-resolution",
        type=int,
        default=50,
        help="Grid resolution for spatial benchmark (default: 50)",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default="toroidal_field_benchmark",
        help="Output directory for results (default: toroidal_field_benchmark)",
    )
    parser.add_argument(
        "--skip-plots", action="store_true", help="Skip generating performance plots"
    )
    parser.add_argument(
        "--plot-field",
        action="store_true",
        help="Also compute and save XY/ZX field maps",
    )

    # XY slice controls
    parser.add_argument(
        "--xy-z-plane",
        type=float,
        default=0.20,
        help="z (m) for XY slice (default: 0.20)",
    )
    parser.add_argument(
        "--xy-xlim",
        type=float,
        nargs=2,
        default=(-10.0, 10.0),
        help="x limits for XY slice (m) (default: -10 10)",
    )
    parser.add_argument(
        "--xy-ylim",
        type=float,
        nargs=2,
        default=(-10.0, 10.0),
        help="y limits for XY slice (m) (default: -10 10)",
    )
    parser.add_argument(
        "--xy-nx", type=int, default=520, help="grid Nx for XY slice (default: 520)"
    )
    parser.add_argument(
        "--xy-ny", type=int, default=520, help="grid Ny for XY slice (default: 520)"
    )

    # ZX slice controls
    parser.add_argument(
        "--zx-y-plane",
        type=float,
        default=0.10,
        help="y (m) for ZX slice (default: 0.10)",
    )
    parser.add_argument(
        "--zx-zlim",
        type=float,
        nargs=2,
        default=(-22.0, 22.0),
        help="z limits for ZX slice (m) (default: -22 22)",
    )
    parser.add_argument(
        "--zx-xlim",
        type=float,
        nargs=2,
        default=(-11.0, 11.0),
        help="x limits for ZX slice (m) (default: -11 11)",
    )
    parser.add_argument(
        "--zx-nz", type=int, default=560, help="grid Nz for ZX slice (default: 560)"
    )
    parser.add_argument(
        "--zx-nx", type=int, default=560, help="grid Nx for ZX slice (default: 560)"
    )

    # Color/arrow controls
    parser.add_argument(
        "--log-vmin",
        type=float,
        default=1e-4,
        help="LogNorm vmin for |B| (T) (default: 1e-4)",
    )
    parser.add_argument(
        "--log-vmax",
        type=float,
        default=4.1,
        help="LogNorm vmax for |B| (T) (default: 4.1)",
    )
    parser.add_argument(
        "--quiver-stride-xy",
        type=int,
        default=28,
        help="Quiver density for XY (default: 28)",
    )
    parser.add_argument(
        "--quiver-stride-zx",
        type=int,
        default=28,
        help="Quiver density for ZX (default: 28)",
    )
    return parser


def main():
    """Create the toroid field, run all benchmarks, and optionally plot.

    Returns a process exit code: 0 on success, 1 if any benchmark step raises.
    """
    args = _build_parser().parse_args()

    print("=== Toroid Magnetic Field Benchmark ===")
    print(f"ACTS version: {acts.version}")

    # Create toroidal field
    print("\nCreating toroidal field...")
    field = create_toroidal_field()
    print("✓ Toroid field created successfully")

    # Run benchmarks; any failure is reported and mapped to exit code 1.
    try:
        # Single evaluation benchmark
        single_time, single_rate = benchmark_single_evaluations(
            field, args.num_evaluations
        )

        # Batch evaluation benchmark
        batch_results = benchmark_vectorized_evaluations(field, args.batch_sizes)

        # Spatial distribution benchmark
        region_results = benchmark_spatial_distribution(field, args.grid_resolution)

        # Print summary
        print("\n=== Benchmark Summary ===")
        print(
            f"Single evaluation average: {single_time*1e6:.2f} μs ({single_rate:.0f} eval/s)"
        )
        print(
            f"Best batch performance: {min(r['avg_time_us'] for r in batch_results):.2f} μs"
        )
        print(
            f"Fastest region: {min(region_results, key=lambda x: x['avg_time_us'])['region']}"
        )
        print(
            f"Slowest region: {max(region_results, key=lambda x: x['avg_time_us'])['region']}"
        )

        # Generate plots if requested
        if not args.skip_plots:
            try:
                plot_benchmark_results(batch_results, region_results, args.output_dir)
            except ImportError:
                # NOTE(review): matplotlib is imported at module level, so an
                # ImportError here is unlikely; kept for defensive parity.
                print("\nWarning: matplotlib not available, skipping performance plots")

        # Field maps (XY, ZX) using ACTS field.getField
        if args.plot_field:
            try:
                plot_field_maps(
                    field,
                    output_dir=args.output_dir,
                    xy_z_plane=args.xy_z_plane,
                    xy_xlim=tuple(args.xy_xlim),
                    xy_ylim=tuple(args.xy_ylim),
                    xy_nx=args.xy_nx,
                    xy_ny=args.xy_ny,
                    zx_y_plane=args.zx_y_plane,
                    zx_zlim=tuple(args.zx_zlim),
                    zx_xlim=tuple(args.zx_xlim),
                    zx_nz=args.zx_nz,
                    zx_nx=args.zx_nx,
                    log_vmin=args.log_vmin,
                    log_vmax=args.log_vmax,
                    quiver_stride_xy=args.quiver_stride_xy,
                    quiver_stride_zx=args.quiver_stride_zx,
                )
            except ImportError:
                print(
                    "\nWarning: matplotlib (extras) not available, skipping field maps"
                )

        print("\n✓ Benchmark completed successfully!")

    except Exception as e:
        print(f"\n❌ Benchmark failed: {e}")
        return 1

    return 0
0653 
0654 
if __name__ == "__main__":
    # raise SystemExit instead of the site-provided exit() builtin, which is
    # not guaranteed to exist (e.g. under python -S or embedded interpreters).
    raise SystemExit(main())