From 31a4d043ac03978f582eaa7f4add5388bfce46ac Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Tue, 6 Jan 2026 15:16:20 -0500
Subject: [PATCH 1/6] Allow using externalPostfit as a starting point for the
 fit; profile beta parameters after loading externalPostfit

---
 bin/rabbit_fit.py | 52 ++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/bin/rabbit_fit.py b/bin/rabbit_fit.py
index 58d7170..968c396 100755
--- a/bin/rabbit_fit.py
+++ b/bin/rabbit_fit.py
@@ -276,41 +276,43 @@ def fit(args, fitter, ws, dofit=True):
 
     if args.externalPostfit is not None:
         fitter.load_fitresult(args.externalPostfit, args.externalPostfitResult)
-    else:
-        if dofit:
-            cb = fitter.minimize()
-            if cb is not None:
-                ws.add_loss_time_hist(cb.loss_history, cb.time_history)
+        if fitter.binByBinStat:
+            fitter._profile_beta()
 
-        if not args.noHessian:
-            # compute the covariance matrix and estimated distance to minimum
+    if dofit:
+        cb = fitter.minimize()
 
-            val, grad, hess = fitter.loss_val_grad_hess()
-            edmval, cov = edmval_cov(grad, hess)
+        if cb is not None:
+            ws.add_loss_time_hist(cb.loss_history, cb.time_history)
 
-            logger.info(f"edmval: {edmval}")
+    if not args.noHessian:
+        # compute the covariance matrix and estimated distance to minimum
+
+        val, grad, hess = fitter.loss_val_grad_hess()
+        edmval, cov = edmval_cov(grad, hess)
+        logger.info(f"edmval: {edmval}")
 
-            fitter.cov.assign(cov)
+        fitter.cov.assign(cov)
 
-            del cov
+        del cov
 
-            if fitter.binByBinStat and fitter.diagnostics:
-                # This is the estimated distance to minimum with respect to variations of
-                # the implicit binByBinStat nuisances beta at fixed parameter values.
-                # It should be near-zero by construction as long as the analytic profiling is
-                # correct
-                _, gradbeta, hessbeta = fitter.loss_val_grad_hess_beta()
-                edmvalbeta, covbeta = edmval_cov(gradbeta, hessbeta)
-                logger.info(f"edmvalbeta: {edmvalbeta}")
+        if fitter.binByBinStat and fitter.diagnostics:
+            # This is the estimated distance to minimum with respect to variations of
+            # the implicit binByBinStat nuisances beta at fixed parameter values.
+            # It should be near-zero by construction as long as the analytic profiling is
+            # correct
+            _, gradbeta, hessbeta = fitter.loss_val_grad_hess_beta()
+            edmvalbeta, covbeta = edmval_cov(gradbeta, hessbeta)
+            logger.info(f"edmvalbeta: {edmvalbeta}")
 
-            if args.doImpacts:
-                ws.add_impacts_hists(*fitter.impacts_parms(hess))
+        if args.doImpacts:
+            ws.add_impacts_hists(*fitter.impacts_parms(hess))
 
-            del hess
+        del hess
 
-            if args.globalImpacts:
-                ws.add_global_impacts_hists(*fitter.global_impacts_parms())
+        if args.globalImpacts:
+            ws.add_global_impacts_hists(*fitter.global_impacts_parms())
 
     nllvalreduced = fitter.reduced_nll().numpy()
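In compact form, the control-flow change of this patch (a sketch with stand-in objects, not rabbit's actual API; before the patch, the externalPostfit branch replaced the fit entirely):

    class Fitter:
        """Minimal stand-in to illustrate the new control flow."""
        binByBinStat = True

        def load_fitresult(self, path, key):
            print(f"seed parameters from {path}")

        def _profile_beta(self):
            print("re-profile bin-by-bin beta at the loaded parameter values")

        def minimize(self):
            print("minimize, starting from the loaded postfit point")

    fitter = Fitter()
    external_postfit = "fitresult.hdf5"  # hypothetical file name

    # loading an external postfit now only seeds the starting point
    if external_postfit is not None:
        fitter.load_fitresult(external_postfit, None)
        if fitter.binByBinStat:
            # the loaded parameters change the expected yields, so the
            # analytically profiled beta parameters are recomputed once
            fitter._profile_beta()

    dofit = True
    if dofit:
        fitter.minimize()
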
From 84bec047df46338c1b509aba85166461d38515ce Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Tue, 6 Jan 2026 15:20:29 -0500
Subject: [PATCH 2/6] Fix freezing of constraint parameters; use the freezing
 functionality in likelihood scans

---
 bin/rabbit_plot_likelihood_scan.py |  63 ++++----
 rabbit/fitter.py                   | 241 ++++++++++++-----------------
 2 files changed, 125 insertions(+), 179 deletions(-)

diff --git a/bin/rabbit_plot_likelihood_scan.py b/bin/rabbit_plot_likelihood_scan.py
index a6a4752..a052db1 100755
--- a/bin/rabbit_plot_likelihood_scan.py
+++ b/bin/rabbit_plot_likelihood_scan.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python3
 
 import argparse
-import os
 
-import matplotlib.pyplot as plt
 import mplhep as hep
 import numpy as np
 
@@ -16,32 +14,6 @@
 logger = None
 
 
-def writeOutput(fig, outfile, extensions=[], postfix=None, args=None, meta_info=None):
-    name, _ = os.path.splitext(outfile)
-
-    if postfix:
-        name += f"_{postfix}"
-
-    for ext in extensions:
-        if ext[0] != ".":
-            ext = "." + ext
-        output = name + ext
-        print(f"Write output file {output}")
-        plt.savefig(output)
-
-    output = name.rsplit("/", 1)
-    output[1] = os.path.splitext(output[1])[0]
-    if len(output) == 1:
-        output = (None, *output)
-    if args is None and meta_info is None:
-        return
-    output_tools.write_logfile(
-        *output,
-        args=args,
-        meta_info=meta_info,
-    )
-
-
 def parseArgs():
     parser = argparse.ArgumentParser()
     parser.add_argument(
@@ -73,6 +45,11 @@ def parseArgs():
         default="./test",
         help="Folder path for output",
     )
+    parser.add_argument(
+        "--eoscp",
+        action="store_true",
+        help="Override use of xrdcp and use the mount instead",
+    )
     parser.add_argument(
         "-p", "--postfix", type=str, help="Postfix for output file name"
     )
@@ -113,6 +90,13 @@ def parseArgs():
         default=None,
         help="x axis limits",
     )
+    parser.add_argument(
+        "--ylim",
+        type=float,
+        nargs=2,
+        default=None,
+        help="y axis limits",
+    )
     parser.add_argument("--titlePos", type=int, default=2, help="title position")
     parser.add_argument(
         "--config",
@@ -133,6 +117,7 @@ def plot_scan(
     subtitle=None,
     titlePos=0,
     xlim=None,
+    ylim=None,
     combine=None,
     ylabel=r"$-2\,\Delta \log L$",
     config={},
@@ -148,13 +133,15 @@ def plot_scan(
 
     if xlim is None:
         xlim = (min(x), max(x))
+    if ylim is None:
+        ylim = (min(y), max(y))
 
     fig, ax = plot_tools.figure(
         x,
         xlabel,
         ylabel,
         xlim=xlim,
-        ylim=(min(y), max(y)),  # logy=args.logy
+        ylim=ylim,  # logy=args.logy
     )
 
     ax.axhline(y=1, color="gray", linestyle="--", alpha=0.5)
@@ -227,6 +214,7 @@ def plot_scan(
 
 def main():
     args = parseArgs()
+    outdir = output_tools.make_plot_dir(args.outpath, eoscp=args.eoscp)
 
     global logger
     logger = logging.setup_logger(__file__, args.verbose, args.noColorLogger)
@@ -280,19 +268,20 @@ def main():
             subtitle=args.subtitle,
             titlePos=args.titlePos,
             xlim=args.xlim,
+            ylim=args.ylim,
             config=config,
             combine=(vals, nlls) if args.combine is not None else None,
             no_hessian=args.noHessian,
         )
-        os.makedirs(args.outpath, exist_ok=True)
-        outfile = os.path.join(args.outpath, f"nll_scan_{param}")
-        writeOutput(
-            fig,
-            outfile=outfile,
-            extensions=["png", "pdf"],
-            meta_info=meta,
+
+        to_join = [f"nll_scan_{param}", args.postfix]
+        outfile = "_".join(filter(lambda x: x, to_join))
+        plot_tools.save_pdf_and_png(outdir, outfile)
+        output_tools.write_index_and_log(
+            outdir,
+            outfile,
+            analysis_meta_info=meta,
             args=args,
-            postfix=args.postfix,
         )
 
 
diff --git a/rabbit/fitter.py b/rabbit/fitter.py
index faba62a..967f883 100644
--- a/rabbit/fitter.py
+++ b/rabbit/fitter.py
@@ -45,9 +45,7 @@ def __init__(self, xv):
 
     def __call__(self, intermediate_result):
         loss = intermediate_result.fun
-        logger.debug(
-            f"Iteration {self.iiter}: loss value {loss}"
-        )  # ; status {intermediate_result.status}")
+        logger.debug(f"Iteration {self.iiter}: loss value {loss}")
 
         if np.isnan(loss):
             raise ValueError(f"Loss value is NaN at iteration {self.iiter}")
@@ -404,7 +402,14 @@ def set_blinding_offsets(self, blind=True):
         )
 
     def get_blinded_theta(self):
-        theta = self.x[self.poi_model.npoi :]
+        theta = self.x[self.poi_model.npoi : self.poi_model.npoi + self.indata.nsyst]
+        theta = tf.where(
+            self.frozen_params_mask[
+                self.poi_model.npoi : self.poi_model.npoi + self.indata.nsyst
+            ],
+            tf.stop_gradient(theta),
+            theta,
+        )
         if self.do_blinding:
             return theta + self._blinding_offsets_theta
         else:
@@ -416,6 +421,9 @@ def get_blinded_poi(self):
             poi = xpoi
         else:
             poi = tf.square(xpoi)
+        poi = tf.where(
+            self.frozen_params_mask[: self.poi_model.npoi], tf.stop_gradient(poi), poi
+        )
         if self.do_blinding:
             return poi * self._blinding_offsets_poi
         else:
@@ -1301,15 +1309,6 @@ def _compute_yields_noBBB(self, full=True):
         poi = self.get_blinded_poi()
         theta = self.get_blinded_theta()
 
-        poi = tf.where(
-            self.frozen_params_mask[: self.poi_model.npoi], tf.stop_gradient(poi), poi
-        )
-        theta = tf.where(
-            self.frozen_params_mask[self.poi_model.npoi :],
-            tf.stop_gradient(theta),
-            theta,
-        )
-
         rnorm = self.poi_model.compute(poi)
 
         normcentral = None
@@ -1941,14 +1940,12 @@ def _compute_lbeta(self, beta, full_nll=False):
         return None
 
     def _compute_nll_components(self, profile=True, full_nll=False):
-        nexpfullcentral, _, beta = self._compute_yields_with_beta(
+        nexp, _, beta = self._compute_yields_with_beta(
             profile=profile,
             compute_norm=False,
             full=False,
         )
 
-        nexp = nexpfullcentral
-
         if self.chisqFit:
             ln = 0.5 * tf.reduce_sum((nexp - self.nobs) ** 2 / self.varnobs, axis=-1)
         elif self.covarianceFit:
@@ -2065,6 +2062,68 @@ def loss_val_grad_hess_beta(self, profile=True):
 
         return val, grad, hess
 
+    def fit(self):
+        def scipy_loss(xval):
+            self.x.assign(xval)
+            val, grad = self.loss_val_grad()
+            return val.__array__(), grad.__array__()
+
+        def scipy_hessp(xval, pval):
+            self.x.assign(xval)
+            p = tf.convert_to_tensor(pval)
+            val, grad, hessp = self.loss_val_grad_hessp(p)
+            return hessp.__array__()
+
+        def scipy_hess(xval):
+            self.x.assign(xval)
+            val, grad, hess = self.loss_val_grad_hess()
+            if self.diagnostics:
+                cond_number = tfh.cond_number(hess)
+                logger.info(f" - Condition number: {cond_number}")
+                edmval = tfh.edmval(grad, hess)
+                logger.info(f" - edmval: {edmval}")
+            return hess.__array__()
+
+        xval = self.x.numpy()
+
+        callback = FitterCallback(xval)
+
+        if self.minimizer_method in [
+            "trust-krylov",
+            "trust-ncg",
+        ]:
+            info_minimize = dict(hessp=scipy_hessp)
+        elif self.minimizer_method in [
+            "trust-exact",
+            "dogleg",
+        ]:
+            info_minimize = dict(hess=scipy_hess)
+        else:
+            info_minimize = dict()
+
+        try:
+            res = scipy.optimize.minimize(
+                scipy_loss,
+                xval,
+                method=self.minimizer_method,
+                jac=True,
+                tol=0.0,
+                callback=callback,
+                **info_minimize,
+            )
+        except Exception as ex:
+            # minimizer could have called the loss or hessp functions with "random" values, so restore the
+            # state from the end of the last iteration before the exception
+            xval = callback.xval
+            logger.debug(ex)
+        else:
+            xval = res["x"]
+            logger.debug(res)
+
+        self.x.assign(xval)
+
+        return callback
+
     def minimize(self):
         if self.is_linear:
             logger.info(
@@ -2092,65 +2151,7 @@ def minimize(self):
             callback = None
         else:
-
-            def scipy_loss(xval):
-                self.x.assign(xval)
-                val, grad = self.loss_val_grad()
-                return val.__array__(), grad.__array__()
-
-            def scipy_hessp(xval, pval):
-                self.x.assign(xval)
-                p = tf.convert_to_tensor(pval)
-                val, grad, hessp = self.loss_val_grad_hessp(p)
-                return hessp.__array__()
-
-            def scipy_hess(xval):
-                self.x.assign(xval)
-                val, grad, hess = self.loss_val_grad_hess()
-                if self.diagnostics:
-                    cond_number = tfh.cond_number(hess)
-                    logger.info(f" - Condition number: {cond_number}")
-                    edmval = tfh.edmval(grad, hess)
-                    logger.info(f" - edmval: {edmval}")
-                return hess.__array__()
-
-            xval = self.x.numpy()
-
-            callback = FitterCallback(xval)
-
-            if self.minimizer_method in [
-                "trust-krylov",
-                "trust-ncg",
-            ]:
-                info_minimize = dict(hessp=scipy_hessp)
-            elif self.minimizer_method in [
-                "trust-exact",
-                "dogleg",
-            ]:
-                info_minimize = dict(hess=scipy_hess)
-            else:
-                info_minimize = dict()
-
-            try:
-                res = scipy.optimize.minimize(
-                    scipy_loss,
-                    xval,
-                    method=self.minimizer_method,
-                    jac=True,
-                    tol=0.0,
-                    callback=callback,
-                    **info_minimize,
-                )
-            except Exception as ex:
-                # minimizer could have called the loss or hessp functions with "random" values, so restore the
-                # state from the end of the last iteration before the exception
-                xval = callback.xval
-                logger.debug(ex)
-            else:
-                xval = res["x"]
-                logger.debug(res)
-
-            self.x.assign(xval)
+            callback = self.fit()
 
         # force profiling of beta with final parameter values
         # TODO avoid the extra calculation and jitting if possible since the relevant calculation
@@ -2164,6 +2165,9 @@ def nll_scan(self, param, scan_range, scan_points, use_prefit=False):
         # make a likelihood scan for a single parameter
         # assuming the likelihood is minimized
 
+        # freeze the parameter, i.e. do not update it in the fit
+        self.freeze_params(param)
+
         idx = np.where(self.parms.astype(str) == param)[0][0]
 
         # store current state of x temporarily
@@ -2189,51 +2193,28 @@ def nll_scan(self, param, scan_range, scan_points, use_prefit=False):
             if i == 0:
                 continue
 
+            logger.debug(f"Now at i={i} x={ixval}")
             self.x.assign(tf.tensor_scatter_nd_update(self.x, [[idx]], [ixval]))
 
-            def scipy_loss(xval):
-                self.x.assign(xval)
-                val, grad = self.loss_val_grad()
-                grad = grad.numpy()
-                grad[idx] = 0  # Zero out gradient for the frozen parameter
-                return val.numpy(), grad
-
-            def scipy_hessp(xval, pval):
-                self.x.assign(xval)
-                pval[idx] = (
-                    0  # Ensure the perturbation does not affect frozen parameter
-                )
-                p = tf.convert_to_tensor(pval)
-                val, grad, hessp = self.loss_val_grad_hessp(p)
-                hessp = hessp.numpy()
-                # TODO: worth testing modifying the loss/grad/hess functions to imply 1
-                # for the corresponding hessian element instead of 0,
-                # since this might allow the minimizer to converge more efficiently
-                hessp[idx] = (
-                    0  # Zero out Hessian-vector product at the frozen index
-                )
-                return hessp
-
-            res = scipy.optimize.minimize(
-                scipy_loss,
-                self.x,
-                method="trust-krylov",
-                jac=True,
-                hessp=scipy_hessp,
-            )
-            if res["success"]:
-                dnlls[nscans // 2 + sign * i] = (
-                    self.reduced_nll().numpy() - nll_best
-                )
-                scan_vals[nscans // 2 + sign * i] = ixval
+            self.fit()
+
+            dnlls[nscans // 2 + sign * i] = self.reduced_nll().numpy() - nll_best
+
+            scan_vals[nscans // 2 + sign * i] = ixval
 
         # reset x to original state
         self.x.assign(xval)
 
+        # let the parameter be free again
+        self.defreeze_params(param)
+
         return scan_vals, dnlls
 
     def nll_scan2D(self, param_tuple, scan_range, scan_points, use_prefit=False):
+        # freeze the parameters, i.e. do not update them in the fit
+        self.freeze_params(param_tuple)
+
         idx0 = np.where(self.parms.astype(str) == param_tuple[0])[0][0]
         idx1 = np.where(self.parms.astype(str) == param_tuple[1])[0][0]
 
@@ -2280,7 +2261,9 @@ def nll_scan2D(self, param_tuple, scan_range, scan_points, use_prefit=False):
                 ix = best_fit - i
                 iy = best_fit + j
 
-                # print(f"i={i}, j={j}, r={r} drow={drow}, dcol={dcol} | ix={ix}, iy={iy}")
+                logger.debug(
+                    f"Now at (ix,iy) = ({ix},{iy}) (x,y)= ({x_scans[ix]},{y_scans[iy]})"
+                )
 
                 self.x.assign(
                     tf.tensor_scatter_nd_update(
@@ -2288,41 +2271,15 @@ def nll_scan2D(self, param_tuple, scan_range, scan_points, use_prefit=False):
                     )
                 )
 
-                def scipy_loss(xval):
-                    self.x.assign(xval)
-                    val, grad = self.loss_val_grad()
-                    grad = grad.numpy()
-                    grad[idx0] = 0
-                    grad[idx1] = 0
-                    return val.numpy(), grad
-
-                def scipy_hessp(xval, pval):
-                    self.x.assign(xval)
-                    pval[idx0] = 0
-                    pval[idx1] = 0
-                    p = tf.convert_to_tensor(pval)
-                    val, grad, hessp = self.loss_val_grad_hessp(p)
-                    hessp = hessp.numpy()
-                    hessp[idx0] = 0
-                    hessp[idx1] = 0
-
-                    if np.allclose(hessp, 0, atol=1e-8):
-                        return np.zeros_like(hessp)
+                self.fit()
 
-                    return hessp
+                dnlls[ix, iy] = self.reduced_nll().numpy() - nll_best
 
-                res = scipy.optimize.minimize(
-                    scipy_loss,
-                    self.x,
-                    method="trust-krylov",
-                    jac=True,
-                    hessp=scipy_hessp,
-                )
+        self.x.assign(xval)
 
-                if res["success"]:
-                    dnlls[ix, iy] = self.reduced_nll().numpy() - nll_best
+        # let the parameters be free again
+        self.defreeze_params(param_tuple)
 
-                self.x.assign(xval)
         return x_scans, y_scans, dnlls
 
     def contour_scan(self, param, nll_min, q=1, signs=[-1, 1], fun=None):

From e9cd3270ab85d938c394e47d84aa574d628e1ec8 Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Sun, 25 Jan 2026 08:59:17 -0500
Subject: [PATCH 3/6] Add option to skip postfit profiling (e.g. when only
 using externalPostfit); add option to load only the parameters, not the
 covariance, from an external postfit; always draw the zero line in hist
 plots

---
 bin/rabbit_fit.py        | 10 ++++++++--
 bin/rabbit_plot_hists.py | 11 ++++++++++-
 rabbit/fitter.py         | 15 ++++++++++-----
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/bin/rabbit_fit.py b/bin/rabbit_fit.py
index 968c396..2bb289d 100755
--- a/bin/rabbit_fit.py
+++ b/bin/rabbit_fit.py
@@ -149,6 +149,12 @@ def make_parser():
         type=str,
         help="Specify result from external postfit file",
     )
+    parser.add_argument(
+        "--noPostfitProfile",
+        default=False,
+        action="store_true",
+        help="Do not profile beta parameters in the postfit (e.g. when using --externalPostfit).",
+    )
     parser.add_argument(
         "--doImpacts",
         default=False,
         action="store_true",
@@ -333,7 +339,7 @@ def fit(args, fitter, ws, dofit=True):
             "nllvalreduced": nllvalreduced,
             "ndfsat": ndfsat,
             "edmval": edmval,
-            "postfit_profile": args.externalPostfit is None,
+            "postfit_profile": not args.noPostfitProfile,
         }
     )
@@ -561,7 +567,7 @@ def main():
                 ifitter,
                 ws,
                 prefit=False,
-                profile=args.externalPostfit is None,
+                profile=not args.noPostfitProfile,
             )
         else:
             fit_time.append(time.time())

diff --git a/bin/rabbit_plot_hists.py b/bin/rabbit_plot_hists.py
index bb66e0a..aebf7af 100755
--- a/bin/rabbit_plot_hists.py
+++ b/bin/rabbit_plot_hists.py
@@ -817,6 +817,7 @@ def make_plot(
             h_den = h_inclusive
 
         if diff:
+            h0 = hh.addHists(h_num, h_num, scale2=-1)
             h1 = hh.addHists(h_inclusive, h_den, scale2=-1)
             h2 = hh.addHists(h_data, h_den, scale2=-1)
             if h_data_stat is not None:
@@ -824,6 +825,14 @@ def make_plot(
                     h_data_stat, h_den, cutoff=cutoff, rel_unc=True
                 )
         else:
+            h0 = hh.divideHists(
+                h_num,
+                h_num,
+                cutoff=1e-8,
+                rel_unc=True,
+                flow=False,
+                by_ax_name=False,
+            )
             h1 = hh.divideHists(
                 h_inclusive,
                 h_den,
@@ -839,7 +848,7 @@ def make_plot(
             )
 
         hep.histplot(
-            h1,
+            h0,
             histtype="step",
             color="grey",
             alpha=0.5,

diff --git a/rabbit/fitter.py b/rabbit/fitter.py
index 967f883..3fbc00e 100644
--- a/rabbit/fitter.py
+++ b/rabbit/fitter.py
@@ -286,12 +286,14 @@ def __init__(
 
     def load_fitresult(self, fitresult_file, fitresult_key):
         # load results from external fit and set postfit value and covariance elements for common parameters
+        cov_ext = None
         with h5py.File(fitresult_file, "r") as fext:
             if "x" in fext.keys():
                 # fitresult from combinetf
                 x_ext = fext["x"][...]
                 parms_ext = fext["parms"][...].astype(str)
-                cov_ext = fext["cov"][...]
+                if "cov" in fext.keys():
+                    cov_ext = fext["cov"][...]
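+                # if the file stores no covariance, cov_ext stays None and
+                # the fitter's existing covariance is kept unchanged below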
            else:
                 # fitresult from rabbit
                 h5results_ext = io_tools.get_fitresult(fext, fitresult_key)
 
                 x_ext = h_parms_ext.values()
                 parms_ext = np.array(h_parms_ext.axes["parms"])
-                cov_ext = h5results_ext["cov"].get().values()
+                if "cov" in h5results_ext.keys():
+                    cov_ext = h5results_ext["cov"].get().values()
 
         xvals = self.x.numpy()
-        covval = self.cov.numpy()
         parms = self.parms.astype(str)
 
         # Find common elements with their matching indices
@@ -310,10 +312,13 @@ def load_fitresult(self, fitresult_file, fitresult_key):
             parms, parms_ext, assume_unique=True, return_indices=True
         )
         xvals[idxs] = x_ext[idxs_ext]
-        covval[np.ix_(idxs, idxs)] = cov_ext[np.ix_(idxs_ext, idxs_ext)]
 
         self.x.assign(xvals)
-        self.cov.assign(tf.constant(covval))
+
+        if cov_ext is not None:
+            covval = self.cov.numpy()
+            covval[np.ix_(idxs, idxs)] = cov_ext[np.ix_(idxs_ext, idxs_ext)]
+            self.cov.assign(tf.constant(covval))
 
     def update_frozen_params(self):
         new_mask_np = np.isin(self.parms, self.frozen_params)

From d2bb63e109e0e62dc9bf10442e5bb9904df5a118 Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Sun, 25 Jan 2026 11:48:37 -0500
Subject: [PATCH 4/6] Minor fix to pulls labeling

---
 bin/rabbit_plot_pulls_and_impacts.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/rabbit_plot_pulls_and_impacts.py b/bin/rabbit_plot_pulls_and_impacts.py
index b847c30..e13574d 100755
--- a/bin/rabbit_plot_pulls_and_impacts.py
+++ b/bin/rabbit_plot_pulls_and_impacts.py
@@ -324,7 +324,7 @@ def make_bar(
                 size=8,
             ),
             error_x=error_x,
-            name="Pulls ± Constraints",
+            name=f"Pulls ± Constraints ({name})",
             showlegend=include_ref,
         ),
         row=1,

From e2eea24df8ec231930d4c2ccc0f4c9114399e340 Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Sun, 25 Jan 2026 12:53:20 -0500
Subject: [PATCH 5/6] Fix betavar in the presence of multiple channels

---
 rabbit/tensorwriter.py | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/rabbit/tensorwriter.py b/rabbit/tensorwriter.py
index 8af8109..108e5f0 100644
--- a/rabbit/tensorwriter.py
+++ b/rabbit/tensorwriter.py
@@ -439,7 +439,10 @@ def add_beta_variations(
         variation = h.project(*dest_axes_names, *source_axes_names).values()
         variation = variation.reshape((*norm.shape, -1))
 
-        self.dict_beta_variations[dest_channel][process] = variation
+        if source_channel not in self.dict_beta_variations[dest_channel].keys():
+            self.dict_beta_variations[dest_channel][source_channel] = {}
+
+        self.dict_beta_variations[dest_channel][source_channel][process] = variation
 
     def get_logk(self, syst, norm, kfac=1.0, systematic_type=None):
         if not np.all(np.isfinite(syst)):
@@ -795,9 +798,31 @@ def write(self, outfolder="./", outfilename="rabbit_input.hdf5", args={}):
                             dict_logkhalfdiff_proc[syst]
                         )
 
-                if proc in self.dict_beta_variations[chan]:
-                    beta_vars = self.dict_beta_variations[chan][proc]
-                    beta_variations[ibin : ibin + nbinschan, :, iproc] = beta_vars
+                for (
+                    source_channel,
+                    source_channel_dict,
+                ) in self.dict_beta_variations[chan].items():
+                    if proc in source_channel_dict:
+
+                        # find the bins of the source channel
+                        ibin_start = 0
+                        for c, nb in self.nbinschan.items():
+                            if self.channels[c]["masked"]:
+                                continue  # masked channels cannot be source channels
+                            if c == source_channel:
+                                ibin_end = ibin_start + nb
+                                break
+                            else:
+                                ibin_start += nb
+                        else:
+                            raise RuntimeError(
+                                f"Did not find source channel {source_channel} in list of channels {list(self.nbinschan.keys())}"
+                            )
+
+                        beta_vars = source_channel_dict[proc]
+                        beta_variations[
+                            ibin : ibin + nbinschan, ibin_start:ibin_end, iproc
+                        ] = beta_vars
 
                 ibin += nbinschan
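To make the bin bookkeeping behind this fix concrete, a small self-contained sketch with hypothetical channel names (nbinschan mirrors the writer's ordered per-channel bin counts on the flattened observable axis):

    nbinschan = {"ch_a": 50, "ch_b": 30}     # hypothetical channels, in writer order
    masked = {"ch_a": False, "ch_b": False}

    def source_channel_bins(source_channel):
        # global bin offset of a source channel on the flattened observable axis
        ibin_start = 0
        for c, nb in nbinschan.items():
            if masked[c]:
                continue  # masked channels cannot be source channels
            if c == source_channel:
                return ibin_start, ibin_start + nb
            ibin_start += nb
        raise RuntimeError(f"Did not find source channel {source_channel}")

    # beta variations are now written into the source channel's own columns
    # rather than into all columns of the destination channel
    print(source_channel_bins("ch_b"))  # (50, 80)
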
From 0ced4fd6a2f9f0ec0d8b260949a4965c2a677443 Mon Sep 17 00:00:00 2001
From: davidwalter2
Date: Sun, 25 Jan 2026 13:48:33 -0500
Subject: [PATCH 6/6] Add functionality to set parameter constraint minimum

---
 bin/rabbit_fit.py |  7 +++++++
 rabbit/fitter.py  | 25 ++++++++++++++++++++-----
 2 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/bin/rabbit_fit.py b/bin/rabbit_fit.py
index 2bb289d..41abbe8 100755
--- a/bin/rabbit_fit.py
+++ b/bin/rabbit_fit.py
@@ -182,6 +182,13 @@ def make_parser():
         action="store_true",
         help="compute impacts of frozen (non-profiled) systematics",
     )
+    parser.add_argument(
+        "--setConstraintMinimum",
+        default=[],
+        nargs=2,
+        action="append",
+        help="Set the constraint minimum of the specified parameter to the specified value",
+    )
 
     return parser.parse_args()
 
diff --git a/rabbit/fitter.py b/rabbit/fitter.py
index 6c8652d..dc1db75 100644
--- a/rabbit/fitter.py
+++ b/rabbit/fitter.py
@@ -139,12 +139,27 @@ def __init__(
 
         self.parms = np.concatenate([self.poi_model.pois, self.indata.systs])
 
+        # tf tensor containing default constraint minima
+        theta0default = np.zeros(self.indata.nsyst)
+        for parm, val in options.setConstraintMinimum:
+            idx = np.where(self.indata.systs.astype(str) == parm)[0]
+            if len(idx) != 1:
+                raise RuntimeError(
+                    f"Expected to find exactly one match for {parm} to set the constraint minimum, but found {len(idx)}"
+                )
+            theta0default[idx[0]] = float(val)
+
+        self.theta0default = tf.convert_to_tensor(
+            theta0default, dtype=self.indata.dtype
+        )
+
         # tf variable containing all fit parameters
-        thetadefault = tf.zeros([self.indata.nsyst], dtype=self.indata.dtype)
         if self.poi_model.npoi > 0:
-            xdefault = tf.concat([self.poi_model.xpoidefault, thetadefault], axis=0)
+            xdefault = tf.concat(
+                [self.poi_model.xpoidefault, self.theta0default], axis=0
+            )
         else:
-            xdefault = thetadefault
+            xdefault = self.theta0default
 
         self.x = tf.Variable(xdefault, trainable=True, name="x")
 
@@ -184,7 +199,7 @@ def __init__(
 
         # constraint minima for nuisance parameters
         self.theta0 = tf.Variable(
-            tf.zeros([self.indata.nsyst], dtype=self.indata.dtype),
+            self.theta0default,
             trainable=False,
            name="theta0",
        )
@@ -488,7 +503,7 @@ def set_beta0(self, values):
             self.logbeta0.assign(tf.math.log(beta0safe))
 
     def theta0defaultassign(self):
-        self.theta0.assign(tf.zeros([self.indata.nsyst], dtype=self.theta0.dtype))
+        self.theta0.assign(self.theta0default)
 
     def xdefaultassign(self):
         if self.poi_model.npoi == 0:
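A minimal sketch of what --setConstraintMinimum amounts to, with hypothetical parameter names and assuming the usual unit-width Gaussian constraint term (the penalty form itself is not shown in these patches):

    import numpy as np

    systs = np.array(["pdf_var", "mu_scale"])  # hypothetical nuisance parameters
    args_set = [["mu_scale", "1.5"]]           # from: --setConstraintMinimum mu_scale 1.5

    theta0 = np.zeros(len(systs))
    for parm, val in args_set:
        idx = np.where(systs == parm)[0]
        if len(idx) != 1:
            raise RuntimeError(f"Expected exactly one match for {parm}, found {len(idx)}")
        theta0[idx[0]] = float(val)

    theta = theta0.copy()                          # the fit also starts from the shifted minima
    penalty = 0.5 * np.sum((theta - theta0) ** 2)  # assumed Gaussian constraint, zero at theta0
    print(theta0, penalty)                         # [0.  1.5] 0.0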