Skip to content

Commit

Permalink
Merge remote-tracking branch 'fk/main'
Browse files Browse the repository at this point in the history
  • Loading branch information
cmantill committed Oct 10, 2024
2 parents 42aa8af + 4e7346c commit 1f80c03
Show file tree
Hide file tree
Showing 17 changed files with 780 additions and 52 deletions.
78 changes: 36 additions & 42 deletions boostedhiggs/corrections.py
Original file line number Diff line number Diff line change
Expand Up @@ -416,7 +416,7 @@ def _combine(eff, sf, passbtag):
Electrons:
- UL CorrectionLib htmlfiles:
https://cms-nanoaod-integration.web.cern.ch/commonJSONSFs/EGM_electron_Run2_UL/
https://cms-nanoaod-integration.web.cern.ch/commonJSONSFs/
- ID and Isolation:
- wp90noiso for high pT electrons
- wp90iso for low pT electrons
Expand Down Expand Up @@ -539,35 +539,30 @@ def get_clip(lep_pt, lep_eta, lepton_type, corr=None):
else:
values["nominal"] = cset["UL-Electron-ID-SF"].evaluate(ul_year, "sf", json_map_name, lepton_eta, lepton_pt)

if lepton_type == "muon":
values["up"] = cset[json_map_name].evaluate(lepton_eta, lepton_pt, "systup")
values["down"] = cset[json_map_name].evaluate(lepton_eta, lepton_pt, "systdown")
if (lepton_type == "muon") and (corr == "id"):
# split the stat. and syst. unc. for the id SF for muons
for unc_type in ["stat", "syst"]:
values[unc_type] = cset[json_map_name].evaluate(lepton_eta, lepton_pt, unc_type)
for key, val in values.items():
values[key] = set_isothreshold(corr, val, np.array(ak.fill_none(lepton.pt, 0.0)), lepton_type)

up = values["nominal"] * (1 + values[unc_type])
down = values["nominal"] * (1 - values[unc_type])
weights.add(f"{corr}_{lepton_type}_{unc_type}", values["nominal"], up, down)

else:
values["up"] = cset["UL-Electron-ID-SF"].evaluate(ul_year, "sfup", json_map_name, lepton_eta, lepton_pt)
values["down"] = cset["UL-Electron-ID-SF"].evaluate(ul_year, "sfdown", json_map_name, lepton_eta, lepton_pt)

for key, val in values.items():
values[key] = set_isothreshold(corr, val, np.array(ak.fill_none(lepton.pt, 0.0)), lepton_type)

# add weights (for now only the nominal weight)
weights.add(f"{corr}_{lepton_type}", values["nominal"], values["up"], values["down"])

# # quick hack to add electron trigger SFs
# if lepton_type == "electron":
# corr = "trigger"
# with importlib.resources.path("boostedhiggs.data", f"electron_trigger_{ul_year}_UL.json") as filename:
# cset = correctionlib.CorrectionSet.from_file(str(filename))
# lepton_pt, lepton_eta = get_clip(lep_pt, lep_eta, lepton_type, corr)
# values["nominal"] = cset["UL-Electron-Trigger-SF"].evaluate(
# ul_year + "_UL", "sf", "trigger", lepton_eta, lepton_pt
# )
# values["up"] = cset["UL-Electron-Trigger-SF"].evaluate(
# ul_year + "_UL", "sfup", "trigger", lepton_eta, lepton_pt,
# )
# values["down"] = cset["UL-Electron-Trigger-SF"].evaluate(
# ul_year + "_UL", "sfdown", "trigger", lepton_eta, lepton_pt
# )
# weights.add(f"{corr}_{lepton_type}", values["nominal"], values["up"], values["down"])
if lepton_type == "muon":
values["up"] = cset[json_map_name].evaluate(lepton_eta, lepton_pt, "systup")
values["down"] = cset[json_map_name].evaluate(lepton_eta, lepton_pt, "systdown")
else:
values["up"] = cset["UL-Electron-ID-SF"].evaluate(ul_year, "sfup", json_map_name, lepton_eta, lepton_pt)
values["down"] = cset["UL-Electron-ID-SF"].evaluate(ul_year, "sfdown", json_map_name, lepton_eta, lepton_pt)

for key, val in values.items():
values[key] = set_isothreshold(corr, val, np.array(ak.fill_none(lepton.pt, 0.0)), lepton_type)

# add weights (for now only the nominal weight)
weights.add(f"{corr}_{lepton_type}", values["nominal"], values["up"], values["down"])


def get_pileup_weight(year: str, mod: str, nPU: np.ndarray):
Expand Down Expand Up @@ -898,16 +893,14 @@ def getJMSRVariables(fatjetvars, candidatelep_p4, met, mass_shift=None):
return variables


def add_TopPtReweighting(topPt):
def add_TopPtReweighting(weights, topPt):

toppt_weight1 = np.exp(0.0615 - 0.0005 * np.clip(topPt[:, 0], 0.0, 500.0))
toppt_weight2 = np.exp(0.0615 - 0.0005 * np.clip(topPt[:, 1], 0.0, 500.0))

nominal = np.sqrt(toppt_weight1 * toppt_weight2)

# weights.add("TopPtReweight", nominal, nominal**2, np.ones_like(nominal))

return nominal
weights.add("TopPtReweight", nominal, np.ones_like(nominal), nominal**2)

# weights.add(
# "TopPtReweight",
Expand All @@ -916,6 +909,8 @@ def add_TopPtReweighting(topPt):
# np.sqrt(toppt_weight1 * toppt_weight2),
# )

return nominal


def get_JetVetoMap(jets, year: str):
"""
Expand Down Expand Up @@ -1039,16 +1034,16 @@ def getGenLepGenQuarks(dataset, genparts: GenParticleArray):

bquarks = daughters[(daughters_pdgId == b_PDGID)]

bquarksdaughters = ak.flatten(bquarks.distinctChildren, axis=2)
bquarksdaughters_pdgId = abs(bquarksdaughters.pdgId)
# bquarksdaughters = ak.flatten(bquarks.distinctChildren, axis=2)
# bquarksdaughters_pdgId = abs(bquarksdaughters.pdgId)

bquarkslep = (
(bquarksdaughters_pdgId == ELE_PDGID)
| (bquarksdaughters_pdgId == MU_PDGID)
| (bquarksdaughters_pdgId == TAU_PDGID)
)
# bquarkslep = (
# (bquarksdaughters_pdgId == ELE_PDGID)
# | (bquarksdaughters_pdgId == MU_PDGID)
# | (bquarksdaughters_pdgId == TAU_PDGID)
# )

print("bquarkslep", bquarkslep)
# print("bquarkslep", bquarkslep)

lepVars = {
"lepton_pt": wboson_daughters[leptons].pt,
Expand Down Expand Up @@ -1212,7 +1207,6 @@ def getLPweights(dataset, events, candidatefj, fj_idx_lep, candidatelep_p4):
msk_delta = GenLep.delta_r(jet_pfcands) < 0.2
msk_pt = pt_array < 1

msk = (msk_lep | msk_gamma) & msk_delta
msk = ((msk_lep | msk_gamma) & msk_delta) | msk_pt

# apply the masking by selecting particles that don't have "msk"
Expand Down
7 changes: 6 additions & 1 deletion boostedhiggs/fakesprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ def process(self, events: ak.Array):
(muons.pt > 30)
& (np.abs(muons.eta) < 2.4)
& muons.mediumId
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | (muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2))
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | ((muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2)))
# additional cuts
& (np.abs(muons.dz) < 0.1)
& (np.abs(muons.dxy) < 0.02)
Expand Down Expand Up @@ -247,6 +247,7 @@ def process(self, events: ak.Array):

# OBJECT: AK8 fatjets
fatjets = events.FatJet
fatjets["msdcorr"] = fatjets.msoftdrop
fatjet_selector = (fatjets.pt > 200) & (abs(fatjets.eta) < 2.5) & fatjets.isTight
good_fatjets = fatjets[fatjet_selector]
good_fatjets = good_fatjets[ak.argsort(good_fatjets.pt, ascending=False)] # sort them by pt
Expand Down Expand Up @@ -293,6 +294,10 @@ def process(self, events: ak.Array):
"fj_phi": candidatefj.phi,
"mT_tight1": mT_tight1,
"mT_loose1": mT_loose1,
# added on Oct 9
"fj_mass": candidatefj.msdcorr,
"loose_lep1_miso": loose_lep1.miniPFRelIso_all,
"loose_lep2_miso": loose_lep2.miniPFRelIso_all,
}

for ch in self._channels:
Expand Down
55 changes: 47 additions & 8 deletions boostedhiggs/hwwprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
get_jec_jets,
get_JetVetoMap,
get_jmsr,
get_pileup_weight,
getJECVariables,
getJMSRVariables,
met_factory,
Expand Down Expand Up @@ -162,6 +163,12 @@ def add_selection(self, name: str, sel: np.ndarray, channel: str = "all"):
else:
self.cutflows[ch][name] = np.sum(selection_ch)

# Build a per-event boolean mask that rejects events with un-physically large
# pileup weights (nominal OR either systematic variation above ``cutoff``).
# NOTE(review): indentation below is flattened by the diff scrape; bodies
# belong one level inside the def.
def pileup_cutoff(self, events, year, yearmod, cutoff: float = 4):
# Dict with "nominal"/"up"/"down" numpy arrays, one weight per event.
pweights = get_pileup_weight(year, yearmod, events.Pileup.nPU.to_numpy())
# Boolean product == logical AND: event passes only if ALL three weights
# are at or below the cutoff.
pw_pass = (pweights["nominal"] <= cutoff) * (pweights["up"] <= cutoff) * (pweights["down"] <= cutoff)
logging.info(f"Passing pileup weight cut: {np.sum(pw_pass)} out of {len(events)} events")
# Returned mask is consumed by add_selection(name="PU_cutoff", ...).
return pw_pass

def process(self, events: ak.Array):
"""Returns skimmed events which pass preselection cuts and with the branches listed in self._skimvars"""

Expand Down Expand Up @@ -237,28 +244,36 @@ def process(self, events: ak.Array):
# OBJECT: muons
muons = ak.with_field(events.Muon, 0, "flavor")

loose_muons = (
# for now use 2 definitions of loose lepton and cut on the looser definition (i.e. without miso cut)
loose_muons1 = (
(muons.pt > 30)
& (np.abs(muons.eta) < 2.4)
& (muons.looseId)
& (((muons.pfRelIso04_all < 0.25) & (muons.pt < 55)) | (muons.pt >= 55))
)
loose_muons2 = (
(muons.pt > 30)
& (np.abs(muons.eta) < 2.4)
& (muons.looseId)
& (((muons.pfRelIso04_all < 0.25) & (muons.pt < 55)) | ((muons.pt >= 55) & (muons.miniPFRelIso_all < 0.8)))
)

tight_muons = (
(muons.pt > 30)
& (np.abs(muons.eta) < 2.4)
& muons.mediumId
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | (muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2))
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | ((muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2)))
# additional cuts
& (np.abs(muons.dz) < 0.1)
& (np.abs(muons.dxy) < 0.02)
)

n_loose_muons = ak.sum(loose_muons, axis=1)
n_loose_muons1 = ak.sum(loose_muons1, axis=1)
n_loose_muons2 = ak.sum(loose_muons2, axis=1)
n_tight_muons = ak.sum(tight_muons, axis=1)

if self._uselooselep:
good_muons = loose_muons
good_muons = loose_muons1
else:
good_muons = tight_muons

Expand Down Expand Up @@ -338,6 +353,7 @@ def process(self, events: ak.Array):
# OBJECT: AK4 jets
jets, jec_shifted_jetvars = get_jec_jets(events, events.Jet, self._year, not self.isMC, self.jecs, fatjets=False)
met = met_factory.build(events.MET, jets, {}) if self.isMC else events.MET
# met = events.MET

ht = ak.sum(jets.pt, axis=1)

Expand Down Expand Up @@ -398,6 +414,15 @@ def process(self, events: ak.Array):
# delta phi MET and higgs candidate
met_fj_dphi = candidatefj.delta_phi(met)

# leptonic tau veto
from boostedhiggs.utils import ELE_PDGID, MU_PDGID

loose_taus = (events.Tau.pt > 20) & (abs(events.Tau.eta) < 2.3)

loose_taus = events.Tau[loose_taus]
leptonic_taus = (loose_taus["decayMode"] == ELE_PDGID) | (loose_taus["decayMode"] == MU_PDGID)
msk_leptonic_taus = ~ak.any(leptonic_taus, axis=1)

######################
# Store variables
######################
Expand Down Expand Up @@ -442,14 +467,21 @@ def process(self, events: ak.Array):
# number
"n_loose_electrons": n_loose_electrons,
"n_tight_electrons": n_tight_electrons,
"n_loose_muons": n_loose_muons,
"n_loose_muons1": n_loose_muons1,
"n_loose_muons2": n_loose_muons2,
"n_tight_muons": n_tight_muons,
# second fatjet after candidate jet
"VH_fj_pt": VH_fj.pt,
"VH_fj_eta": VH_fj.eta,
"VH_fj_VScore": VScore(VH_fj),
# add jetveto as optional selection
"jetvetomap": cut_jetveto,
# added on October 9th
"loose_lep1_miso": ak.firsts(
muons[loose_muons1][ak.argsort(muons[loose_muons1].pt, ascending=False)]
).miniPFRelIso_all,
"loose_lep1_pt": ak.firsts(muons[loose_muons1][ak.argsort(muons[loose_muons1].pt, ascending=False)]).pt,
"msk_leptonic_taus": msk_leptonic_taus,
}

fatjetvars = {
Expand Down Expand Up @@ -536,7 +568,13 @@ def process(self, events: ak.Array):
# Selection
######################

if self.isMC:
# remove events with pileup weights un-physically large
pw_pass = self.pileup_cutoff(events, self._year, self._yearmod, cutoff=4)
self.add_selection(name="PU_cutoff", sel=pw_pass)

for ch in self._channels:

# trigger
if ch == "mu":
self.add_selection(
Expand All @@ -549,7 +587,7 @@ def process(self, events: ak.Array):

self.add_selection(name="METFilters", sel=metfilters)
self.add_selection(name="OneLep", sel=(n_good_muons == 1) & (n_loose_electrons == 0), channel="mu")
self.add_selection(name="OneLep", sel=(n_loose_muons == 0) & (n_good_electrons == 1), channel="ele")
self.add_selection(name="OneLep", sel=(n_loose_muons1 == 0) & (n_good_electrons == 1), channel="ele")
self.add_selection(name="NoTaus", sel=(n_loose_taus_mu == 0), channel="mu")
self.add_selection(name="NoTaus", sel=(n_loose_taus_ele == 0), channel="ele")
self.add_selection(name="AtLeastOneFatJet", sel=(NumFatjets >= 1))
Expand Down Expand Up @@ -660,8 +698,9 @@ def process(self, events: ak.Array):
tops = events.GenPart[
get_pid_mask(events.GenPart, 6, byall=False) * events.GenPart.hasFlags(["isLastCopy"])
]
# add_TopPtReweighting(self.weights[ch], tops.pt)
variables["top_reweighting"] = add_TopPtReweighting(tops.pt)

# will also save it as a variable just in case
variables["top_reweighting"] = add_TopPtReweighting(self.weights[ch], tops.pt)

if self.isSignal:
ew_weight = add_HiggsEW_kFactors(events.GenPart, dataset)
Expand Down
9 changes: 8 additions & 1 deletion boostedhiggs/zllprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ def process(self, events: ak.Array):
(muons.pt > 30)
& (np.abs(muons.eta) < 2.4)
& muons.mediumId
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | (muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2))
& (((muons.pfRelIso04_all < 0.20) & (muons.pt < 55)) | ((muons.pt >= 55) & (muons.miniPFRelIso_all < 0.2)))
# additional cuts
& (np.abs(muons.dz) < 0.1)
& (np.abs(muons.dxy) < 0.02)
Expand Down Expand Up @@ -249,6 +249,7 @@ def process(self, events: ak.Array):

# OBJECT: AK8 fatjets
fatjets = events.FatJet
fatjets["msdcorr"] = fatjets.msoftdrop
fatjet_selector = (fatjets.pt > 200) & (abs(fatjets.eta) < 2.5) & fatjets.isTight
good_fatjets = fatjets[fatjet_selector]
good_fatjets = good_fatjets[ak.argsort(good_fatjets.pt, ascending=False)] # sort them by pt
Expand Down Expand Up @@ -295,6 +296,12 @@ def process(self, events: ak.Array):
"fj_phi": candidatefj.phi,
"mT_tight1": mT_tight1,
"mT_loose1": mT_loose1,
# added on Oct 9
"fj_mass": candidatefj.msdcorr,
"loose_lep1_miso": loose_lep1.miniPFRelIso_all,
"loose_lep2_miso": loose_lep2.miniPFRelIso_all,
"tight_lep1_miso": tight_lep1.miniPFRelIso_all,
"tight_lep2_miso": tight_lep2.miniPFRelIso_all,
}

for ch in self._channels:
Expand Down
54 changes: 54 additions & 0 deletions triton-server/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Tag shared by BOTH stages so the builder's torch ABI matches the Triton
# server's bundled libtorch. Declared before the first FROM so each FROM can
# consume it.
ARG SERVERBASE=21.06-py3

# ---- Stage 1: build PyTorch Geometric companion extensions ------------------
FROM nvcr.io/nvidia/pytorch:${SERVERBASE} AS builder

# Force CUDA extension compilation even when no GPU is visible at build time.
ENV FORCE_CUDA=1
ARG LIB_WITH_CUDA=ON
ARG NPROC=4

# Each library below is cloned at a pinned tag, built with CMake against the
# conda-installed torch, and its shared objects collected into /workspace/.
# NOTE(review): pushd/popd are bash builtins — this assumes the base image's
# default RUN shell is bash, not POSIX sh; confirm for this nvcr.io image.
RUN git clone https://github.com/rusty1s/pytorch_cluster.git -b 1.5.9
RUN pushd pytorch_cluster &&\
    mkdir build && pushd build &&\
    cmake -DCMAKE_PREFIX_PATH=/opt/conda/lib/python3.8/site-packages/torch -DWITH_CUDA=${LIB_WITH_CUDA} .. &&\
    make -j ${NPROC} && mv *.so /workspace/ && popd &&\
    popd

RUN git clone https://github.com/rusty1s/pytorch_scatter.git -b 2.0.7
RUN pushd pytorch_scatter &&\
    mkdir build && pushd build &&\
    cmake -DCMAKE_PREFIX_PATH=/opt/conda/lib/python3.8/site-packages/torch -DWITH_CUDA=${LIB_WITH_CUDA} .. &&\
    make -j ${NPROC} && mv *.so /workspace/ && popd &&\
    popd

RUN git clone https://github.com/rusty1s/pytorch_spline_conv.git -b 1.2.1
RUN pushd pytorch_spline_conv &&\
    mkdir build && pushd build &&\
    cmake -DCMAKE_PREFIX_PATH=/opt/conda/lib/python3.8/site-packages/torch -DWITH_CUDA=${LIB_WITH_CUDA} .. &&\
    make -j ${NPROC} && mv *.so /workspace/ && popd &&\
    popd

RUN git clone https://github.com/rusty1s/pytorch_sparse.git -b 0.6.10
RUN pushd pytorch_sparse &&\
    mkdir build && pushd build &&\
    cmake -DCMAKE_PREFIX_PATH=/opt/conda/lib/python3.8/site-packages/torch -DWITH_CUDA=${LIB_WITH_CUDA} .. &&\
    make -j ${NPROC} && mv *.so /workspace/ && popd &&\
    popd

# pytorch_geometric itself is pure Python — installed (editable) only so the
# builds above/below have it available; no *.so is harvested from it.
RUN git clone https://github.com/rusty1s/pytorch_geometric.git
RUN pushd pytorch_geometric && pip install -e . && popd

# ---- Stage 2: Triton server image with the extensions layered in ------------
FROM nvcr.io/nvidia/tritonserver:${SERVERBASE}

# Make Triton's bundled libtorch and the CUDA compat libs resolvable, and
# force-load the extension libraries so torchscript models that use their ops
# find the operators registered at startup.
ENV LD_LIBRARY_PATH="/opt/tritonserver/backends/pytorch:/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64"
ENV LD_PRELOAD="/torch_geometric/lib/libtorchscatter.so /torch_geometric/lib/libtorchsparse.so /torch_geometric/lib/libtorchcluster.so /torch_geometric/lib/libtorchsplineconv.so"

# Shared-memory dir (used by Triton IPC) and the model-repository mount point.
RUN mkdir -p /run/shm
RUN mkdir -p /models

RUN mkdir -p /torch_geometric/lib
RUN mkdir -p /torch_geometric/examples/

# Copy only the compiled shared objects out of the builder stage; the source
# trees and conda env are left behind, keeping the runtime image small.
COPY --from=builder /workspace/libtorchscatter.so /torch_geometric/lib/
COPY --from=builder /workspace/libtorchsparse.so /torch_geometric/lib/
COPY --from=builder /workspace/libtorchcluster.so /torch_geometric/lib/
COPY --from=builder /workspace/libtorchsplineconv.so /torch_geometric/lib/
Loading

0 comments on commit 1f80c03

Please sign in to comment.