from FWCore/Framework/bin/cmsRun.cpp#L140
EventProcessorWithSentry proc;
from FWCore/Framework/bin/cmsRun.cpp#L351
EventProcessorWithSentry procTmp(
#! /bin/bash
# to launch a container using this profile:
#   lxc launch <container> -p default -p $USER

# Abort on any error (-e) and on use of an undefined variable (-u).
set -eu

# UID is already provided as a read-only shell variable by bash itself,
# so it must not (and cannot) be reassigned here.
#UID=$(id -u)
GID=$(id -g)

# Map container root onto the invoking user's uid/gid with a range of 1,
# in /etc/subuid-style "user:start:count" format (see the lxc launch
# comment above; presumably consumed by an unprivileged-container
# profile — confirm against the rest of the script).
SUBUID=root:$UID:1
SUBGID=root:$GID:1
#! /usr/bin/env python
# NOTE(review): appears to be a tool that maintains a Clang compilation
# database (this file elsewhere shows it invoked as llvm-ccdb-add from
# the SCRAM build rules) — confirm against the full script.

import sys
import os
import fcntl
import argparse
import json

# The default location for the compilation database is the file
# 'compile_commands.json' in the current directory.
def_dbname = 'compile_commands.json'
--- config/SCRAM/GMake/Makefile.rules 2016-09-09 13:00:19.000000000 +0200 | |
+++ config/SCRAM/GMake/Makefile.rules 2016-10-31 19:31:37.061151791 +0100 | |
@@ -515,6 +515,7 @@ define run_compile_command | |
$(CMD_echo) ">> Compiling $(2) $< " &&\ | |
$(VERB_ECHO) $4 $5 $< -o $@ &&\ | |
($4 $5 $< -o $@ || ($(if $(strip $6),$4 $6 $< -o $(basename $@).d && $(CMD_sed) -i -e 's|.*:|$@:|' $(basename $@).d && exit 1,exit 1))) $3 $(endlog_$(1)) | |
+ @llvm-ccdb-add -p "$(CMSSW_BASE)/.SCRAM/$(SCRAM_ARCH)/compile_commands.json" "$<" "$4 $5 $< -o $@" | |
endef | |
define compile_cxx_common | |
$(call run_compile_command,$2,$4,$5,$(CXX) -c $(3) $(CXXOPTIMISEDFLAGS) $(CXXSHAREDOBJECTFLAGS),$(if $(6),,$(CXX_MMD) $(CXX_MF) $(basename $@).d),$(if $(6),,$(CXX_MM))) |
from FWCore/Framework/bin/cmsRun.cpp#L140
EventProcessorWithSentry proc;
from FWCore/Framework/bin/cmsRun.cpp#L351
EventProcessorWithSentry procTmp(
Given the diversity and possible complexity of some legacy DQM modules, this guide will likely not address all the required changes. Please consider it more as a set of suggestions than as a strict list of steps to be followed; for more information please refer to the migration guide to the DQMAnalyzer
module, or ask the DQM Core team.
A DQMGlobalEDAnalyzer
module is a specialisation of an edm::global::EDAnalyzer
module:
global
module rather than a legacy module: there is only one copy of it, no matter how many threads or streams the job is configured to use, but it can "see" multiple events being processed at the same time; the advantage is that multiple events can be analysed concurrently; on the other hand the module's internal state (the data members) are not allowed to change during the analyze()
(or dqmAnalyze()
) method, unless in a concurrency-safe way.
See FWMultithreadedFrameworkGlobalModuleInterface for the details.

A DQMGlobalEDAnalyzer
module is similar to a DQMEDAnalyzer
, with a few differences:
global
module rather than a stream
module: there is only one copy of it, no matter how many threads or streams the job is configured to use, and it will "see" all events being processed; the advantage is a significant reduction in memory usage as the number of streams increases; on the other hand this means that its internal state (the data members) are not allowed to change during the analyze()
(or dqmAnalyze()
) method, unless in a concurrency-safe way.
See FWMultithreadedFrameworkGlobalModuleInterface for the details.
It uses ConcurrentMonitorElement
s rather than MonitorElement
s to expose a concurrency-safe interface to the DQMStore
; these objects are also "global": the DQMStore
holds a single copy of the histograms, and there is not me[text truncated]

#include <cstdio>
#include <cuda.h> | |
__host__ __device__ | |
void where() { | |
#if defined __CUDA_ARCH__ | |
printf(" on the device"); | |
#else | |
printf(" on the host"); | |
#endif |
# Open a background SSH connection (-f: background, -N: no remote command)
# to cmsusr.cms, exposing a local SOCKS proxy on port 1080 (-D 1080);
# ControlMaster=auto lets later ssh sessions reuse this connection.
ssh -f -N -o ControlMaster=auto -D 1080 cmsusr.cms
# Route subsequent proxy-aware tools (e.g. curl below) through that SOCKS5
# proxy — presumably needed because the machine running this has no direct
# outbound network access; confirm for your setup.
export ALL_PROXY=socks5://localhost:1080
# Download the CUDA 10.1 local-installer RPM for RHEL/CentOS 7 into the
# current directory, via the proxy configured above.
curl http://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda-repo-rhel7-10-1-local-10.1.243-418.87.00-1.0-1.x86_64.rpm -o cuda-repo-rhel7-10-1-local-10.1.243-418.87.00-1.0-1.x86_64.rpm
#! /bin/bash -e | |
CUDA_BASE=/usr/local/cuda | |
SYCL_BASE=$PWD | |
INSTALL_PATH=/opt/llvm | |
mkdir -p $SYCL_BASE/llvm | |
cd $SYCL_BASE/llvm | |
if ! [ -d .git ]; then |