From a83d432bee01a0c59f46c6126cf20b6c43e89fa0 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 09:50:46 +0200 Subject: [PATCH 01/57] Time to compile --- CControl_Handler.cc | 51 ++-- CControl_Handler.hh | 14 +- DAQController.cc | 314 ++++++++--------------- DAQController.hh | 40 +-- DDC10.cc | 16 +- DDC10.hh | 2 - MongoLog.cc | 45 ++-- MongoLog.hh | 4 +- Options.cc | 2 +- Options.hh | 3 +- StraxFormatter.cc | 482 ++++++++++++++++++++++++++++++++++ StraxFormatter.hh | 113 ++++++++ StraxInserter.cc | 610 -------------------------------------------- StraxInserter.hh | 100 -------- V1495.cc | 2 +- V1495.hh | 8 +- V1724.cc | 229 ++++++++--------- V1724.hh | 40 +-- V1724_MV.cc | 26 +- V1724_MV.hh | 5 +- V1730.cc | 32 ++- V1730.hh | 4 +- V2718.cc | 4 +- V2718.hh | 4 +- ccontrol.cc | 30 +-- main.cc | 117 ++------- 26 files changed, 989 insertions(+), 1308 deletions(-) create mode 100644 StraxFormatter.cc create mode 100644 StraxFormatter.hh delete mode 100644 StraxInserter.cc delete mode 100644 StraxInserter.hh diff --git a/CControl_Handler.cc b/CControl_Handler.cc index f01b8076..119ec158 100644 --- a/CControl_Handler.cc +++ b/CControl_Handler.cc @@ -9,14 +9,14 @@ #include #include -CControl_Handler::CControl_Handler(MongoLog *log, std::string procname){ - fOptions = NULL; +CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string procname){ + fOptions = nullptr; fLog = log; fProcname = procname; fCurrentRun = fBID = fBoardHandle-1; - fV2718 = NULL; - fV1495 = NULL; - fDDC10 = NULL; + fV2718 = nullptr; + fV1495 = nullptr; + fDDC10 = nullptr; fStatus = DAXHelpers::Idle; } @@ -25,7 +25,7 @@ CControl_Handler::~CControl_Handler(){ } // Initialising various devices namely; V2718 crate controller, V1495, DDC10... -int CControl_Handler::DeviceArm(int run, Options *opts){ +int CControl_Handler::DeviceArm(int run, std::shared_ptr& opts){ fStatus = DAXHelpers::Arming; @@ -52,7 +52,7 @@ int CControl_Handler::DeviceArm(int run, Options *opts){ return -1; } BoardType cc_def = bv[0]; - fV2718 = new V2718(fLog); + fV2718 = std::make_unique(fLog); if (fV2718->CrateInit(copts, cc_def.link, cc_def.crate)!=0){ fLog->Entry(MongoLog::Error, "Failed to initialize V2718 crate controller"); fStatus = DAXHelpers::Idle; @@ -69,7 +69,7 @@ int CControl_Handler::DeviceArm(int run, Options *opts){ // Init DDC10 only when included in config - only for TPC if (dv.size() == 1){ if(fOptions->GetHEVOpt(hopts) == 0){ - fDDC10 = new DDC10(); + fDDC10 = std::make_unique(); if(fDDC10->Initialize(hopts) != 0){ fLog->Entry(MongoLog::Error, "Failed to initialise DDC10 HEV"); fStatus = DAXHelpers::Idle; @@ -90,7 +90,7 @@ int CControl_Handler::DeviceArm(int run, Options *opts){ if (mv.size() == 1){ BoardType mv_def = mv[0]; fBID = mv_def.board; - fV1495 = new V1495(fLog, fOptions, mv_def.board, fBoardHandle, mv_def.vme_address); + fV1495 = std::make_unique(fLog, fOptions, mv_def.board, fBoardHandle, mv_def.vme_address); // Writing registers to the V1495 board for(auto regi : fOptions->GetRegisters(fBID, true)){ unsigned int reg = DAXHelpers::StringToHex(regi.reg); @@ -110,16 +110,13 @@ int CControl_Handler::DeviceArm(int run, Options *opts){ } // end devicearm - - - // Send the start signal from crate controller int CControl_Handler::DeviceStart(){ if(fStatus != DAXHelpers::Armed){ fLog->Entry(MongoLog::Warning, "V2718 attempt to start without arming. 
Maybe unclean shutdown"); return 0; } - if(fV2718 == NULL || fV2718->SendStartSignal()!=0){ + if(!fV2718 || fV2718->SendStartSignal()!=0){ fLog->Entry(MongoLog::Error, "V2718 either failed to start"); fStatus = DAXHelpers::Error; return -1; @@ -135,29 +132,20 @@ int CControl_Handler::DeviceStop(){ //fLog->Entry(MongoLog::Local, "Beginning stop sequence"); // If V2718 here then send stop signal - if(fV2718 != NULL){ + if(fV2718){ if(fV2718->SendStopSignal() != 0){ fLog->Entry(MongoLog::Warning, "Failed to stop V2718"); } - delete fV2718; - fV2718 = NULL; + fV2718.reset(); } // Don't need to stop the DDC10 but just clean up a bit - if(fDDC10 != NULL){ - delete fDDC10; - fDDC10 = NULL; - } - - if(fV1495 != NULL){ - delete fV1495; - fV1495 = NULL; - } + fDDC10.reset(); + fV1495.reset(); fStatus = DAXHelpers::Idle; return 0; } - // Reporting back on the status of V2718, V1495, DDC10 etc... bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ using namespace std::chrono; @@ -168,7 +156,7 @@ bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ "time" << bsoncxx::types::b_date(system_clock::now()); auto in_array = builder << "active" << bsoncxx::builder::stream::open_array; - if(fV2718 != NULL){ + if(fV2718){ auto crate_options = fV2718->GetCrateOptions(); in_array << bsoncxx::builder::stream::open_document << "run_number" << fCurrentRun @@ -182,8 +170,9 @@ bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ } auto after_array = in_array << bsoncxx::builder::stream::close_array; return after_array << bsoncxx::builder::stream::finalize; + /* // DDC10 parameters might change for future updates of the XENONnT HEV - if(fDDC10 != NULL){ + if(fDDC10){ auto hev_options = fDDC10->GetHEVOptions(); in_array << bsoncxx::builder::stream::open_document << "type" << "DDC10" @@ -207,7 +196,7 @@ bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ << bsoncxx::builder::stream::close_document; } // Write the settings for the Muon Veto V1495 board into status doc - if(fV1495 != NULL){ + if(fV1495){ auto registers = fOptions->GetRegisters(fBID); in_array << bsoncxx::builder::stream::open_document << "type" << "V1495" @@ -223,5 +212,5 @@ bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ after_array = in_array << bsoncxx::builder::stream::close_array; return after_array << bsoncxx::builder::stream::finalize; - -} +*/ +} diff --git a/CControl_Handler.hh b/CControl_Handler.hh index 55d9d46c..62b02c80 100644 --- a/CControl_Handler.hh +++ b/CControl_Handler.hh @@ -13,27 +13,27 @@ class V1495; class CControl_Handler{ public: - CControl_Handler(MongoLog *log, std::string procname); + CControl_Handler(std::shared_ptr& log, std::string procname); ~CControl_Handler(); bsoncxx::document::value GetStatusDoc(std::string hostname); - int DeviceArm(int run, Options *opts); + int DeviceArm(int run, std::shared_ptr& opts); int DeviceStart(); int DeviceStop(); private: - V2718 *fV2718; - DDC10 *fDDC10; - V1495 *fV1495; + std::unique_ptr fV2718; + std::unique_ptr fDDC10; + std::unique_ptr fV1495; int fStatus; int fCurrentRun; int fBID; int fBoardHandle; std::string fProcname; - Options *fOptions; - MongoLog *fLog; + std::shared_ptr fOptions; + std::shared_ptr fLog; }; #endif diff --git a/DAQController.cc b/DAQController.cc index c3bdbaef..dd04b7c3 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -22,8 +22,6 @@ // 3-running // 4-error -const int MaxBoardsPerLink(8); - 
DAQController::DAQController(MongoLog *log, std::string hostname){ fLog=log; fOptions = NULL; @@ -51,10 +49,10 @@ std::string DAQController::run_mode(){ } } -int DAQController::InitializeElectronics(Options *options, std::vector&keys){ +int DAQController::InitializeElectronics(std::shared_ptr& options){ End(); - + fOptions = options; fNProcessingThreads = fOptions->GetNestedInt("processing_threads."+fHostname, 8); fLog->Entry(MongoLog::Local, "Beginning electronics initialization with %i threads", @@ -66,32 +64,19 @@ int DAQController::InitializeElectronics(Options *options, std::vector&keys for(auto d : fOptions->GetBoards("V17XX")){ fLog->Entry(MongoLog::Local, "Arming new digitizer %i", d.board); - V1724 *digi; - if(d.type == "V1724_MV") - digi = new V1724_MV(fLog, fOptions); - else if(d.type == "V1730") - digi = new V1730(fLog, fOptions); - else - digi = new V1724(fLog, fOptions); - - - if(digi->Init(d.link, d.crate, d.board, d.vme_address)==0){ - fDigitizers[d.link].push_back(digi); - BIDs.push_back(digi->bid()); - fBoardMap[digi->bid()] = digi; - fCheckFails[digi->bid()] = false; - - if(std::find(keys.begin(), keys.end(), d.link) == keys.end()){ - fLog->Entry(MongoLog::Local, "Defining a new optical link at %i", d.link); - keys.push_back(d.link); - } - fLog->Entry(MongoLog::Debug, "Initialized digitizer %i", d.board); - - } - else{ - delete digi; - fLog->Entry(MongoLog::Warning, "Failed to initialize digitizer %i", d.board); - fStatus = DAXHelpers::Idle; + std::shared_ptr digi; + try{ + if(d.type == "V1724_MV") + digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); + else if(d.type == "V1730") + digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); + else + digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); + fDigitizers[d.link].emplace_back(digi); + BIDs.push_back(digi->bid()); + }catch(const std::exception& e) { + fLog->Entry(MongoLog::Warning, "Failed to initialize digitizer %i: %s", d.board, + e.what()); return -1; } } @@ -128,27 +113,30 @@ int DAQController::InitializeElectronics(Options *options, std::vector&keys fLog->Entry(MongoLog::Debug, "Digitizer programming successful"); if (fOptions->GetString("baseline_dac_mode") == "fit") fOptions->UpdateDAC(dac_values); - for(auto const& link : fDigitizers ) { - for(auto digi : link.second){ + for(auto& link : fDigitizers ) { + for(auto& digi : link.second){ if(fOptions->GetInt("run_start", 0) == 1) digi->SINStart(); else digi->AcquisitionStop(); } } + if (OpenThreads()) { + fLog->Entry(MongoLog::Warning, "Error opening threads"); + fStatus = DAQXHelpers::Idle; + return -1; + } sleep(1); fStatus = DAXHelpers::Armed; fLog->Entry(MongoLog::Local, "Arm command finished, returning to main loop"); - - return 0; } int DAQController::Start(){ if(fOptions->GetInt("run_start", 0) == 0){ - for( auto const& link : fDigitizers ){ - for(auto digi : link.second){ + for(auto& link : fDigitizers ){ + for(auto& digi : link.second){ // Ensure digitizer is ready to start if(digi->EnsureReady(1000, 1000)!= true){ @@ -204,55 +192,37 @@ int DAQController::Stop(){ void DAQController::End(){ Stop(); + fLog->Entry(MongoLog::Local, "Closing Processing Threads"); + CloseThreads(); fLog->Entry(MongoLog::Local, "Closing Digitizers"); - for( auto const& link : fDigitizers ){ - for(auto digi : link.second){ + for(auto& link : fDigitizers ){ + for(auto& digi : link.second){ digi->End(); - delete digi; + digi.reset(); } + link.clear(); } - fLog->Entry(MongoLog::Local, "Closing 
Processing Threads"); - CloseProcessingThreads(); fDigitizers.clear(); fStatus = DAXHelpers::Idle; - if(fBuffer.size() != 0){ - fLog->Entry(MongoLog::Warning, "Deleting uncleard buffer of size %i", - fBuffer.size()); - std::for_each(fBuffer.begin(), fBuffer.end(), [](auto dp){delete dp;}); - fBuffer.clear(); - fBufferLength = 0; - fBufferSize = 0; - } - fOptions = NULL; std::cout<<"Finished end"<Entry(MongoLog::Debug, "Raw data buffer being brute force cleared."); - std::for_each(fBuffer.begin(), fBuffer.end(), [](auto dp){delete dp;}); - fBuffer.clear(); - } - fBufferLength = 0; + fDataRate = 0; - fBufferSize = 0; - fBufferMutex.unlock(); - u_int32_t board_status = 0; + uint32_t board_status = 0; int readcycler = 0; int err_val = 0; - std::list local_buffer; - data_packet* dp = nullptr; + std::list> local_buffer; + std::unique_ptr dp; + int words = 0; int local_size(0); fRunning[link] = true; while(fReadLoop){ - - for(auto digi : fDigitizers[link]) { + for(auto& digi : fDigitizers[link]) { // Every 1k reads check board status if(readcycler%10000==0){ @@ -261,9 +231,8 @@ void DAQController::ReadData(int link){ fLog->Entry(MongoLog::Local, "Board %i has status 0x%04x", digi->bid(), board_status); } - if (fCheckFails[digi->bid()]) { - fCheckFails[digi->bid()] = false; - err_val = fBoardMap[digi->bid()]->CheckErrors(); + if (digi->CheckFail()) { + err_val = digi->CheckErrors(); fLog->Entry(MongoLog::Local, "Error %i from board %i", err_val, digi->bid()); if (err_val == -1 || err_val == 0) { @@ -275,32 +244,20 @@ void DAQController::ReadData(int link){ digi->bid()); } } - if (dp == nullptr) dp = new data_packet; - if((dp->size = digi->ReadMBLT(dp->buff))<0){ - if (dp->buff != nullptr) { - delete[] dp->buff; // possible leak, catch here - dp->buff = nullptr; - delete dp; - dp = nullptr; - } + if((words = digi->Read(dp))<0){ + dp.reset(); fStatus = DAXHelpers::Error; - break; - } - if(dp->size>0){ - dp->bid = digi->bid(); - dp->header_time = digi->GetHeaderTime(dp->buff, dp->size); - dp->clock_counter = digi->GetClockCounter(dp->header_time); - local_buffer.push_back(dp); - local_size += dp->size; - dp = nullptr; + break; + } else if(words>0){ + dp->digi = digi; + local_buffer.emplace_back(std::move(dp)); + local_size += words*sizeof(char32_t); } } // for digi in digitizers if (local_buffer.size() > 0) { - const std::lock_guard lg(fBufferMutex); - fBufferLength += local_buffer.size(); - fBuffer.splice(fBuffer.end(), local_buffer); // clears local_buffer - fBufferSize += local_size; fDataRate += local_size; + int selector = (fCounter++)%fNProcessingThreads; + fProcessingThreads[selector]->ReceiveDatapackets(local_buffer); local_size = 0; } readcycler++; @@ -315,8 +272,8 @@ std::map DAQController::GetDataPerChan(){ // Clears the private maps in the StraxInserters const std::lock_guard lg(fPTmutex); std::map retmap; - for (const auto& pt : fProcessingThreads) - pt.inserter->GetDataPerChan(retmap); + for (auto& p : fProcessors) + p->GetDataPerChan(retmap); return retmap; } @@ -333,107 +290,51 @@ int DAQController::GetBufferLength() { [](int tot, auto pt){return tot + pt.inserter->GetBufferLength();}); } -void DAQController::GetDataFormat(std::map>& retmap){ - for( auto const& link : fDigitizers ) - for(auto digi : link.second) - retmap[digi->bid()] = digi->DataFormatDefinition; -} - -int DAQController::GetData(std::list* retQ, unsigned num){ - if (fBufferLength == 0) return 0; - int ret = 0; - data_packet* dp = nullptr; - const std::lock_guard lg(fBufferMutex); - if (fBuffer.size() == 0) { - return 
0; - } - if (num == 0) num = std::max(16, std::min(fMaxEventsPerThread, fBufferLength >> 4)); - do { - dp = fBuffer.front(); - fBuffer.pop_front(); - fBufferLength--; - fBufferSize -= dp->size; - ret += dp->size; - retQ->push_back(dp); - } while (retQ->size() < num && fBuffer.size()>0); - return ret; -} - -int DAQController::GetData(data_packet* &dp) { - if (fBufferLength == 0) return 0; - const std::lock_guard lg(fBufferMutex); - if (fBuffer.size() == 0) { - return 0; - } - dp = fBuffer.front(); - fBuffer.pop_front(); - fBufferSize -= dp->size; - fBufferLength--; - return 1; -} - -bool DAQController::CheckErrors(){ - - // This checks for errors from the threads by checking the - // error flag in each object. It's appropriate to poll this - // on the order of ~second(s) and initialize a STOP in case - // the function returns "true" - - const std::lock_guard lg(fPTmutex); - for(unsigned int i=0; iCheckError()){ - fLog->Entry(MongoLog::Error, "Error found in processing thread."); - fStatus=DAXHelpers::Error; - return true; - } - } - return false; -} - -int DAQController::OpenProcessingThreads(){ +int DAQController::OpenThreads(){ int ret = 0; const std::lock_guard lg(fPTmutex); + fProcessingThreads.reserve(fNProcessingThreads); for(int i=0; iInitialize(fOptions, fLog, this, fHostname)) { - p.pthread = new std::thread(); // something to delete later - ret++; - } else - p.pthread = new std::thread(&StraxInserter::ReadAndInsertData, p.inserter); - fProcessingThreads.push_back(p); + try { + fFormatters.emplace_back(std::make_unique(fOptions, fLog)); + fProcessingThreads.emplace_back(&StraxFormatter::Process, fFormatters.back().get()); + } catch(const std::exception& e) { + fLog->Entry(MongoLog::Warning, "Error opening processing threads: %s", + e.what()); + return -1; + } } - return ret; + fReadoutThreads.reserve(fDigitizers.size()); + for (auto& p : fDigitizers) + fReadoutThreads.emplace_back(&DAQController::ReadData, this, p.first); + return 0; } -void DAQController::CloseProcessingThreads(){ +void DAQController::CloseThreads(){ std::map board_fails; const std::lock_guard lg(fPTmutex); - for(unsigned int i=0; iClose(board_fails); - // two stage process so there's time to clear data - } + for (auto& sf : fFormatters) sf->Close(board_fails); + // give threads time to finish std::this_thread::sleep_for(std::chrono::milliseconds(10)); - for(unsigned int i=0; ijoin(); - delete fProcessingThreads[i].pthread; - } - + for (auto& sf : fFormatters) sf.reset(); + fFormatters.clear(); + for (auto& t : fProcessingThreads) if (t.joinable()) t.join(); fProcessingThreads.clear(); + if (std::accumulate(board_fails.begin(), board_fails.end(), 0, - [=](int tot, std::pair iter) {return tot + iter.second;})) { + [=](int tot, auto& iter) {return std::move(tot) + iter.second;})) { std::stringstream msg; msg << "Found board failures: "; for (auto& iter : board_fails) msg << iter.first << ":" << iter.second << " | "; fLog->Entry(MongoLog::Warning, msg.str()); } + for (auto& t : fReadoutThreads) if (t.joinable()) t.join(); } -void DAQController::InitLink(std::vector& digis, +void DAQController::InitLink(std::vector>& digis, std::map>>& cal_values, int& ret) { std::string BL_MODE = fOptions->GetString("baseline_dac_mode", "fixed"); - std::map> dac_values; + std::map> dac_values; int nominal_baseline = fOptions->GetInt("baseline_value", 16000); if (BL_MODE == "fit") { if ((ret = FitBaselines(digis, dac_values, nominal_baseline, cal_values))) { @@ -499,7 +400,7 @@ void DAQController::InitLink(std::vector& digis, return; } 
-int DAQController::FitBaselines(std::vector &digis, +int DAQController::FitBaselines(std::vector> &digis, std::map> &dac_values, int target_baseline, std::map>> &cal_values) { using std::vector; @@ -516,8 +417,8 @@ int DAQController::FitBaselines(std::vector &digis, std::chrono::milliseconds ms_between_triggers(fOptions->GetInt("baseline_ms_between_triggers", 10)); vector DAC_cal_points = {60000, 30000, 6000}; // arithmetic overflow std::map> channel_finished; - std::map buffers; - std::map bytes_read; + std::map> buffers; + std::map words_read; std::map>> bl_per_channel; std::map> diff; @@ -534,7 +435,7 @@ int DAQController::FitBaselines(std::vector &digis, int counts_total(0), counts_around_max(0); double B,C,D,E,F, slope, yint, baseline; double fraction_around_max = fOptions->GetDouble("baseline_fraction_around_max", 0.8); - u_int32_t words_in_event, channel_mask, words_per_channel, idx; + u_int32_t words_in_event, channel_mask, words; u_int16_t val0, val1; int channels_in_event; @@ -614,68 +515,63 @@ int DAQController::FitBaselines(std::vector &digis, // readout for (auto d : digis) { - bytes_read[d->bid()] = d->ReadMBLT(buffers[d->bid()]); + bytes_read[d->bid()] = d->Read(buffers[d->bid()]); } // decode - if (std::any_of(bytes_read.begin(), bytes_read.end(), + if (std::any_of(words_read.begin(), words_read.end(), [=](auto p) {return p.second < 0;})) { for (auto d : digis) { - if (bytes_read[d->bid()] < 0) + if (words_read[d->bid()] < 0) fLog->Entry(MongoLog::Error, "Board %i has readout error in baselines", d->bid()); } - std::for_each(buffers.begin(), buffers.end(), [](auto p){delete[] p.second;}); return -2; } - if (std::any_of(bytes_read.begin(), bytes_read.end(), [=](auto p) { + if (std::any_of(words_read.begin(), words_read.end(), [=](auto p) { return (0 <= p.second) && (p.second <= 16);})) { // header-only readouts??? - for (auto& p : bytes_read) if ((0 <= p.second) && (p.second <= 16)) + for (auto& p : words_read) if ((0 <= p.second) && (p.second <= 16)) fLog->Entry(MongoLog::Local, "Board %i undersized readout (%i)", p.first, p.second); step--; steps_repeated++; - for (auto p : buffers) - if (bytes_read[p.first] > 0) delete[] p.second; continue; } // analyze for (auto d : digis) { bid = d->bid(); - idx = 0; - while (((int)idx * sizeof(u_int32_t) < bytes_read[bid])) { - if ((buffers[bid][idx]>>28) == 0xA) { - words_in_event = buffers[bid][idx]&0xFFFFFFF; - if (words_in_event == 4) { - idx += 4; + auto it = buffers[bid]->buff.begin(); + while (it < buffers[bid]->buff.end()) { + if ((*it)>>28 == 0xA) { + words = (*it)&0x7FFFFFFF; + std::u32string_view sv(buffers[bid]->buff.data() + std::distance(buffers[bid]->buff.begin(), it), words); + std::tie(words_in_event, channel_mask, std::ignore, std::ignore) = d->UnpackEventHeader(sv); + if (words == 4) { + it += 4; continue; } - channel_mask = buffers[bid][idx+1]&0xFF; - if (d->DataFormatDefinition["channel_mask_msb_idx"] != -1) { - channel_mask |= ( ((buffers[bid][idx+2]>>24)&0xFF)<<8 ); - } - if (channel_mask == 0) { // should be impossible? - idx += 4; + if (mask == 0) { // should be impossible? 
+ it += 4; continue; } channels_in_event = std::bitset<16>(channel_mask).count(); - words_per_channel = (words_in_event - 4)/channels_in_event; - words_per_channel -= d->DataFormatDefinition["channel_header_words"]; - - idx += 4; + it += words; + sv.remove_prefix(4); for (unsigned ch = 0; ch < d->GetNumChannels(); ch++) { if (!(channel_mask & (1 << ch))) continue; - idx += d->DataFormatDefinition["channel_header_words"]; + std::u32string_view wf; + std::tie(std::ignore, words, std::ignore, wf) = d->UnpackChannelHeader(sv, + 0, 0, 0, words, channels_in_event); vector hist(0x4000, 0); - for (unsigned w = 0; w < words_per_channel; w++) { - val0 = buffers[bid][idx+w]&0xFFFF; - val1 = (buffers[bid][idx+w]>>16)&0xFFFF; + for (auto w : wf) { + val0 = w&0x3FFF; + val1 = (w>>16)&0x3FFF; if (val0*val1 == 0) continue; hist[val0 >> rebin_factor]++; hist[val1 >> rebin_factor]++; } - idx += words_per_channel; + sv.remove_prefix(words); auto max_it = std::max_element(hist.begin(), hist.end()); auto max_start = std::max(max_it - bins_around_max, hist.begin()); auto max_end = std::min(max_it + bins_around_max+1, hist.end()); @@ -688,7 +584,7 @@ int DAQController::FitBaselines(std::vector &digis, std::distance(hist.begin(), max_it)<Entry(MongoLog::Local, "Bd %i ch %i too many skipped samples", bid, ch); } @@ -702,12 +598,10 @@ int DAQController::FitBaselines(std::vector &digis, } // for each channel } else { // if header - idx++; + it++; } } // end of while in buffer } // process per digi - // cleanup buffers - for (auto p : buffers) delete[] p.second; if (redo_iter) { redo_iter = false; step--; diff --git a/DAQController.hh b/DAQController.hh index 195864be..a7022472 100644 --- a/DAQController.hh +++ b/DAQController.hh @@ -14,12 +14,6 @@ class StraxInserter; class MongoLog; class Options; class V1724; -class data_packet; - -struct processingThread{ - std::thread *pthread; - StraxInserter *inserter; -}; class DAQController{ /* @@ -28,10 +22,10 @@ class DAQController{ */ public: - DAQController(MongoLog *log=NULL, std::string hostname="DEFAULT"); + DAQController(std::shared_ptr&, std::string hostname="DEFAULT"); ~DAQController(); - int InitializeElectronics(Options *options, std::vector &keys); + int InitializeElectronics(std::shared_ptr&); int status(){return fStatus;} int GetBufferLength(); @@ -39,50 +33,42 @@ public: int Start(); int Stop(); - void ReadData(int link); void End(); - int GetData(std::list* retQ, unsigned num = 0); - int GetData(data_packet* &dp); - int GetDataSize(){int ds = fDataRate; fDataRate=0; return ds;} std::map GetDataPerChan(); - bool CheckErrors(); void CheckError(int bid) {fCheckFails[bid] = true;} - int OpenProcessingThreads(); - void CloseProcessingThreads(); long GetStraxBufferSize(); int GetBufferSize() {return fBufferSize.load();} void GetDataFormat(std::map>&); private: - - void InitLink(std::vector&, std::map>>&, int&); - int FitBaselines(std::vector&, std::map>&, int, + void ReadData(int link); + int OpenProcessingThreads(); + void CloseProcessingThreads(); + void InitLink(std::vector>&, std::map>>&, int&); + int FitBaselines(std::vector>&, std::map>&, int, std::map>>&); - std::vector fProcessingThreads; - std::map> fDigitizers; - std::list fBuffer; - std::mutex fBufferMutex; + std::vector> fFormatters; + std::vector fProcessingThreads; + std::map>> fDigitizers; std::mutex fMapMutex; - std::mutex fPTmutex; std::atomic_bool fReadLoop; std::map fRunning; int fStatus; int fNProcessingThreads; std::string fHostname; - MongoLog *fLog; - Options *fOptions; - int 
fMaxEventsPerThread; + std::shared_ptr fLog; + std::shared_ptr fOptions; + std::shared_ptr fTP; // For reporting to frontend std::atomic_int fBufferSize; std::atomic_int fBufferLength; std::atomic_int fDataRate; - std::map fBoardMap; std::map fCheckFails; }; diff --git a/DDC10.cc b/DDC10.cc index e37db39c..ba029472 100644 --- a/DDC10.cc +++ b/DDC10.cc @@ -41,12 +41,12 @@ int DDC10::Initialize(HEVOptions d_opts) exp_glob, "Network is unreachable", connection_failed, exp_end)) { case connection_failed: - cout << endl << "DDC10: connection failed" << endl; + std::cout << std::endl << "DDC10: connection failed" << std::endl; return 1; case prompt: break; case EXP_TIMEOUT: - cout << "DDC10: Timeout, may be invalid host" << endl; + std::cout << "DDC10: Timeout, may be invalid host" << std::endl; return 1; } @@ -58,10 +58,10 @@ int DDC10::Initialize(HEVOptions d_opts) case prompt: break; case permission_denied: - cout << endl << "DDC10: Permission denied" << endl; + std::cout << std::endl << "DDC10: Permission denied" << std::endl; return 1; case EXP_TIMEOUT: - cout << "DDC10: Timeout, may be invalid host" << endl; + std::cout << "DDC10: Timeout, may be invalid host" << std::endl; return 1; } @@ -116,22 +116,22 @@ int DDC10::Initialize(HEVOptions d_opts) exp_glob, "initialization done", prompt, // third case exp_end)) { case command_not_found: - cout << endl << "DDC10: unknown command" << endl; + std::cout << std::endl << "DDC10: unknown command" << std::endl; success = false; break; case usage: success = false; - cout << endl << "DDC10: wrong usage of \"Initialize\"" << endl; + std::cout << std::endl << "DDC10: wrong usage of \"Initialize\"" << std::endl; break; case EXP_TIMEOUT: success = false; - cout << "DDC10: Login timeout" << endl; + std::cout << "DDC10: Login timeout" << std::endl; break; case prompt: break; default: success = false; - cout << endl << "DDC10: unknown error" << endl; + std::cout << std::endl << "DDC10: unknown error" << std::endl; break; } diff --git a/DDC10.hh b/DDC10.hh index f6e8fa71..dd6b8fde 100644 --- a/DDC10.hh +++ b/DDC10.hh @@ -3,8 +3,6 @@ #include "Options.hh" -using namespace std; - class DDC10{ public: diff --git a/MongoLog.cc b/MongoLog.cc index c3dfc975..39443b23 100644 --- a/MongoLog.cc +++ b/MongoLog.cc @@ -5,9 +5,10 @@ #include #include -MongoLog::MongoLog(int DeleteAfterDays, std::string log_dir){ +MongoLog::MongoLog(int DeleteAfterDays, std::string log_dir, std::string connection_uri, + std::string db, std::string collection, std::string host){ fLogLevel = 0; - fHostname = "_host_not_set"; + fHostname = host; fDeleteAfterDays = DeleteAfterDays; fFlushPeriod = 5; // seconds fOutputDir = log_dir; @@ -16,6 +17,20 @@ MongoLog::MongoLog(int DeleteAfterDays, std::string log_dir){ fFlush = true; fFlushThread = std::thread(&MongoLog::Flusher, this); fRunId = "none"; + + try{ + mongocxx::uri uri{connection_uri}; + fMongoClient = mongocxx::client(uri); + fMongoCollection = fMongoClient[db][collection]; + } + catch(const std::exception &e){ + std::cout<<"Couldn't initialize the log. 
So gonna fail then."< #include -Options::Options(MongoLog *log, std::string options_name, std::string hostname, +Options::Options(std::shared_ptr& log, std::string options_name, std::string hostname, std::string suri, std::string dbname, std::string override_opts) : fLog(log), fDBname(dbname), fHostname(hostname) { bson_value = NULL; diff --git a/Options.hh b/Options.hh index d5204295..52512525 100644 --- a/Options.hh +++ b/Options.hh @@ -60,13 +60,14 @@ class MongoLog; class Options{ public: - Options(MongoLog*, std::string, std::string, std::string, std::string, std::string); + Options(std::shared_ptr&, std::string, std::string, std::string, std::string, std::string); ~Options(); int GetInt(std::string, int=-1); long int GetLongInt(std::string, long int=-1); double GetDouble(std::string, double=-1); std::string GetString(std::string, std::string=""); + std::string Hostname() {return fHostname;} std::vector GetBoards(std::string); std::vector GetRegisters(int, bool=false); diff --git a/StraxFormatter.cc b/StraxFormatter.cc new file mode 100644 index 00000000..5b8f4c83 --- /dev/null +++ b/StraxFormatter.cc @@ -0,0 +1,482 @@ +#include "StraxFormatter.hh" +#include +#include "DAQController.hh" +#include "MongoLog.hh" +#include "Options.hh" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace fs=std::experimental::filesystem; +using namespace std::chrono; +const int event_header_words = 4, max_channels = 16; + +double timespec_subtract(struct timespec& a, struct timespec& b) { + return (a.tv_sec - b.tv_sec)*1e6 + (a.tv_nsec - b.tv_nsec)/1e3; +} + +StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptr& log){ + fActive = true; + fChunkNameLength=6; + fStraxHeaderSize=24; + fBytesProcessed = 0; + fBufferSize = 0; + fProcTimeDP = fProcTimeEv = fProcTimeCh = fCompTime = 0.; + fOptions = options; + fChunkLength = long(fOptions->GetDouble("strax_chunk_length", 5)*1e9); // default 5s + fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s + fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2); + fCompressor = fOptions->GetString("compressor", "lz4"); + fFullChunkLength = fChunkLength+fChunkOverlap; + fHostname = fOptions->Hostname(); + std::string run_name = fOptions->GetString("run_identifier", "run"); + + fEmptyVerified = 0; + fLog = log; + + fBufferNumChunks = fOptions->GetInt("strax_buffer_num_chunks", 2); + fWarnIfChunkOlderThan = fOptions->GetInt("strax_chunk_phase_limit", 2); + + std::string output_path = fOptions->GetString("strax_output_path", "./"); + try{ + fs::path op(output_path); + op /= run_name; + fOutputPath = op; + fs::create_directory(op); + } + catch(...){ + fLog->Entry(MongoLog::Error, "StraxFormatter::Initialize tried to create output directory but failed. 
Check that you have permission to write here."); + throw std::runtime_error("No write permissions"); + } +} + +StraxFormatter::~StraxFormatter(){ + fActive = false; + int counter_short = 0, counter_long = 0; + if (fBufferLength.load() > 0) + fLog->Entry(MongoLog::Local, "Thread %lx waiting to stop, has %i events left", + fThreadId, fBufferLength.load()); + else + fLog->Entry(MongoLog::Local, "Thread %lx stopping", fThreadId); + int events_start = fBufferLength.load(); + do{ + events_start = fBufferLength.load(); + while (fRunning && counter_short++ < 500) + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + if (counter_short >= 500) + fLog->Entry(MongoLog::Message, "Thread %lx taking a while to stop, still has %i evts", + fThreadId, fBufferLength.load()); + counter_short = 0; + } while (fRunning && fBufferLength.load() > 0 && events_start > fBufferLength.load() && counter_long++ < 10); + if (fRunning) { + fLog->Entry(MongoLog::Warning, "Force-quitting thread %lx: %i events lost", + fThreadId, fBufferLength.load()); + fForceQuit = true; + std::this_thread::sleep_for(std::chrono::seconds(2)); + } + while (fRunning) { + fLog->Entry(MongoLog::Message, "Still waiting for thread %lx to stop", fThreadId); + std::this_thread::sleep_for(std::chrono::seconds(2)); + } + std::stringstream ss; + ss << std::hex << fThreadId; + std::map times { + {"data_packets_us", fProcTimeDP}, + {"events_us", fProcTimeEv}, + {"fragments_us", fProcTimeCh}, + {"compression_us", fCompTime} + }; + std::map> counters { + {"fragments", fFragsPerEvent}, + {"events", fEvPerDP}, + {"data_packets", fBufferCounter}, + {"chunks", fBytesPerChunk} + }; + fOptions->SaveBenchmarks(counters, fBytesProcessed, ss.str(), times); +} + +void StraxFormatter::Close(std::map& ret){ + fActive = false; + const std::lock_guard lg(fFC_mutex); + for (auto& iter : fFailCounter) ret[iter.first] += iter.second; +} + +void StraxFormatter::GetDataPerChan(std::map& ret) { + if (!fActive) return; + const std::lock_guard lk(fDPC_mutex); + for (auto& pair : fDataPerChan) { + ret[pair.first] += pair.second; + pair.second = 0; + } + return; +} + +void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std:::unique_ptr& digi) { + std::string fragment; + fragment.reserve(fFragmentBytes + fStraxHeaderSize); + timestamp *= digi->GetClockWidth(); + fragment.append((char*)×tamp, sizeof(timestamp)); + int32_t length = fFragmentBytes>>1; + fragment.append((char*)&length, sizeof(length)); + int16_t sw = 10; + fragment.append((char*)&sw, sizeof(sw)); + int16_t channel = 790; // TODO add MV and NV support + fragment.append((char*)&channel, sizeof(channel)); + fragment.append((char*)&length, sizeof(length)); + int16_t fragment_i = 0; + fragment.append((char*)&fragment_i, sizeof(fragment_i)); + int16_t baseline = 0; + fragment.append((char*)&baseline, sizeof(baseline)); + fragment.append((char*)&bid, sizeof(bid)); + int8_t zero = 0; + while ((int)fragment.size() < fFragmentBytes+fStraxHeaderSize) + fragment.append((char*)&zero, sizeof(zero)); + AddFragmentToBuffer(std::move(fragment), 0, 0); +} + +void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ + // Take a buffer and break it up into one document per channel + struct timespec dp_start, dp_end, ev_start, ev_end; + auto it = dp->buff.begin(); + int evs_this_dp(0), words(0); + bool missed = false; + std::map dpc; + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &dp_start); + do { + if((*it)>>28 == 0xA){ + missed = true; // it works out + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); + 
words = (*it)&0x7FFFFFFF; + std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); + ProcessEvent(sv, dp, dpc); + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); + fProcTimeEv += timespec_subtract(ev_end, ev_start); + evs_this_dp++; + it += words; + } else { + if (missed) { + fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %i", + dp->digi->bid, std::distance(dp->buff.begin(), it)); + missed = false; + } + it++; + } + } while (it < dp->buff.end() && fActive == true); + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &dp_end); + fProcTimeDP += timespec_subtract(dp_end, dp_start); + fBytesProcessed += dp->buff.size()*sizeof(char32_t); + fEvPerDP[evs_this_dp]++; + { + const std::lock_guard lk(fDPC_mutex); + for (auto& p : dpc) fDataPerChan[p.first] += p.second; + } +} + +int StraxFormatter::ProcessEvent(std::u32string_view buff, + const std::unique_ptr& dp, std::map& dpc) { + // buff = start of event + + struct timespec ch_start, ch_end; + + // returns {words this event, channel mask, board fail, header timestamp} + auto [words, channel_mask, fail, event_time] = dp->digi->UnpackEventHeader(buff); + + if(fail){ // board fail + GenerateArtificialDeadtime(((clock_counter<<31) + header_time), dp->digi); + dp->digi->CheckFail(true); + fFailCounter[dp->digi->bid()]++; + return event_header_words; + } + + buff.remove_prefix(event_header_words); + int ret; + int frags(0); + + for(unsigned ch=0; ch& dp, std::map& dpc) { + // buff points to the first word of the channel's data + + int n_channels = std::bitset(channel_mask).count(); + // returns {timestamp (ns), words this channel, baseline, waveform} + auto [timestamp, channel_words, baseline_ch, wf] = dp->digi->UnpackChannelHeader( + buff, dp->clock_counter, dp->header_time, event_time, words_in_event, n_channels); + + uint32_t samples_in_pulse = wf.size()*sizeof(uint16_t)/sizeof(char32_t); + uint16_t sw = dp->digi->SampleWidth(); + int samples_per_fragment = fFragmentBytes>>1; + int16_t global_ch = fOptions->GetChannel(dp->digi->bid(), channel); + // Failing to discern which channel we're getting data from seems serious enough to throw + if(global_ch==-1) + throw std::runtime_error("Failed to parse channel map. I'm gonna just kms now."); + + int num_frags = std::ceil(1.*samples_in_pulse/samples_per_fragment); + frags += num_frags; + for (uint16_t frag_i = 0; frag_i < num_frags; frag_i++) { + std::string fragment; + fragment.reserve(fFragmentBytes + fStraxHeaderSize); + + // How long is this fragment? 
+ uint32_t samples_this_frag = samples_per_fragment; + if (frag_i == num_frags-1) + samples_this_frag = samples_in_pulse - frag_i*samples_per_fragment; + + int64_t time_this_frag = timestamp + samples_per_frag*sw*frag_i; + fragment.append((char*)&time_this_frag, sizeof(time_this_frag)); + fragment.append((char*)&samples_this_frag, sizeof(samples_this_frag)); + fragment.append((char*)&sw, sizeof(sw)); + fragment.append((char*)&cl, sizeof(cl)); + fragment.append((char*)&samples_in_pulse, sizeof(samples_in_pulse)); + fragment.append((char*)&frag_i, sizeof(frag_i)); + fragment.append((char*)&baseline_ch, sizeof(baseline_ch)); + + // Copy the raw buffer + fragment.append((char*)wf.data(), samples_this_frag*sizeof(uint16_t)); + wf.remove_prefix(samples_this_frag*sizeof(uint16_t)/sizeof(char32_t)); + uint16_t zero_filler = 0; + while((int)fragment.size() 0) { + auto [min_iter, max_iter] = std::minmax_element(fChunks.begin(), fChunks.end(), + [&](auto& l, auto& r) {return l.first < r.first;}); + min_chunk = (*min_iter).first; + max_chunk = (*max_iter).first; + } + + if (min_chunk - chunk_id > fWarnIfChunkOlderThan) { + const short* channel = (const short*)(fragment.data()+14); + fLog->Entry(MongoLog::Warning, + "Thread %lx got data from ch %i that's in chunk %i instead of %i/%i (ts %lx), it might get lost (ts %lx ro %i)", + fThreadId, *channel, chunk_id, min_chunk, max_chunk, timestamp, ts, rollovers); + } else if (chunk_id - max_chunk > 1) { + fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s)", + fThreadId, chunk_id - max_chunk - 1); + } + + fFragmentSize += fragment.size(); + + if(!overlap){ + fChunks[chunk_id].emplace_back(std::move(fragment)); + } else { + fOverlaps[chunk_id].emplace_back(std::move(fragment)); + } +} + +void StraxFormatter::ReceiveDatapackets(std::list>& in) { + { + const std::lock_guard lk(fBufferMutex); + fBufferCounter[in.size()]++; + fBuffer.splice(fBuffer.end(), in); + } + fCV.notify_one(); +} + +void StraxFormatter::Process() { + // this func runs in its own thread + fThreadID = std::this_thread::get_id(); + std::stringstream ss; + ss< dp; + while (fActive == true) { + std::unique_lock lk(fBufferMutex); + fCV.wait(lk, [&]{return fBuffer.size() > 0 || fActive == false;}); + if (fBuffer.size() > 0) { + dp = std::move(fBuffer.front()); + fBuffer.pop_front(); + lk.unlock(); + ProcessDatapacket(std::move(dp)); + WriteOutChunks(); + } else { + lk.unlock(); + } + } + if (fBytesProcessed > 0) + End(); + fRunning = false; +} + +// Can tune here as needed, these are defaults from the LZ4 examples +static const LZ4F_preferences_t kPrefs = { + { LZ4F_max256KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0, { 0, 0 } }, + 0, /* compression level; 0 == default */ + 0, /* autoflush */ + { 0, 0, 0 }, /* reserved, must be set to 0 */ +}; + +void StraxFormatter::WriteOutChunk(int chunk_i){ + // Write the contents of the buffers to compressed files + struct timespec comp_start, comp_end; + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_start); + + std::vector*> buffers = {&fChunks[chunk_i], &fOverlaps[chunk_i]}; + std::vector uncompressed_size(3, 0); + std::string uncompressed; + std::vector> out_buffer(3); + std::vector wsize(3); + size_t max_compressed_size = 0; + + for (int i = 0; i < 2; i++) { + uncompressed_size[i] = buffers[i]->size()*(fFragmentBytes + fStraxHeaderSize); + uncompressed.reserve(uncompressed_size[i]); + for (auto it = buffers[i]->begin(); it != buffers[i]->end(); it++) + uncompressed += *it; + buffers[i]->clear(); + if(fCompressor == "blosc"){ + 
max_compressed_size = uncompressed_size[i] + BLOSC_MAX_OVERHEAD;
+      out_buffer[i] = std::make_shared<std::string>(max_compressed_size, 0);
+      wsize[i] = blosc_compress_ctx(5, 1, sizeof(char), uncompressed_size[i],
+          uncompressed.data(), out_buffer[i]->data(), max_compressed_size, "lz4", 0, 2);
+    }else{
+      // Note: the current package repo version for Ubuntu 18.04 (Oct 2019) is 1.7.1, which is
+      // so old it is not tracked on the lz4 github. The API for frame compression has changed
+      // just slightly in the meantime. So if you update and it breaks you'll have to tune at least
+      // the LZ4F_preferences_t object to the new format.
+      max_compressed_size = LZ4F_compressFrameBound(uncompressed_size[i], &kPrefs);
+      out_buffer[i] = std::make_shared<std::string>(max_compressed_size, 0);
+      wsize[i] = LZ4F_compressFrame(out_buffer[i]->data(), max_compressed_size,
+          uncompressed.data(), uncompressed_size[i], &kPrefs);
+    }
+    uncompressed.clear();
+    fBytesPerChunk[int(std::log2(uncompressed_size[i]))]++;
+  }
+  fChunks.erase(chunk_i);
+  fOverlaps.erase(chunk_i);
+
+  out_buffer[2] = out_buffer[1];
+  wsize[2] = wsize[1];
+  uncompressed_size[2] = uncompressed_size[1];
+  std::vector<std::string> names {{GetStringFormat(chunk_i),
+      GetStringFormat(chunk_i)+"_post", GetStringFormat(chunk_i+1)+"_pre"}};
+  for (int i = 0; i < 3; i++) {
+    // write to *_TEMP
+    auto output_dir_temp = GetDirectoryPath(names[i], true);
+    auto filename_temp = GetFilePath(names[i], true);
+    if (!fs::exists(output_dir_temp))
+      fs::create_directory(output_dir_temp);
+    std::ofstream writefile(filename_temp, std::ios::binary);
+    if (uncompressed_size[i] > 0) writefile.write(out_buffer[i]->data(), wsize[i]);
+    writefile.close();
+    out_buffer[i].reset();
+
+    auto output_dir = GetDirectoryPath(names[i]);
+    auto filename = GetFilePath(names[i]);
+    // shenanigans or skulduggery?
+    if(fs::exists(filename)) {
+      fLog->Entry(MongoLog::Warning, "Chunk %s from thread %lx already exists?
%li vs %li bytes", + names[i].c_str(), fThreadId, fs::file_size(filename), wsize[i]); + } + + // Move this chunk from *_TEMP to the same path without TEMP + if(!fs::exists(output_dir)) + fs::create_directory(output_dir); + fs::rename(filename_temp, filename); + } // End writing + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_end); + fCompTime += timespec_subtract(comp_end, comp_start); + return; +} + +void StraxFormatter::WriteOutChunks() { + if (fChunks.size() < fBufferNumChunks) return; + auto [min_iter, max_iter] = std::minmax_element(fChunks.begin(), fChunks.end(), + [&](auto& a, auto& b){return a.first < b.first;}); + int max_chunk = (*max_iter).first; + int min_chunk = (*min_iter).first; + for (; min_chunk <= max_chunk - fBufferNumChunks; min_chunk++) + WriteOutChunk(min_chunk); + CreateEmpty(min_chunk); + return; +} + +void StraxFormatter::End() { + for (auto& p : fChunks) + WriteOutChunk(p.first); + fChunks.clear(); + fFragmentSize = 0; + auto end_dir = GetDirectoryPath("THE_END"); + if(!fs::exists(end_dir)){ + fLog->Entry(MongoLog::Local,"Creating END directory at %s", end_dir.c_str()); + try{ + fs::create_directory(end_dir); + } + catch(...){}; + } + std::ofstream outfile(GetFilePath("THE_END"), std::ios::out); + outfile<<"...my only friend"; + outfile.close(); + return; +} + +std::string StraxFormatter::GetStringFormat(int id){ + std::string chunk_index = std::to_string(id); + while(chunk_index.size() < fChunkNameLength) + chunk_index.insert(0, "0"); + return chunk_index; +} + +fs::path StraxFormatter::GetDirectoryPath(const std::string& id, bool temp){ + fs::path write_path(fOutputPath); + write_path /= id; + if(temp) + write_path+="_temp"; + return write_path; +} + +fs::path StraxFormatter::GetFilePath(const std::string& id, bool temp){ + return GetDirectoryPath(id, temp) / fFullHostname; +} + +void StraxFormatter::CreateEmpty(int back_from){ + for(; fEmptyVerified names {{GetStringFormat(fEmptyVerified), + GetStringFormat(fEmptyVerified)+"_post", GetStringFormat(fEmptyVerified+1)+"_pre"}}; + for (auto& n : names) { + if(!fs::exists(GetFilePath(n))){ + if(!fs::exists(GetDirectoryPath(n))) + fs::create_directory(GetDirectoryPath(n)); + std::ofstream o(GetFilePath(n)); + o.close(); + } + } // name + } // chunks +} + diff --git a/StraxFormatter.hh b/StraxFormatter.hh new file mode 100644 index 00000000..0519b861 --- /dev/null +++ b/StraxFormatter.hh @@ -0,0 +1,113 @@ +#ifndef _STRAXINSERTER_HH_ +#define _STRAXINSERTER_HH_ + +#include +#include +#include + +//for debugging +//#include +#include +#include +#include +#include +#include +#include +#include + +class Options; +class MongoLog; +class ThreadPool; + +struct data_packet{ + data_packet() : clock_counter(0), header_time(0) {} + data_packet(std::u32string s, uint32_t cc, uint32_t ht) : + buff(std::move(s)), clock_counter(cc), header_time(ht) {} + data_packet(const data_packet& rhs)=delete; + data_packet(data_packet&& rhs) : buff(std::move(rhs.buff)), + clock_counter(rhs.clock_counter), header_time(rhs.header_time), digi(rhs.digi) {} + ~data_packet() {buff.clear(); digi.reset();} + + data_packet& operator=(const data_packet& rhs)=delete; + data_packet& operator=(data_packet&& rhs) { + buff=std::move(rhs.buff); + clock_counter=rhs.clock_counter; + header_time=rhs.header_time; + digi=rhs.digi; + return *this; + } + + std::u32string buff; + uint32_t clock_counter; + uint32_t header_time; + std::shared_ptr digi; +}; + +class StraxFormatter{ + /* + Reformats raw data into strax format + */ + +public: + 
StraxFormatter(std::shared_ptr&, std::shared_ptr&); + ~StraxFormatter(); + + void Close(std::map& ret); + + void Process(); + long GetBufferSize() {return fFragmentSize.load();} + void GetDataPerChan(std::map& ret); + void CheckError(int bid); + int GetBufferLength() {return fBufferLength.load();} + void ReceiveDatapackets(std::list>&); + +private: + void ProcessDatapacket(std::unique_ptr dp); + int ProcessEvent(std::u32string_view, const std::unique_ptr&, + std::map&); + int ProcessChannel(std::u32string_view, int, uint32_t, int&, unsigned, + const std::unique_ptr&, std::map&); + void WriteOutChunk(int); + void WriteOutChunks(); + void End(); + void GenerateArtificialDeadtime(int64_t, const std::unique_ptr&); + void AddFragmentToBuffer(std::string&, uint32_t, int); + + std::experimental::filesystem::path GetFilePath(const std::string&, bool=false); + std::experimental::filesystem::path GetDirectoryPath(const std::string&, bool=false); + std::string GetStringFormat(int id); + void CreateEmpty(int); + int fEmptyVerified; + + int64_t fChunkLength; // ns + int64_t fChunkOverlap; // ns + int fFragmentBytes; + int fStraxHeaderSize; // bytes + int fBufferNumChunks; + int fWarnIfChunkOlderThan; + unsigned fChunkNameLength; + int64_t fFullChunkLength; + std::string fOutputPath, fHostname, fFullHostname; + std::shared_ptr fOptions; + std::shared_ptr fLog; + std::atomic_bool fActive, fRunning; + std::string fCompressor; + std::map> fChunks, fOverlaps; + std::map fFailCounter; + std::map fDataPerChan; + std::mutex fDPC_mutex; + std::map fBufferCounter; + std::map fFragsPerEvent; + std::map fEvPerDP; + std::map fBytesPerChunk; + std::atomic_int fInputBufferSize, fOutputBufferSize; + long fBytesProcessed; + + double fProcTimeDP, fProcTimeEv, fProcTimeCh, fCompTime; + std::thread::id fThreadId; + std::condition_variable fCV; + std::mutex fBufferMutex; + std::list fBuffer; +}; + +#endif diff --git a/StraxInserter.cc b/StraxInserter.cc deleted file mode 100644 index 7ebf50e8..00000000 --- a/StraxInserter.cc +++ /dev/null @@ -1,610 +0,0 @@ -#include "StraxInserter.hh" -#include -#include "DAQController.hh" -#include "MongoLog.hh" -#include "Options.hh" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace fs=std::experimental::filesystem; -using namespace std::chrono; -const int event_header_words = 4, max_channels = 16; - -double timespec_subtract(struct timespec& a, struct timespec& b) { - return (a.tv_sec - b.tv_sec)*1e6 + (a.tv_nsec - b.tv_nsec)/1e3; -} - -StraxInserter::StraxInserter(){ - fOptions = NULL; - fDataSource = NULL; - fActive = true; - fChunkLength=0x7fffffff; // DAQ magic number - fChunkNameLength=6; - fChunkOverlap = 0x2FAF080; - fStraxHeaderSize=24; - fFragmentBytes=110*2; - fLog = NULL; - fErrorBit = false; - fMissingVerified = 0; - fOutputPath = ""; - fThreadId = std::this_thread::get_id(); - fBytesProcessed = 0; - fFragmentSize = 0; - fForceQuit = false; - fFullChunkLength = fChunkLength+fChunkOverlap; - fProcTimeDP = fProcTimeEv = fProcTimeCh = fCompTime = 0.; -} - -StraxInserter::~StraxInserter(){ - fActive = false; - int counter_short = 0, counter_long = 0; - if (fBufferLength.load() > 0) - fLog->Entry(MongoLog::Local, "Thread %lx waiting to stop, has %i events left", - fThreadId, fBufferLength.load()); - else - fLog->Entry(MongoLog::Local, "Thread %lx stopping", fThreadId); - int events_start = fBufferLength.load(); - do{ - events_start = fBufferLength.load(); - while (fRunning && counter_short++ < 500) - 
std::this_thread::sleep_for(std::chrono::milliseconds(10));
-    if (counter_short >= 500)
-      fLog->Entry(MongoLog::Message, "Thread %lx taking a while to stop, still has %i evts",
-          fThreadId, fBufferLength.load());
-    counter_short = 0;
-  } while (fRunning && fBufferLength.load() > 0 && events_start > fBufferLength.load() && counter_long++ < 10);
-  if (fRunning) {
-    fLog->Entry(MongoLog::Warning, "Force-quitting thread %lx: %i events lost",
-        fThreadId, fBufferLength.load());
-    fForceQuit = true;
-    std::this_thread::sleep_for(std::chrono::seconds(2));
-  }
-  while (fRunning) {
-    fLog->Entry(MongoLog::Message, "Still waiting for thread %lx to stop", fThreadId);
-    std::this_thread::sleep_for(std::chrono::seconds(2));
-  }
-  std::stringstream ss;
-  ss << std::hex << fThreadId;
-  std::map times {
-    {"data_packets_us", fProcTimeDP},
-    {"events_us", fProcTimeEv},
-    {"fragments_us", fProcTimeCh},
-    {"compression_us", fCompTime}
-  };
-  std::map> counters {
-    {"fragments", fFragsPerEvent},
-    {"events", fEvPerDP},
-    {"data_packets", fBufferCounter}
-  };
-  fOptions->SaveBenchmarks(counters, fBytesProcessed, ss.str(), times);
-}
-
-int StraxInserter::Initialize(Options *options, MongoLog *log, DAQController *dataSource,
-    std::string hostname){
-  fOptions = options;
-  fChunkLength = long(fOptions->GetDouble("strax_chunk_length", 5)*1e9); // default 5s
-  fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s
-  fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2);
-  fCompressor = fOptions->GetString("compressor", "lz4");
-  fFullChunkLength = fChunkLength+fChunkOverlap;
-  fHostname = hostname;
-  std::string run_name = fOptions->GetString("run_identifier", "run");
-
-  fMissingVerified = 0;
-  fDataSource = dataSource;
-  dataSource->GetDataFormat(fFmt);
-  fLog = log;
-  fErrorBit = false;
-
-  fBufferNumChunks = fOptions->GetInt("strax_buffer_num_chunks", 2);
-  fWarnIfChunkOlderThan = fOptions->GetInt("strax_chunk_phase_limit", 2);
-
-  std::string output_path = fOptions->GetString("strax_output_path", "./");
-  try{
-    fs::path op(output_path);
-    op /= run_name;
-    fOutputPath = op;
-    fs::create_directory(op);
-  }
-  catch(...){
-    fLog->Entry(MongoLog::Error, "StraxInserter::Initialize tried to create output directory but failed.
Check that you have permission to write here."); - return -1; - } - - return 0; -} - -void StraxInserter::Close(std::map& ret){ - fActive = false; - const std::lock_guard lg(fFC_mutex); - for (auto& iter : fFailCounter) ret[iter.first] += iter.second; -} - -void StraxInserter::GetDataPerChan(std::map& ret) { - if (!fActive) return; - fDPC_mutex.lock(); - for (auto& pair : fDataPerChan) { - ret[pair.first] += pair.second; - pair.second = 0; - } - fDPC_mutex.unlock(); - return; -} - -void StraxInserter::GenerateArtificialDeadtime(int64_t timestamp, int16_t bid, uint32_t et, int ro) { - std::string fragment; - fragment.append((char*)×tamp, sizeof(timestamp)); - int32_t length = fFragmentBytes>>1; - fragment.append((char*)&length, sizeof(length)); - int16_t sw = 10; - fragment.append((char*)&sw, sizeof(sw)); - int16_t channel = 790; // TODO add MV and NV support - fragment.append((char*)&channel, sizeof(channel)); - fragment.append((char*)&length, sizeof(length)); - int16_t fragment_i = 0; - fragment.append((char*)&fragment_i, sizeof(fragment_i)); - int16_t baseline = 0; - fragment.append((char*)&baseline, sizeof(baseline)); - fragment.append((char*)&bid, sizeof(bid)); - int8_t zero = 0; - while ((int)fragment.size() < fFragmentBytes+fStraxHeaderSize) - fragment.append((char*)&zero, sizeof(zero)); - AddFragmentToBuffer(fragment, timestamp, et, ro); -} - -void StraxInserter::ProcessDatapacket(data_packet* dp){ - - struct timespec dp_start, dp_end, ev_start, ev_end; - - // Take a buffer and break it up into one document per channel - - uint32_t *buff = dp->buff; - uint32_t idx = 0; - unsigned total_words = dp->size/sizeof(uint32_t); - int evs_this_dp(0); - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &dp_start); - while(idx < total_words && fForceQuit == false){ - - if(buff[idx]>>28 == 0xA){ // 0xA indicates header at those bits - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); - idx += ProcessEvent(buff+idx, total_words-idx, dp->clock_counter, dp->header_time, dp->bid); - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); - fProcTimeEv += timespec_subtract(ev_end, ev_start); - evs_this_dp++; - } else - idx++; - } - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &dp_end); - fProcTimeDP += timespec_subtract(dp_end, dp_start); - fBytesProcessed += dp->size; - fEvPerDP[evs_this_dp]++; - delete dp; -} - -uint32_t StraxInserter::ProcessEvent(uint32_t* buff, unsigned total_words, long clock_counter, - uint32_t header_time, int bid) { - // buff = start of event, total_words = valid words remaining in total buffer - - struct timespec ch_start, ch_end; - std::map fmt = fFmt[bid]; - - uint32_t words_in_event = std::min(buff[0]&0xFFFFFFF, total_words); - if (words_in_event < (buff[0]&0xFFFFFFF)) { - fLog->Entry(MongoLog::Local, "Board %i garbled event header: %x/%x", - bid, buff[0]&0xFFFFFFF, total_words); - } - - uint32_t channel_mask = (buff[1]&0xFF); - if (fmt["channel_mask_msb_idx"] != -1) channel_mask |= ( ((buff[2]>>24)&0xFF)<<8); - - uint32_t event_time = buff[3]&0x7FFFFFFF; - - if(buff[1]&0x4000000){ // board fail - const std::lock_guard lg(fFC_mutex); - GenerateArtificialDeadtime(((clock_counter<<31) + header_time)*fmt["ns_per_clk"], bid, - event_time, clock_counter); - fDataSource->CheckError(bid); - fFailCounter[bid]++; - return event_header_words; - } - - unsigned idx = event_header_words; - int ret; - int frags(0); - - for(unsigned ch=0; ch(channel_mask).count(); - uint32_t channel_words = (words_in_event-event_header_words) / channels_in_event; - long channel_time = event_time; - long channel_timeMSB = 
clock_counter<<31; - uint16_t baseline_ch = 0; - std::map fmt = fFmt[bid]; - - // Presence of a channel header indicates non-default firmware (DPP-DAW) so override - if(fmt["channel_header_words"] > 0){ - channel_words = std::min(buff[0]&0x7FFFFF, words_in_event); - if (channel_words < (buff[0]&0x7FFFFF)) { - fLog->Entry(MongoLog::Local, "Board %i ch %i garbled header: %x/%x", - bid, channel, buff[0]&0x7FFFFF, words_in_event); - return -1; - } - if ((int)channel_words <= fmt["channel_header_words"]) { - fLog->Entry(MongoLog::Local, "Board %i ch %i empty (%i/%i)", - bid, channel, channel_words, fmt["channel_header_words"]); - return -1; - } - channel_time = buff[1] & (fmt["channel_time_msb_idx"] == -1 ? 0x7FFFFFFF : 0xFFFFFFFF); - - if (fmt["channel_time_msb_idx"] == 2) { - channel_time = buff[1]; - channel_timeMSB = long(buff[2]&0xFFFF)<<32; - baseline_ch = (buff[2]>>16)&0x3FFF; - } - - if(fmt["channel_header_words"] <= 2){ - // More clock rollover logic here, because channels are independent - // and we process multithreaded. We leverage the fact that readout windows are short - // and polled frequently compared to the clock rollover timescale, so there will - // never be a "large" difference in realtime between timestamps in a data_packet - - // first, has the main counter rolled but this channel hasn't? - if(channel_time>15e8 && header_time<5e8 && clock_counter!=0){ - clock_counter--; - } - // Now check the opposite - else if(channel_time<5e8 && header_time>15e8){ - clock_counter++; - } - channel_timeMSB = clock_counter<<31; - } - } // channel_header_words > 0 - - int64_t Time64 = fmt["ns_per_clk"]*(channel_timeMSB + channel_time); // in ns - - // let's sanity-check the data first to make sure we didn't get CAENed - for (unsigned w = fmt["channel_header_words"]; w < channel_words; w++) { - if ((buff[w]>>28) == 0xA) { - fLog->Entry(MongoLog::Local, "Board %i has CAEN'd itself (%lx)", bid, Time64); - GenerateArtificialDeadtime(Time64, bid, event_time, clock_counter); - return -1; - } - } - - uint16_t *payload = reinterpret_cast(buff+fmt["channel_header_words"]); - uint32_t samples_in_pulse = (channel_words-fmt["channel_header_words"])<<1; - uint16_t sw = fmt["ns_per_sample"]; - int samples_per_fragment = fFragmentBytes>>1; - int16_t cl = fOptions->GetChannel(bid, channel); - // Failing to discern which channel we're getting data from seems serious enough to throw - if(cl==-1) - throw std::runtime_error("Failed to parse channel map. I'm gonna just kms now."); - - int num_frags = std::ceil(1.*samples_in_pulse/samples_per_fragment); - frags += num_frags; - for (uint16_t frag_i = 0; frag_i < num_frags; frag_i++) { - std::string fragment; - fragment.reserve(fFragmentBytes + fStraxHeaderSize); - - // How long is this fragment? 
- uint32_t samples_this_fragment = samples_per_fragment; - if (frag_i == num_frags-1) - samples_this_fragment = samples_in_pulse - frag_i*samples_per_fragment; - - int64_t time_this_fragment = Time64 + samples_per_fragment*sw*frag_i; - fragment.append((char*)&time_this_fragment, sizeof(time_this_fragment)); - fragment.append((char*)&samples_this_fragment, sizeof(samples_this_fragment)); - fragment.append((char*)&sw, sizeof(sw)); - fragment.append((char*)&cl, sizeof(cl)); - fragment.append((char*)&samples_in_pulse, sizeof(samples_in_pulse)); - fragment.append((char*)&frag_i, sizeof(frag_i)); - fragment.append((char*)&baseline_ch, sizeof(baseline_ch)); - - // Copy the raw buffer - fragment.append((char*)(payload + frag_i*samples_per_fragment), samples_this_fragment*2); - uint16_t zero_filler = 0; - while((int)fragment.size() lg(fDPC_mutex); - fDataPerChan[cl] += samples_in_pulse<<1; - } - return channel_words; -} - -void StraxInserter::AddFragmentToBuffer(std::string& fragment, int64_t timestamp, uint32_t ts, int rollovers) { - // Get the CHUNK and decide if this event also goes into a PRE/POST file - int chunk_id = timestamp/fFullChunkLength; - bool nextpre = (chunk_id+1)* fFullChunkLength - timestamp <= fChunkOverlap; - // Minor mess to maintain the same width of file names and do the pre/post stuff - // If not in pre/post - std::string chunk_index = GetStringFormat(chunk_id); - int min_chunk(0), max_chunk(1); - if (fFragments.size() > 0) { - const auto [min_chunk_, max_chunk_] = std::minmax_element(fFragments.begin(), fFragments.end(), - [&](auto& l, auto& r) {return std::stoi(l.first) < std::stoi(r.first);}); - min_chunk = std::stoi((*min_chunk_).first); - max_chunk = std::stoi((*max_chunk_).first); - } - - if (min_chunk - chunk_id > fWarnIfChunkOlderThan) { - const short* channel = (const short*)(fragment.data()+14); - fLog->Entry(MongoLog::Warning, - "Thread %lx got data from ch %i that's in chunk %i instead of %i/%i (ts %lx), it might get lost (ts %lx ro %i)", - fThreadId, *channel, chunk_id, min_chunk, max_chunk, timestamp, ts, rollovers); - } else if (chunk_id - max_chunk > 2) { - fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s)", - fThreadId, chunk_id - max_chunk - 1); - } - - fFragmentSize += fragment.size(); - - if(!nextpre){ - if(fFragments.count(chunk_index) == 0){ - fFragments[chunk_index] = new std::string(); - } - fFragments[chunk_index]->append(fragment); - } else { - std::string nextchunk_index = GetStringFormat(chunk_id+1); - - if(fFragments.count(nextchunk_index+"_pre") == 0){ - fFragments[nextchunk_index+"_pre"] = new std::string(); - } - fFragments[nextchunk_index+"_pre"]->append(fragment); - - if(fFragments.count(chunk_index+"_post") == 0){ - fFragments[chunk_index+"_post"] = new std::string(); - } - fFragments[chunk_index+"_post"]->append(fragment); - } -} - -int StraxInserter::ReadAndInsertData(){ - fThreadId = std::this_thread::get_id(); - fActive = fRunning = true; - fBufferLength = 0; - std::chrono::microseconds sleep_time(10); - if (fOptions->GetString("buffer_type", "dual") == "dual") { - while(fActive == true){ - std::list b; - if (fDataSource->GetData(&b)) { - fBufferLength = b.size(); - fBufferCounter[int(b.size())]++; - for (auto& dp_ : b) { - ProcessDatapacket(dp_); - fBufferLength--; - dp_ = nullptr; - if (fForceQuit) break; - } - if (fForceQuit) for (auto& dp_ : b) if (dp_ != nullptr) delete dp_; - b.clear(); - WriteOutFiles(); - } else { - fBufferCounter[0]++; - std::this_thread::sleep_for(sleep_time); - } - } - } else { - data_packet* dp; 
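/*
   A note on the pattern being retired here: with a raw data_packet* the reader
   must remember to delete on every path (see the fForceQuit cleanup in the
   dual-buffer branch above). The StraxFormatter rewrite in this series passes
   packets around as std::unique_ptr<data_packet> instead, so ownership is
   explicit and cleanup is automatic. A minimal sketch of that hand-off, using
   only the default data_packet constructor declared in StraxFormatter.hh; the
   name in_flight is illustrative and not part of this codebase (needs <list>
   and <memory>):

     std::list<std::unique_ptr<data_packet>> in_flight;
     in_flight.emplace_back(std::make_unique<data_packet>()); // producer side
     auto dp = std::move(in_flight.front());                  // consumer takes ownership
     in_flight.pop_front();
     // no delete needed: *dp is destroyed when dp goes out of scope
*/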
- while (fActive == true) { - if (fDataSource->GetData(dp)) { - fBufferLength = 1; - fBufferCounter[1]++; - ProcessDatapacket(dp); - fBufferLength = 0; - WriteOutFiles(); - } else { - std::this_thread::sleep_for(sleep_time); - } - } - } - if (fBytesProcessed > 0) - WriteOutFiles(true); - fRunning = false; - return 0; -} - -// Can tune here as needed, these are defaults from the LZ4 examples -static const LZ4F_preferences_t kPrefs = { - { LZ4F_max256KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0, { 0, 0 } }, - 0, /* compression level; 0 == default */ - 0, /* autoflush */ - { 0, 0, 0 }, /* reserved, must be set to 0 */ -}; - -void StraxInserter::WriteOutFiles(bool end){ - // Write the contents of fFragments to compressed files - struct timespec comp_start, comp_end; - std::vector idx_to_clear; - int max_chunk = -1; - for (auto& iter : fFragments) max_chunk = std::max(max_chunk, std::stoi(iter.first)); - int write_lte = max_chunk - fBufferNumChunks; - for (auto& iter : fFragments) { - if (iter.first == "") - continue; // not sure why, but this sometimes happens during bad shutdowns - std::string chunk_index = iter.first; - if (std::stoi(chunk_index) > write_lte && !end) continue; - - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_start); - if(!fs::exists(GetDirectoryPath(chunk_index, true))) - fs::create_directory(GetDirectoryPath(chunk_index, true)); - - size_t uncompressed_size = iter.second->size(); - - // Compress it - char *out_buffer = NULL; - int wsize = 0; - if(fCompressor == "blosc"){ - out_buffer = new char[uncompressed_size+BLOSC_MAX_OVERHEAD]; - wsize = blosc_compress_ctx(5, 1, sizeof(char), uncompressed_size, iter.second->data(), - out_buffer, uncompressed_size+BLOSC_MAX_OVERHEAD, "lz4", 0, 2); - } - else{ - // Note: the current package repo version for Ubuntu 18.04 (Oct 2019) is 1.7.1, which is - // so old it is not tracked on the lz4 github. The API for frame compression has changed - // just slightly in the meantime. So if you update and it breaks you'll have to tune at least - // the LZ4F_preferences_t object to the new format. - size_t max_compressed_size = LZ4F_compressFrameBound(uncompressed_size, &kPrefs); - out_buffer = new char[max_compressed_size]; - wsize = LZ4F_compressFrame(out_buffer, max_compressed_size, - iter.second->data(), uncompressed_size, &kPrefs); - } - delete iter.second; - iter.second = nullptr; - fFragmentSize -= uncompressed_size; - idx_to_clear.push_back(chunk_index); - - std::ofstream writefile(GetFilePath(chunk_index, true), std::ios::binary); - writefile.write(out_buffer, wsize); - delete[] out_buffer; - writefile.close(); - - // shenanigans or skulduggery? - if(fs::exists(GetFilePath(chunk_index, false))) { - fLog->Entry(MongoLog::Warning, "Chunk %s from thread %lx already exists? 
%li vs %li bytes", - chunk_index.c_str(), fThreadId, fs::file_size(GetFilePath(chunk_index, false)), wsize); - } - - // Move this chunk from *_TEMP to the same path without TEMP - if(!fs::exists(GetDirectoryPath(chunk_index, false))) - fs::create_directory(GetDirectoryPath(chunk_index, false)); - fs::rename(GetFilePath(chunk_index, true), - GetFilePath(chunk_index, false)); - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_end); - fCompTime += timespec_subtract(comp_end, comp_start); - - CreateMissing(std::stoi(iter.first)); - } // End for through fragments - // clear now because c++ sometimes overruns its buffers - for (auto s : idx_to_clear) { - if (fFragments.count(s) != 0) fFragments.erase(s); - } - - if(end){ - for (auto& p : fFragments) - if (p.second != nullptr) delete p.second; - fFragments.clear(); - fFragmentSize = 0; - fs::path write_path(fOutputPath); - std::string filename = fHostname; - write_path /= "THE_END"; - if(!fs::exists(write_path)){ - fLog->Entry(MongoLog::Local,"Creating END directory at %s",write_path.c_str()); - try{ - fs::create_directory(write_path); - } - catch(...){}; - } - std::stringstream ss; - ss< -#include -#include - -//for debugging -//#include -#include -#include -#include -#include -#include -#include -#include - -class DAQController; -class Options; -class MongoLog; - -struct data_packet{ - public: - data_packet(); - ~data_packet(); - u_int32_t *buff; - int32_t size; - u_int32_t clock_counter; - u_int32_t header_time; - int bid; -}; - - -class StraxInserter{ - /* - Reformats raw data into strax format - */ - -public: - StraxInserter(); - ~StraxInserter(); - - int Initialize(Options *options, MongoLog *log, - DAQController *dataSource, std::string hostname); - void Close(std::map& ret); - - int ReadAndInsertData(); - bool CheckError(){ bool ret = fErrorBit; fErrorBit = false; return ret;} - long GetBufferSize() {return fFragmentSize.load();} - void GetDataPerChan(std::map& ret); - void CheckError(int bid); - int GetBufferLength() {return fBufferLength.load();} - -private: - void ProcessDatapacket(data_packet *dp); - uint32_t ProcessEvent(uint32_t*, unsigned, long, uint32_t, int); - int ProcessChannel(uint32_t*, unsigned, int, int, uint32_t, uint32_t, long, int, int&); - void WriteOutFiles(bool end=false); - void GenerateArtificialDeadtime(int64_t, int16_t, uint32_t, int); - void AddFragmentToBuffer(std::string&, int64_t, uint32_t, int); - - std::experimental::filesystem::path GetFilePath(std::string id, bool temp); - std::experimental::filesystem::path GetDirectoryPath(std::string id, bool temp); - std::string GetStringFormat(int id); - void CreateMissing(u_int32_t back_from_id); - int fMissingVerified; - - int64_t fChunkLength; // ns - int64_t fChunkOverlap; // ns - int fFragmentBytes; - int fStraxHeaderSize; // bytes - int fBufferNumChunks; - int fWarnIfChunkOlderThan; - unsigned fChunkNameLength; - int64_t fFullChunkLength; - std::string fOutputPath, fHostname; - Options *fOptions; - MongoLog *fLog; - DAQController *fDataSource; - std::atomic_bool fActive, fRunning, fForceQuit; - bool fErrorBit; - std::string fCompressor; - std::map fFragments; - std::atomic_long fFragmentSize; - std::map> fFmt; - std::map fFailCounter; - std::mutex fFC_mutex; - std::map fDataPerChan; - std::mutex fDPC_mutex; - std::map fBufferCounter; - std::map fFragsPerEvent; - std::map fEvPerDP; - std::atomic_int fBufferLength; - long fBytesProcessed; - - double fProcTimeDP, fProcTimeEv, fProcTimeCh, fCompTime; - std::thread::id fThreadId; -}; - -#endif diff --git a/V1495.cc 
b/V1495.cc index 39972efb..7c83214d 100644 --- a/V1495.cc +++ b/V1495.cc @@ -10,7 +10,7 @@ #include "MongoLog.hh" -V1495::V1495(MongoLog *log, Options *options, int bid, int handle, unsigned int address){ +V1495::V1495(std::shared_ptr& log, std::shared_ptr& options, int bid, int handle, unsigned int address){ fOptions = options; fLog = log; fBID = bid; diff --git a/V1495.hh b/V1495.hh index f8a87a94..c9d9f924 100644 --- a/V1495.hh +++ b/V1495.hh @@ -24,15 +24,15 @@ using namespace std; class V1495{ public: - V1495(MongoLog *log, Options *options, int bid, int handle, unsigned int address); - virtual ~V1495(); + V1495(std::shared_ptr&, std::shared_ptr&, int, int, unsigned); + virtual ~V1495(); int WriteReg(unsigned int reg, unsigned int value); private: int fBoardHandle, fBID; unsigned int fBaseAddress; - Options *fOptions; - MongoLog *fLog; + std::shared_ptr fOptions; + std::shared_ptr fLog; }; #endif diff --git a/V1724.cc b/V1724.cc index b8e99ea5..4b790dde 100644 --- a/V1724.cc +++ b/V1724.cc @@ -16,8 +16,7 @@ #include -V1724::V1724(MongoLog *log, Options *options){ - fOptions = options; +V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts){ fBoardHandle=fLink=fCrate=fBID=-1; fBaseAddress=0; fLog = log; @@ -35,24 +34,58 @@ V1724::V1724(MongoLog *log, Options *options){ fBoardFailStatRegister = 0x8178; fReadoutStatusRegister = 0xEF04; fBoardErrRegister = 0xEF00; + fError = false; - BLT_SIZE=512*1024; // one channel's memory + fSampleWidth = 10; + fClockCycle = 10; - DataFormatDefinition = { - {"channel_mask_msb_idx", -1}, - {"channel_mask_msb_mask", -1}, - {"channel_header_words", 2}, - {"ns_per_sample", 10}, - {"ns_per_clk", 10}, - // Channel indices are given relative to start of channel - // i.e. the channel size is at index '0' - {"channel_time_msb_idx", -1}, - {"channel_time_msb_mask", -1}, + int a = CAENVME_Init(cvV2718, link, crate, &fBoardHandle); + if(a != cvSuccess){ + fLog->Entry(MongoLog::Warning, "Board %i failed to init, error %i handle %i link %i bdnum %i", + bid, a, fBoardHandle, link, crate); + fBoardHandle = -1; + throw std::runtime_error(); + } + fLog->Entry(MongoLog::Debug, "Board %i initialized with handle %i (link/crate)(%i/%i)", + bid, fBoardHandle, link, crate); + + fLink = link; + fCrate = crate; + fBID = bid; + fBaseAddress=address; + fRolloverCounter = 0; + fLastClock = 0; + uint32_t word(0); + int my_bid(0); - }; + fBLTSafety = opts->GetDouble("blt_safety_factor", 1.5); + BLT_SIZE = opts->GetInt("blt_size", 512*1024); + // there's a more elegant way to do this, but I'm not going to write it + fClockPeriod = std::chrono::nanoseconds((1l<<31)*DataFormatDefinition["ns_per_clk"]); - fBLTSafety = 1.4; - fBufferSafety = 1.1; + if (Reset()) { + fLog->Entry(MongoLog::Error, "Board %i unable to pre-load registers", fBID); + throw std::runtime_error(); + } else { + fLog->Entry(MongoLog::Local, "Board %i reset", fBID); + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + if (opts->GetInt("do_sn_check", 0) != 0) { + if ((word = ReadRegister(fSNRegisterLSB)) == 0xFFFFFFFF) { + fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN lsb", fBID); + throw std::runtime_error(); + } + my_bid |= word&0xFF; + if ((word = ReadRegister(fSNRegisterMSB)) == 0xFFFFFFFF) { + fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN msb", fBID); + throw std::runtime_error(); + } + my_bid |= ((word&0xFF)<<8); + if (my_bid != fBID) { + fLog->Entry(MongoLog::Local, "Link %i crate %i should be SN %i but is actually %i", + link, crate, fBID, my_bid); + } + } } 
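/*
   For orientation: fClockPeriod above encodes the fact that the on-board clock
   counter is 31 bits of 10 ns ticks, so it wraps roughly every 21.5 s, which is
   why GetClockCounter() below has to track rollovers. A compile-time check of
   that arithmetic; the constant names are illustrative and not part of this
   codebase:

     constexpr int64_t kCounterRange = 1LL << 31;   // 31-bit timestamp counter
     constexpr int64_t kNsPerTick    = 10;          // one V1724 clock tick in ns
     static_assert(kCounterRange * kNsPerTick == 21474836480LL,
                   "counter wraps every ~21.5 seconds");
*/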
V1724::~V1724(){ @@ -87,7 +120,7 @@ bool V1724::EnsureStarted(int ntries, int tsleep){ bool V1724::EnsureStopped(int ntries, int tsleep){ return MonitorRegister(fAqStatusRegister, 0x4, ntries, tsleep, 0x0); } -u_int32_t V1724::GetAcquisitionStatus(){ +uint32_t V1724::GetAcquisitionStatus(){ return ReadRegister(fAqStatusRegister); } @@ -102,77 +135,24 @@ int V1724::CheckErrors(){ return ret; } - -int V1724::Init(int link, int crate, int bid, unsigned int address){ - int a = CAENVME_Init(cvV2718, link, crate, &fBoardHandle); - if(a != cvSuccess){ - fLog->Entry(MongoLog::Warning, "Board %i failed to init, error %i handle %i link %i bdnum %i", - bid, a, fBoardHandle, link, crate); - fBoardHandle = -1; - return -1; - } - fLog->Entry(MongoLog::Debug, "Board %i initialized with handle %i (link/crate)(%i/%i)", - bid, fBoardHandle, link, crate); - - fLink = link; - fCrate = crate; - fBID = bid; - fBaseAddress=address; - fRolloverCounter = 0; - fLastClock = 0; - u_int32_t word(0); - int my_bid(0); - - fBLTSafety = fOptions->GetDouble("blt_safety_factor", 1.5); - fBufferSafety = fOptions->GetDouble("buffer_safety_factor", 1.1); - BLT_SIZE = fOptions->GetInt("blt_size", 512*1024); - // there's a more elegant way to do this, but I'm not going to write it - fClockPeriod = std::chrono::nanoseconds((1l<<31)*DataFormatDefinition["ns_per_clk"]); - - if (Reset()) { - fLog->Entry(MongoLog::Error, "Board %i unable to pre-load registers", fBID); - return -1; - } else { - fLog->Entry(MongoLog::Local, "Board %i reset", fBID); - } - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - if (fOptions->GetInt("do_sn_check", 0) != 0) { - if ((word = ReadRegister(fSNRegisterLSB)) == 0xFFFFFFFF) { - fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN lsb", fBID); - return -1; - } - my_bid |= word&0xFF; - if ((word = ReadRegister(fSNRegisterMSB)) == 0xFFFFFFFF) { - fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN msb", fBID); - return -1; - } - my_bid |= ((word&0xFF)<<8); - if (my_bid != fBID) { - fLog->Entry(MongoLog::Local, "Link %i crate %i should be SN %i but is actually %i", - link, crate, fBID, my_bid); - } - } - return 0; -} - int V1724::Reset() { int ret = WriteRegister(fResetRegister, 0x1); ret += WriteRegister(fBoardErrRegister, 0x30); return ret; } -u_int32_t V1724::GetHeaderTime(u_int32_t *buff, u_int32_t size){ - u_int32_t idx = 0; - while(idx < size/sizeof(u_int32_t)){ - if(buff[idx]>>28==0xA){ - return buff[idx+3]&0x7FFFFFFF; +std::tuple V1724::GetClockInfo(std::u32string_view sv) { + auto it = sv.begin(); + do { + if ((*it)>>28 == 0xA) { + uint32_t ht = *(it+3)&0x7FFFFFFF; + return {ht, GetClockCounter(ht)}; } - idx++; - } - return 0xFFFFFFFF; + } while (++it < sv.end()); + return {0xFFFFFFFF, -1}; } -int V1724::GetClockCounter(u_int32_t timestamp){ +int V1724::GetClockCounter(uint32_t timestamp){ // The V1724 has a 31-bit on board clock counter that counts 10ns samples. // So it will reset every 21 seconds. We need to count the resets or we // can't run longer than that. 
We can employ some clever logic @@ -200,7 +180,7 @@ int V1724::GetClockCounter(u_int32_t timestamp){ } int V1724::WriteRegister(unsigned int reg, unsigned int value){ - u_int32_t write=0; + uint32_t write=0; write+=value; int ret = 0; if((ret = CAENVME_WriteCycle(fBoardHandle, fBaseAddress+reg, @@ -226,21 +206,19 @@ unsigned int V1724::ReadRegister(unsigned int reg){ return temp; } -int V1724::ReadMBLT(u_int32_t* &buffer){ +int V1724::Read(std::unique_ptr& outptr){ + if ((GetAcquisitionStatus() & 0x8) == 0) return 0; // Initialize - int64_t blt_bytes=0; - int nb=0,ret=-5; - std::list> xfer_buffers; + int blt_words=0, nb=0, ret=-5; + std::list> xfer_buffers; int count = 0; - int alloc_size = BLT_SIZE/sizeof(u_int32_t)*fBLTSafety; - u_int32_t* thisBLT = nullptr; - if ((GetAcquisitionStatus() & 0x8) == 0) return 0; - // digitizer has at least one event + int alloc_words = BLT_SIZE/sizeof(char32_t)*fBLTSafety; + char32_t* thisBLT = nullptr; do{ // Reserve space for this block transfer - thisBLT = new u_int32_t[alloc_size]; + thisBLT = new char32_t[alloc_words]; ret = CAENVME_FIFOBLTReadCycle(fBoardHandle, fBaseAddress, ((unsigned char*)thisBLT), @@ -255,43 +233,40 @@ int V1724::ReadMBLT(u_int32_t* &buffer){ for (auto& b : xfer_buffers) delete[] b.first; return -1; } - if (nb > (int)BLT_SIZE) fLog->Entry(MongoLog::Message, + if (nb > BLT_SIZE) fLog->Entry(MongoLog::Message, "Board %i got %i more bytes than asked for (headroom %i)", - fBID, nb-BLT_SIZE, alloc_size-nb); + fBID, nb-BLT_SIZE, alloc_words*sizeof(char32_t)-nb); count++; - blt_bytes+=nb; - xfer_buffers.push_back(std::make_pair(thisBLT, nb)); + blt_words+=nb/sizeof(char32_t); + xfer_buffers.emplace_back(std::make_pair(thisBLT, nb)); }while(ret != cvBusError); - // Now, unfortunately we need to make one copy of the data here or else our memory - // usage explodes. We declare above a buffer of several MB, which is the maximum capacity - // of the board in case every channel is 100% saturated (the practical largest - // capacity is certainly smaller depending on settings). But if we just keep reserving - // O(MB) blocks and filling 50kB with actual data, we're gonna run out of memory. - // So here we declare the return buffer as *just* large enough to hold the actual - // data and free up the rest of the memory reserved as buffer. - // In tests this does not seem to impact our ability to read out the V1724 at the - // maximum bandwidth of the link. - if(blt_bytes>0){ - u_int32_t bytes_copied = 0; - alloc_size = blt_bytes/sizeof(u_int32_t)*fBufferSafety; - buffer = new u_int32_t[alloc_size]; + /*Now, unfortunately we need to make one copy of the data here or else our memory + usage explodes. We declare above a buffer of several MB, which is the maximum capacity + of the board in case every channel is 100% saturated (the practical largest + capacity is certainly smaller depending on settings). But if we just keep reserving + O(MB) blocks and filling 50kB with actual data, we're gonna run out of memory. + So here we declare the return buffer as *just* large enough to hold the actual + data and free up the rest of the memory reserved as buffer. + In tests this does not seem to impact our ability to read out the V1724 at the + maximum bandwidth of the link. 
*/ + if(blt_words>0){ + std::u32string s; + s.reserve(blt_words); for (auto& xfer : xfer_buffers) { - std::memcpy(((unsigned char*)buffer)+bytes_copied, xfer.first, xfer.second); - bytes_copied += xfer.second; + s.append(xfer.first, xfer.second); } fBLTCounter[count]++; - if (bytes_copied != blt_bytes) fLog->Entry(MongoLog::Message, - "Board %i funny buffer accumulation: %i/%i from %i BLTs", - fBID, bytes_copied, blt_bytes, count); + auto [ht, cc] = GetClockInfo(s); + outptr = std::make_unique(std::move(s), ht, cc); } for (auto b : xfer_buffers) delete[] b.first; - return blt_bytes; + return blt_words; } -int V1724::LoadDAC(std::vector &dac_values){ +int V1724::LoadDAC(std::vector &dac_values){ // Loads DAC values into registers for(unsigned int x=0; x &dac_values){ return 0; } -int V1724::SetThresholds(std::vector vals) { +int V1724::SetThresholds(std::vector vals) { int ret = 0; for (unsigned ch = 0; ch < fNChannels; ch++) ret += WriteRegister(fChTrigRegister + 0x100*ch, vals[ch]); @@ -319,9 +294,9 @@ int V1724::End(){ return 0; } -void V1724::ClampDACValues(std::vector &dac_values, +void V1724::ClampDACValues(std::vector &dac_values, std::map> &cal_values) { - u_int16_t min_dac, max_dac(0xffff); + uint16_t min_dac, max_dac(0xffff); for (unsigned ch = 0; ch < fNChannels; ch++) { if (cal_values["yint"][ch] > 0x3fff) { min_dac = (0x3fff - cal_values["yint"][ch])/cal_values["slope"][ch]; @@ -336,8 +311,8 @@ void V1724::ClampDACValues(std::vector &dac_values, } } -bool V1724::MonitorRegister(u_int32_t reg, u_int32_t mask, int ntries, int sleep, u_int32_t val){ - u_int32_t rval = 0; +bool V1724::MonitorRegister(uint32_t reg, uint32_t mask, int ntries, int sleep, uint32_t val){ + uint32_t rval = 0; if(val == 0) rval = 0xffffffff; for(int counter = 0; counter < ntries; counter++){ rval = ReadRegister(reg); @@ -351,3 +326,21 @@ bool V1724::MonitorRegister(u_int32_t reg, u_int32_t mask, int ntries, int sleep fBID, reg, mask, rval,val); return false; } + +std::tuple V1724::UnpackEventHeader(std::u32string_view sv) { + // returns {words this event, channel mask, board fail, header timestamp} + return {sv[0]&0xFFFFFFF, sv[1]&0xFF, sv[1]&0x4000000, sv[3]&0x7FFFFFFF}; +} + +std::tuple V1724::UnpackChannelHeader(std::u32string_view sv, long rollovers, uint32_t header_time, uint32_t, int, int) { + // returns {timestamp (ns), words this channel, baseline, waveform} + long ch_time = sv[1]&0x7FFFFFFF; + int words = sv[0]&0x7FFFFF; + // More rollover logic here, because channels are independent and the + // processing is multithreaded. 
We leverage the fact that readout windows are + // short and polled frequently compared to the rollover timescale, so there + // will never be a large difference in timestamps in one data packet + if (ch_time > 15e8 && header_time < 5e8 && rollovers != 0) rollovers--; + else if (ch_time < 5e8 && header_time > 15e8) rollovers++; + return {((rollovers<<31)+ch_time)*fClockCycle, words, 0, sv.substr(2, words-2)}; +} diff --git a/V1724.hh b/V1724.hh index 134849ef..229da860 100644 --- a/V1724.hh +++ b/V1724.hh @@ -5,6 +5,8 @@ #include #include #include +#include +#include class MongoLog; class Options; @@ -13,22 +15,27 @@ class data_packet; class V1724{ public: - V1724(MongoLog *log, Options *options); + V1724(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned=0); virtual ~V1724(); - virtual int Init(int link, int crate, int bid, unsigned int address=0); - virtual int ReadMBLT(u_int32_t* &buffer); + virtual int Read(std::unique_ptr&); virtual int WriteRegister(unsigned int reg, unsigned int value); virtual unsigned int ReadRegister(unsigned int reg); - virtual int GetClockCounter(u_int32_t timestamp); virtual int End(); int bid() {return fBID;} + uint16_t SampleWidth() {return fSampleWidth;} + int GetClockWidth() {return fClockCycle;} - virtual int LoadDAC(std::vector &dac_values); - void ClampDACValues(std::vector&, std::map>&); + virtual int LoadDAC(std::vector&); + void ClampDACValues(std::vector&, std::map>&); unsigned GetNumChannels() {return fNChannels;} - int SetThresholds(std::vector vals); + int SetThresholds(std::vector vals); + + virtual std::tuple UnpackEventHeader(std::u32string_view); + virtual std::tuple UnpackChannelHeader(std::u32string_view, long, uint32_t, uint32_t, int, int); + + bool CheckFail(bool val=false) {bool ret = fError; fError = val; return ret;} // Acquisition Control @@ -41,10 +48,7 @@ class V1724{ virtual bool EnsureStarted(int ntries, int sleep); virtual bool EnsureStopped(int ntries, int sleep); virtual int CheckErrors(); - virtual u_int32_t GetAcquisitionStatus(); - u_int32_t GetHeaderTime(u_int32_t *buff, u_int32_t size); - - std::map DataFormatDefinition; + virtual uint32_t GetAcquisitionStatus(); protected: // Some values for base classes to override @@ -66,22 +70,24 @@ protected: int BLT_SIZE; std::map fBLTCounter; - bool MonitorRegister(u_int32_t reg, u_int32_t mask, int ntries, int sleep, u_int32_t val=1); - Options *fOptions; + bool MonitorRegister(uint32_t reg, uint32_t mask, int ntries, int sleep, uint32_t val=1); + virtual std::tuple GetClockInfo(std::u32string_view); + int GetClockCounter(uint32_t); int fBoardHandle; int fLink, fCrate, fBID; unsigned int fBaseAddress; // Stuff for clock reset tracking - u_int32_t fRolloverCounter; - u_int32_t fLastClock; + int fRolloverCounter; + uint32_t fLastClock; std::chrono::high_resolution_clock::time_point fLastClockTime; std::chrono::nanoseconds fClockPeriod; - MongoLog *fLog; + std::shared_ptr fLog; + std::atomic_bool fError; float fBLTSafety, fBufferSafety; - + int fSampleWidth, fClockCycle; }; diff --git a/V1724_MV.cc b/V1724_MV.cc index 2555cc17..71f1a74a 100644 --- a/V1724_MV.cc +++ b/V1724_MV.cc @@ -2,11 +2,27 @@ #include "MongoLog.hh" #include "Options.hh" -V1724_MV::V1724_MV(MongoLog *log, Options *options) - :V1724(log, options){ - DataFormatDefinition["channel_header_words"] = 0; - // MV boards seem to have reg 0x1n80 for channel n threshold - fChTrigRegister = 0x1080; +V1724_MV::V1724_MV(std::shared_ptr& log, std::shared_ptr& options) : + V1724(log, options){ + // MV boards seem to have 
reg 0x1n80 for channel n threshold + fChTrigRegister = 0x1080; } V1724_MV::~V1724_MV(){} + +std::tuple +V1724_MV::UnpackChannelHeader(std::u32string_view sv, long rollovers, + uint32_t header_time, uint32_t event_time, int event_words, int n_channels) { + int words = (event_words-4)/n_channels; + // returns {timestamp (ns), baseline, waveform} + // More rollover logic here, because processing is multithreaded. + // We leverage the fact that readout windows are + // short and polled frequently compared to the rollover timescale, so there + // will never be a large difference in timestamps in one data packet + if (event_time > 15e8 && header_time < 5e8 && rollovers != 0) rollovers--; + else if (event_time < 5e8 && header_time > 15e8) rollovers++; + return {((rollovers<<31)+event_time)*fClockCycle, + words, + 0, + sv.substr(0, words)}; +} diff --git a/V1724_MV.hh b/V1724_MV.hh index 5fdbd6e1..e533055f 100644 --- a/V1724_MV.hh +++ b/V1724_MV.hh @@ -6,9 +6,12 @@ class V1724_MV : public V1724 { public: - V1724_MV(MongoLog *log, Options *options); + V1724_MV(std::shared_ptr&, std::shared_ptr&); virtual ~V1724_MV(); + virtual std::tuple UnpackEventHeader(std::u32string_view); + virtual std::tuple UnpackChannelHeader(std::u32string_view, long, uint32_t, uint32_t, int, int); + }; #endif diff --git a/V1730.cc b/V1730.cc index d3a1e198..0d68a6eb 100644 --- a/V1730.cc +++ b/V1730.cc @@ -2,19 +2,29 @@ #include "MongoLog.hh" #include "Options.hh" -V1730::V1730(MongoLog *log, Options *options) +V1730::V1730(std::shared_ptr& log, std::shared_ptr& options) :V1724(log, options){ fNChannels = 16; - DataFormatDefinition["ns_per_sample"] = 2; - DataFormatDefinition["ns_per_clk"] = 2; - DataFormatDefinition["channel_header_words"] = 3; - DataFormatDefinition["channel_mask_msb_idx"] = 2; - DataFormatDefinition["channel_mask_msb_mask"] = -1; - DataFormatDefinition["channel_time_msb_idx"] = 2; - DataFormatDefinition["channel_time_msb_mask"] = -1; - // Channel indices are given relative to start of channel - // i.e. 
the channel size is at index '0' - + fSampleWidth = 2; + fClockCycle = 2; } V1730::~V1730(){} + +std::tuple V1730::UnpackEventHeader(std::u32string_view sv) { + // returns {words this event, channel mask, board fail, header timestamp} + return {sv[0]&0xFFFFFFF, + (sv[1]&0xFF) | ((sv[2]>>16)&0xFF00), + sv[1]&0x4000000, + sv[3]&0x7FFFFFFF}; +} + +std::tuple +V1730::UnpackChannelHeader(std::u32string_view sv, long, uint32_t, uint32_t, int, int) { + // returns {timestamp (ns), words this channel, baseline, waveform} + int words = sv[0]&0x7FFFFF; + return {(long(sv[1]) | (long(sv[2]&0xFFFF)<<32))*fClockCycle, + words, + (sv[2]>>16)&0x3FFF, + sv.substr(3, words-3)}; +} diff --git a/V1730.hh b/V1730.hh index 8d564762..29e32d8d 100644 --- a/V1730.hh +++ b/V1730.hh @@ -6,9 +6,11 @@ class V1730 : public V1724 { public: - V1730(MongoLog *log, Options *options); + V1730(std::shared_ptr&, std::shared_ptr&); virtual ~V1730(); + virtual std::tuple UnpackEventHeader(std::u32string_view); + virtual std::tuple UnpackChannelHeader(std::u32string_view, long, uint32_t, uint32_t, int, int); private: }; diff --git a/V2718.cc b/V2718.cc index 4829eaf6..3c8c3db2 100644 --- a/V2718.cc +++ b/V2718.cc @@ -2,14 +2,13 @@ #include "MongoLog.hh" #include -V2718::V2718(MongoLog *log){ +V2718::V2718(std::shared_ptr& log){ fLog = log; fBoardHandle=fLink=fCrate=-1; fCopts.s_in = fCopts.neutron_veto = fCopts.muon_veto = -1; fCopts.led_trigger = fCopts.pulser_freq = -1; } - V2718::~V2718(){ } @@ -98,7 +97,6 @@ int V2718::SendStartSignal(){ return 0; } - int V2718::SendStopSignal(bool end){ if(fCrate == -1) diff --git a/V2718.hh b/V2718.hh index fe44565a..2bf02c8c 100644 --- a/V2718.hh +++ b/V2718.hh @@ -7,7 +7,7 @@ class MongoLog; class V2718{ public: - V2718(MongoLog *log); + V2718(std::shared_ptr&); virtual ~V2718(); virtual int CrateInit(CrateOptions c_opts, int link, int crate); @@ -21,7 +21,7 @@ protected: int fBoardHandle; CrateOptions fCopts; int fCrate, fLink; - MongoLog *fLog; + std::shared_ptr fLog; }; diff --git a/ccontrol.cc b/ccontrol.cc index ca826275..0cbffbe7 100644 --- a/ccontrol.cc +++ b/ccontrol.cc @@ -84,18 +84,13 @@ int main(int argc, char** argv){ std::cout<<"I dub thee "<Initialize(mongo_uri, dbname, "log", hostname, true); - if(ret!=0){ - std::cout<<"Log couldn't be initialized. Exiting."<(log_retention, log_dir, mongo_uri, dbname, "log", hostname, true); // Options - Options *options = NULL; - + std::shared_ptr options; + // Holds session data - CControl_Handler *fHandler = new CControl_Handler(logger, hostname); + auto fHandler = std::make_shared(logger, hostname); using namespace std::chrono; while(b_run){ @@ -129,8 +124,8 @@ int main(int argc, char** argv){ std::string command = ""; try{ command = doc["command"].get_utf8().value.to_string(); - if(command == "arm" ) - run = doc["number"].get_int32(); + if(command == "arm") + run = doc["number"].get_int32(); } catch(const std::exception E){ logger->Entry(MongoLog::Warning, "ccontrol: Received a document from the dispatcher missing [command|number]"); @@ -157,11 +152,7 @@ int main(int argc, char** argv){ } //Here are our options - if(options != NULL) { - delete options; - options = NULL; - } - options = new Options(logger, mode, hostname, mongo_uri, dbname, override_json); + options = std::make_shared(logger, mode, hostname, mongo_uri, dbname, override_json); // Initialise the V2178, V1495 and DDC10...etc. 
if(fHandler->DeviceArm(run, options) != 0){ @@ -199,8 +190,9 @@ int main(int argc, char** argv){ } std::this_thread::sleep_for(seconds(1)); } // while run - if (options != NULL) delete options; - delete fHandler; - delete logger; + options.reset(); + fHandler.reset(); + logger.reset(); return 0; } + diff --git a/main.cc b/main.cc index 2574f29c..ed45149c 100644 --- a/main.cc +++ b/main.cc @@ -26,7 +26,7 @@ void SignalHandler(int signum) { return; } -void UpdateStatus(std::string suri, std::string dbname, DAQController* controller) { +void UpdateStatus(std::string suri, std::string dbname, std::shared_ptr& controller) { mongocxx::uri uri(suri); mongocxx::client c(uri); mongocxx::collection status = c[dbname]["status"]; @@ -39,7 +39,7 @@ void UpdateStatus(std::string suri, std::string dbname, DAQController* controlle "time" << bsoncxx::types::b_date(system_clock::now())<< "rate" << controller->GetDataSize()/1e6 << "status" << controller->status() << - "buffer_length" << controller->GetBufferLength() << + "buffer_length" << controller->GetBufferLength() << // TODO "buffer_size" << controller->GetBufferSize()/1e6 << "strax_buffer" << controller->GetStraxBufferSize()/1e6 << "run_mode" << controller->run_mode() << @@ -53,7 +53,7 @@ void UpdateStatus(std::string suri, std::string dbname, DAQController* controlle std::cout<<"Can't connect to DB to update."<Initialize(suri, dbname, "log", hostname, true); - if(ret!=0){ - std::cout<<"Exiting"<(log_retention, log_dir, suri, dbname, "log", hostname); //Options - Options *fOptions = NULL; + std::shared_ptr fOptions; // The DAQController object is responsible for passing commands to the // boards and tracking the status - DAQController *controller = new DAQController(logger, hostname); - std::vector readoutThreads; + auto controller = std::make_shared(logger, hostname); std::thread status_update(&UpdateStatus, suri, dbname, controller); using namespace std::chrono; // Main program loop. Scan the database and look for commands addressed @@ -193,66 +187,26 @@ int main(int argc, char** argv){ // Process commands if(command == "start"){ - if(controller->status() == 2) { - if(controller->Start()!=0){ continue; } - - // Nested tried cause of nice C++ typing - try{ - current_run_id = (doc)["run_identifier"].get_utf8().value.to_string(); - } - catch(const std::exception &e){ - try{ - current_run_id = std::to_string((doc)["run_identifier"].get_int32()); - } - catch(const std::exception &e){ - current_run_id = "na"; - } - } - - //logger->Entry(MongoLog::Message, "Received start command from user %s", - // user.c_str()); } else logger->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state"); - } - else if(command == "stop"){ + }else if(command == "stop"){ // "stop" is also a general reset command and can be called any time - //logger->Entry(MongoLog::Message, "Received stop command from user %s", - // user.c_str()); if(controller->Stop()!=0) logger->Entry(MongoLog::Error, "DAQ failed to stop. 
Will continue clearing program memory."); - current_run_id = "none"; - if(readoutThreads.size()!=0){ - for(auto t : readoutThreads){ - t->join(); - delete t; - } - readoutThreads.clear(); - } controller->End(); - } - else if(command == "arm"){ - + } else if(command == "arm"){ // Can only arm if we're in the idle, arming, or armed state - if(controller->status() == 0 || controller->status() == 1 || controller->status() == 2){ + if(controller->status() >= 0 || controller->status() <= 2){ // Join readout threads if they still are out there controller->Stop(); - if(readoutThreads.size() !=0){ - for(auto t : readoutThreads){ - logger->Entry(MongoLog::Local, "Joining orphaned readout thread"); - t->join(); - delete t; - } - readoutThreads.clear(); - } - // Clear up any previously failed things if(controller->status() != 0) controller->End(); @@ -266,46 +220,15 @@ int main(int argc, char** argv){ catch(const std::exception &e){ logger->Entry(MongoLog::Debug, "No override options provided, continue without."); } - - bool initialized = false; - // Mongocxx types confusing so passing json strings around - if(fOptions != NULL) { - delete fOptions; - fOptions = NULL; - } - fOptions = new Options(logger, (doc)["mode"].get_utf8().value.to_string(), + fOptions = std::make_shared(logger, (doc)["mode"].get_utf8().value.to_string(), hostname, suri, dbname, override_json); - std::vector links; - if(controller->InitializeElectronics(fOptions, links) != 0){ + if(controller->InitializeElectronics(fOptions) != 0){ logger->Entry(MongoLog::Error, "Failed to initialize electronics"); controller->End(); - } - else{ - initialized = true; - logger->SetRunId(fOptions->GetString("run_identifier","none")); - logger->Entry(MongoLog::Debug, "Initialized electronics"); - } - - if(readoutThreads.size()!=0){ - logger->Entry(MongoLog::Message, - "Cannot start DAQ while readout thread from previous run active. 
Please perform a reset"); - } - else if(!initialized){ - logger->Entry(MongoLog::Warning, "Skipping readout configuration since init failed"); - } - else{ - controller->CloseProcessingThreads(); - // open nprocessingthreads - if (controller->OpenProcessingThreads()) { - logger->Entry(MongoLog::Warning, "Could not open processing threads!"); - controller->CloseProcessingThreads(); - throw std::runtime_error("Error while arming"); - } - for(unsigned int i=0; iSetRunId(fOptions->GetString("run_identifier","none")); + logger->Entry(MongoLog::Debug, "Initialized electronics"); } } // if status is ok else @@ -318,16 +241,12 @@ int main(int argc, char** argv){ std::cout<<"Can't connect to DB so will continue what I'm doing"<CheckErrors(); - std::this_thread::sleep_for(std::chrono::seconds(1)); + std::this_thread::sleep_for(seconds(1)); } status_update.join(); - delete controller; - if (fOptions != NULL) delete fOptions; - delete logger; + controller.reset(); + fOptions.reset(); + logger.reset(); exit(0); - } - - From f4d98482a6ee90ea7c069ac1c68ac39ea4040083 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 11:12:18 +0200 Subject: [PATCH 02/57] Compiles --- DAQController.cc | 63 ++++++++++++++++++------------------- DAQController.hh | 22 +++++-------- Makefile | 2 +- Options.hh | 2 +- StraxFormatter.cc | 80 +++++++++++++++-------------------------------- StraxFormatter.hh | 23 +++++++------- V1724.cc | 14 ++++----- V1724.d.88260 | 0 V1724_MV.cc | 4 +-- V1724_MV.hh | 3 +- V1730.cc | 4 +-- V1730.hh | 2 +- ccontrol.cc | 2 +- main.cc | 7 ++--- 14 files changed, 94 insertions(+), 134 deletions(-) create mode 100644 V1724.d.88260 diff --git a/DAQController.cc b/DAQController.cc index dd04b7c3..9c217f63 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -5,7 +5,7 @@ #include "V1730.hh" #include "DAXHelpers.hh" #include "Options.hh" -#include "StraxInserter.hh" +#include "StraxFormatter.hh" #include "MongoLog.hh" #include #include @@ -22,20 +22,19 @@ // 3-running // 4-error -DAQController::DAQController(MongoLog *log, std::string hostname){ +DAQController::DAQController(std::shared_ptr& log, std::string hostname){ fLog=log; - fOptions = NULL; + fOptions = nullptr; fStatus = DAXHelpers::Idle; fReadLoop = false; fNProcessingThreads=8; - fBufferLength = 0; fDataRate=0.; fHostname = hostname; } DAQController::~DAQController(){ if(fProcessingThreads.size()!=0) - CloseProcessingThreads(); + CloseThreads(); } std::string DAQController::run_mode(){ @@ -77,6 +76,7 @@ int DAQController::InitializeElectronics(std::shared_ptr& options){ }catch(const std::exception& e) { fLog->Entry(MongoLog::Warning, "Failed to initialize digitizer %i: %s", d.board, e.what()); + fDigitizers.clear(); return -1; } } @@ -91,7 +91,6 @@ int DAQController::InitializeElectronics(std::shared_ptr& options){ if (fOptions->GetString("baseline_dac_mode") == "cached") fOptions->GetDAC(dac_values, BIDs); std::vector init_threads; - fMaxEventsPerThread = fOptions->GetInt("max_events_per_thread", 1024); std::map rets; // Parallel digitizer programming to speed baselining for( auto& link : fDigitizers ) { @@ -121,9 +120,10 @@ int DAQController::InitializeElectronics(std::shared_ptr& options){ digi->AcquisitionStop(); } } + fCounter = 0; if (OpenThreads()) { fLog->Entry(MongoLog::Warning, "Error opening threads"); - fStatus = DAQXHelpers::Idle; + fStatus = DAXHelpers::Idle; return -1; } sleep(1); @@ -200,7 +200,7 @@ void DAQController::End(){ digi->End(); digi.reset(); } - link.clear(); + link.second.clear(); } 
fDigitizers.clear(); fStatus = DAXHelpers::Idle; @@ -257,7 +257,7 @@ void DAQController::ReadData(int link){ if (local_buffer.size() > 0) { fDataRate += local_size; int selector = (fCounter++)%fNProcessingThreads; - fProcessingThreads[selector]->ReceiveDatapackets(local_buffer); + fFormatters[selector]->ReceiveDatapackets(local_buffer); local_size = 0; } readcycler++; @@ -269,30 +269,27 @@ void DAQController::ReadData(int link){ std::map DAQController::GetDataPerChan(){ // Return a map of data transferred per channel since last update - // Clears the private maps in the StraxInserters - const std::lock_guard lg(fPTmutex); - std::map retmap; - for (auto& p : fProcessors) + // Clears the private maps in the StraxFormatters + const std::lock_guard lg(fMutex); + std::map retmap; + for (auto& p : fFormatters) p->GetDataPerChan(retmap); return retmap; } -long DAQController::GetStraxBufferSize() { - const std::lock_guard lg(fPTmutex); - return std::accumulate(fProcessingThreads.begin(), fProcessingThreads.end(), 0, - [=](long tot, processingThread pt) {return tot + pt.inserter->GetBufferSize();}); -} - -int DAQController::GetBufferLength() { - const std::lock_guard lg(fPTmutex); - return fBufferLength.load() + std::accumulate(fProcessingThreads.begin(), - fProcessingThreads.end(), 0, - [](int tot, auto pt){return tot + pt.inserter->GetBufferLength();}); +std::pair DAQController::GetBufferSize() { + const std::lock_guard lg(fMutex); + std::pair ret{0l,0l}; + for (const auto& p : fFormatters) { + auto x = p->GetBufferSize(); + ret.first += x.first; + ret.second += x.second; + } + return ret; } int DAQController::OpenThreads(){ - int ret = 0; - const std::lock_guard lg(fPTmutex); + const std::lock_guard lg(fMutex); fProcessingThreads.reserve(fNProcessingThreads); for(int i=0; i board_fails; - const std::lock_guard lg(fPTmutex); + const std::lock_guard lg(fMutex); for (auto& sf : fFormatters) sf->Close(board_fails); // give threads time to finish std::this_thread::sleep_for(std::chrono::milliseconds(10)); @@ -350,9 +347,9 @@ void DAQController::InitLink(std::vector>& digis, int bid = digi->bid(), success(0); if (BL_MODE == "fit") { } else if(BL_MODE == "cached") { - fMapMutex.lock(); + fMutex.lock(); auto board_dac_cal = cal_values.count(bid) ? cal_values[bid] : cal_values[-1]; - fMapMutex.unlock(); + fMutex.unlock(); dac_values[bid] = std::vector(digi->GetNumChannels()); fLog->Entry(MongoLog::Local, "Board %i using cached baselines", bid); for (unsigned ch = 0; ch < digi->GetNumChannels(); ch++) @@ -515,7 +512,7 @@ int DAQController::FitBaselines(std::vector> &digis, // readout for (auto d : digis) { - bytes_read[d->bid()] = d->Read(buffers[d->bid()]); + words_read[d->bid()] = d->Read(buffers[d->bid()]); } // decode @@ -551,7 +548,7 @@ int DAQController::FitBaselines(std::vector> &digis, it += 4; continue; } - if (mask == 0) { // should be impossible? + if (channel_mask == 0) { // should be impossible? 
it += 4; continue; } @@ -615,11 +612,11 @@ int DAQController::FitBaselines(std::vector> &digis, // **************************** for (auto d : digis) { bid = d->bid(); - fMapMutex.lock(); + fMutex.lock(); cal_values[bid] = std::map>( {{"slope", vector(d->GetNumChannels())}, {"yint", vector(d->GetNumChannels())}}); - fMapMutex.unlock(); + fMutex.unlock(); for (unsigned ch = 0; ch < d->GetNumChannels(); ch++) { B = C = D = E = F = 0; for (unsigned i = 0; i < DAC_cal_points.size(); i++) { diff --git a/DAQController.hh b/DAQController.hh index a7022472..713124a3 100644 --- a/DAQController.hh +++ b/DAQController.hh @@ -10,7 +10,7 @@ #include #include -class StraxInserter; +class StraxFormatter; class MongoLog; class Options; class V1724; @@ -37,24 +37,21 @@ public: int GetDataSize(){int ds = fDataRate; fDataRate=0; return ds;} std::map GetDataPerChan(); - void CheckError(int bid) {fCheckFails[bid] = true;} - long GetStraxBufferSize(); - int GetBufferSize() {return fBufferSize.load();} - - void GetDataFormat(std::map>&); + std::pair GetBufferSize(); private: void ReadData(int link); - int OpenProcessingThreads(); - void CloseProcessingThreads(); + int OpenThreads(); + void CloseThreads(); void InitLink(std::vector>&, std::map>>&, int&); int FitBaselines(std::vector>&, std::map>&, int, std::map>>&); - std::vector> fFormatters; + std::vector> fFormatters; std::vector fProcessingThreads; + std::vector fReadoutThreads; std::map>> fDigitizers; - std::mutex fMapMutex; + std::mutex fMutex; std::atomic_bool fReadLoop; std::map fRunning; @@ -63,13 +60,10 @@ private: std::string fHostname; std::shared_ptr fLog; std::shared_ptr fOptions; - std::shared_ptr fTP; // For reporting to frontend - std::atomic_int fBufferSize; - std::atomic_int fBufferLength; std::atomic_int fDataRate; - std::map fCheckFails; + std::atomic_long fCounter; }; #endif diff --git a/Makefile b/Makefile index 61c2ecc1..cf48f2de 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config --libs libmongoc LDFLAGS_CC = ${LDFLAGS} -lexpect -ltcl8.6 SOURCES_SLAVE = DAQController.cc main.cc Options.cc MongoLog.cc \ - StraxInserter.cc V1724.cc V1724_MV.cc V1730.cc + StraxFormatter.cc V1724.cc V1724_MV.cc V1730.cc OBJECTS_SLAVE = $(SOURCES_SLAVE:%.cc=%.o) DEPS_SLAVE = $(OBJECTS_SLAVE:%.o=%.d) EXEC_SLAVE = main diff --git a/Options.hh b/Options.hh index 52512525..828052c8 100644 --- a/Options.hh +++ b/Options.hh @@ -88,7 +88,7 @@ private: mongocxx::client fClient; bsoncxx::document::view bson_options; bsoncxx::document::value *bson_value; - MongoLog *fLog; + std::shared_ptr fLog; mongocxx::collection fDAC_collection; std::string fDBname; std::string fHostname; diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 5b8f4c83..bc0ecbfa 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -1,15 +1,15 @@ #include "StraxFormatter.hh" -#include #include "DAQController.hh" #include "MongoLog.hh" #include "Options.hh" +#include "V1724.hh" +#include #include #include #include #include #include #include -#include #include #include #include @@ -28,9 +28,10 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetDouble("strax_chunk_length", 5)*1e9); // default 5s fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2); @@ -59,33 +60,6 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptr 0) - fLog->Entry(MongoLog::Local, "Thread %lx waiting to stop, has 
%i events left", - fThreadId, fBufferLength.load()); - else - fLog->Entry(MongoLog::Local, "Thread %lx stopping", fThreadId); - int events_start = fBufferLength.load(); - do{ - events_start = fBufferLength.load(); - while (fRunning && counter_short++ < 500) - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - if (counter_short >= 500) - fLog->Entry(MongoLog::Message, "Thread %lx taking a while to stop, still has %i evts", - fThreadId, fBufferLength.load()); - counter_short = 0; - } while (fRunning && fBufferLength.load() > 0 && events_start > fBufferLength.load() && counter_long++ < 10); - if (fRunning) { - fLog->Entry(MongoLog::Warning, "Force-quitting thread %lx: %i events lost", - fThreadId, fBufferLength.load()); - fForceQuit = true; - std::this_thread::sleep_for(std::chrono::seconds(2)); - } - while (fRunning) { - fLog->Entry(MongoLog::Message, "Still waiting for thread %lx to stop", fThreadId); - std::this_thread::sleep_for(std::chrono::seconds(2)); - } std::stringstream ss; ss << std::hex << fThreadId; std::map times { @@ -105,7 +79,6 @@ StraxFormatter::~StraxFormatter(){ void StraxFormatter::Close(std::map& ret){ fActive = false; - const std::lock_guard lg(fFC_mutex); for (auto& iter : fFailCounter) ret[iter.first] += iter.second; } @@ -119,7 +92,7 @@ void StraxFormatter::GetDataPerChan(std::map& ret) { return; } -void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std:::unique_ptr& digi) { +void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std::shared_ptr& digi) { std::string fragment; fragment.reserve(fFragmentBytes + fStraxHeaderSize); timestamp *= digi->GetClockWidth(); @@ -135,7 +108,6 @@ void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std:::u fragment.append((char*)&fragment_i, sizeof(fragment_i)); int16_t baseline = 0; fragment.append((char*)&baseline, sizeof(baseline)); - fragment.append((char*)&bid, sizeof(bid)); int8_t zero = 0; while ((int)fragment.size() < fFragmentBytes+fStraxHeaderSize) fragment.append((char*)&zero, sizeof(zero)); @@ -164,7 +136,7 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ } else { if (missed) { fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %i", - dp->digi->bid, std::distance(dp->buff.begin(), it)); + dp->digi->bid(), std::distance(dp->buff.begin(), it)); missed = false; } it++; @@ -178,6 +150,7 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ const std::lock_guard lk(fDPC_mutex); for (auto& p : dpc) fDataPerChan[p.first] += p.second; } + fInputBufferSize -= dp->buff.size()*sizeof(char32_t); } int StraxFormatter::ProcessEvent(std::u32string_view buff, @@ -190,7 +163,7 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff, auto [words, channel_mask, fail, event_time] = dp->digi->UnpackEventHeader(buff); if(fail){ // board fail - GenerateArtificialDeadtime(((clock_counter<<31) + header_time), dp->digi); + GenerateArtificialDeadtime(((dp->clock_counter<<31) + dp->header_time), dp->digi); dp->digi->CheckFail(true); fFailCounter[dp->digi->bid()]++; return event_header_words; @@ -216,7 +189,7 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff, } int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, - int channel_mask, uint32_t event_time, int& frags, + int channel_mask, uint32_t event_time, int& frags, int channel, const std::unique_ptr& dp, std::map& dpc) { // buff points to the first word of the channel's data @@ -227,28 +200,28 @@ int 
StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, uint32_t samples_in_pulse = wf.size()*sizeof(uint16_t)/sizeof(char32_t); uint16_t sw = dp->digi->SampleWidth(); - int samples_per_fragment = fFragmentBytes>>1; + int samples_per_frag= fFragmentBytes>>1; int16_t global_ch = fOptions->GetChannel(dp->digi->bid(), channel); // Failing to discern which channel we're getting data from seems serious enough to throw if(global_ch==-1) throw std::runtime_error("Failed to parse channel map. I'm gonna just kms now."); - int num_frags = std::ceil(1.*samples_in_pulse/samples_per_fragment); + int num_frags = std::ceil(1.*samples_in_pulse/samples_per_frag); frags += num_frags; for (uint16_t frag_i = 0; frag_i < num_frags; frag_i++) { std::string fragment; fragment.reserve(fFragmentBytes + fStraxHeaderSize); // How long is this fragment? - uint32_t samples_this_frag = samples_per_fragment; + uint32_t samples_this_frag = samples_per_frag; if (frag_i == num_frags-1) - samples_this_frag = samples_in_pulse - frag_i*samples_per_fragment; + samples_this_frag = samples_in_pulse - frag_i*samples_per_frag; int64_t time_this_frag = timestamp + samples_per_frag*sw*frag_i; fragment.append((char*)&time_this_frag, sizeof(time_this_frag)); fragment.append((char*)&samples_this_frag, sizeof(samples_this_frag)); fragment.append((char*)&sw, sizeof(sw)); - fragment.append((char*)&cl, sizeof(cl)); + fragment.append((char*)&global_ch, sizeof(global_ch)); fragment.append((char*)&samples_in_pulse, sizeof(samples_in_pulse)); fragment.append((char*)&frag_i, sizeof(frag_i)); fragment.append((char*)&baseline_ch, sizeof(baseline_ch)); @@ -260,7 +233,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, while((int)fragment.size()clock_counter); } // loop over frag_i dpc[global_ch] += samples_in_pulse*sizeof(uint16_t); return channel_words; @@ -289,7 +262,7 @@ void StraxFormatter::AddFragmentToBuffer(std::string fragment, uint32_t ts, int fThreadId, chunk_id - max_chunk - 1); } - fFragmentSize += fragment.size(); + fOutputBufferSize += fragment.size(); if(!overlap){ fChunks[chunk_id].emplace_back(std::move(fragment)); @@ -309,12 +282,11 @@ void StraxFormatter::ReceiveDatapackets(std::list>& void StraxFormatter::Process() { // this func runs in its own thread - fThreadID = std::this_thread::get_id(); + fThreadId = std::this_thread::get_id(); std::stringstream ss; - ss< dp; while (fActive == true) { std::unique_lock lk(fBufferMutex); @@ -347,7 +319,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ struct timespec comp_start, comp_end; clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_start); - std::vector*> buffers = {&fChunks[chunk_i], &fOverlaps[chunk_i]}; + std::vector*> buffers = {&fChunks[chunk_i], &fOverlaps[chunk_i]}; std::vector uncompressed_size(3, 0); std::string uncompressed; std::vector> out_buffer(3); @@ -361,10 +333,10 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ uncompressed += *it; buffers[i]->clear(); if(fCompressor == "blosc"){ - max_compressed_size = uncompressed_size + BLOSC_MAX_OVERHEAD; + max_compressed_size = uncompressed_size[i] + BLOSC_MAX_OVERHEAD; out_buffer[i] = std::make_shared(max_compressed_size, 0); wsize[i] = blosc_compress_ctx(5, 1, sizeof(char), uncompressed_size[i], - uncompressed[i].data(), out_buffer[i]->data(), max_compressed_size,"lz4", 0, 2); + uncompressed.data(), out_buffer[i]->data(), max_compressed_size,"lz4", 0, 2); }else{ // Note: the current package repo version for Ubuntu 18.04 (Oct 2019) is 1.7.1, which is // so old it is not 
tracked on the lz4 github. The API for frame compression has changed @@ -373,10 +345,11 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ max_compressed_size = LZ4F_compressFrameBound(uncompressed_size[i], &kPrefs); out_buffer[i] = std::make_shared(max_compressed_size, 0); wsize[i] = LZ4F_compressFrame(out_buffer[i]->data(), max_compressed_size, - uncompressed[i].data(), uncompressed_size[i], &kPrefs); + uncompressed.data(), uncompressed_size[i], &kPrefs); } uncompressed.clear(); fBytesPerChunk[int(std::log2(uncompressed_size[i]))]++; + fOutputBufferSize -= uncompressed_size[i]; } fChunks.erase(chunk_i); fOverlaps.erase(chunk_i); @@ -393,7 +366,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ if (!fs::exists(output_dir_temp)) fs::create_directory(output_dir_temp); std::ofstream writefile(filename_temp, std::ios::binary); - if (uncompressed_size[i] > 0) writefile.write(out_buffer[i]>data(), wsize[i]); + if (uncompressed_size[i] > 0) writefile.write(out_buffer[i]->data(), wsize[i]); writefile.close(); out_buffer[i].reset(); @@ -416,7 +389,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ } void StraxFormatter::WriteOutChunks() { - if (fChunks.size() < fBufferNumChunks) return; + if ((int)fChunks.size() < fBufferNumChunks) return; auto [min_iter, max_iter] = std::minmax_element(fChunks.begin(), fChunks.end(), [&](auto& a, auto& b){return a.first < b.first;}); int max_chunk = (*max_iter).first; @@ -431,7 +404,6 @@ void StraxFormatter::End() { for (auto& p : fChunks) WriteOutChunk(p.first); fChunks.clear(); - fFragmentSize = 0; auto end_dir = GetDirectoryPath("THE_END"); if(!fs::exists(end_dir)){ fLog->Entry(MongoLog::Local,"Creating END directory at %s", end_dir.c_str()); diff --git a/StraxFormatter.hh b/StraxFormatter.hh index 0519b861..142a8877 100644 --- a/StraxFormatter.hh +++ b/StraxFormatter.hh @@ -4,9 +4,6 @@ #include #include #include - -//for debugging -//#include #include #include #include @@ -14,10 +11,14 @@ #include #include #include +#include +#include +#include +#include class Options; class MongoLog; -class ThreadPool; +class V1724; struct data_packet{ data_packet() : clock_counter(0), header_time(0) {} @@ -55,23 +56,21 @@ public: void Close(std::map& ret); void Process(); - long GetBufferSize() {return fFragmentSize.load();} + std::pair GetBufferSize() {return {fInputBufferSize.load(), fOutputBufferSize.load()};} void GetDataPerChan(std::map& ret); - void CheckError(int bid); - int GetBufferLength() {return fBufferLength.load();} - void ReceiveDatapackets(std::list>&); + void ReceiveDatapackets(std::list>&); private: void ProcessDatapacket(std::unique_ptr dp); int ProcessEvent(std::u32string_view, const std::unique_ptr&, std::map&); - int ProcessChannel(std::u32string_view, int, uint32_t, int&, unsigned, + int ProcessChannel(std::u32string_view, int, int, uint32_t, int&, int, const std::unique_ptr&, std::map&); void WriteOutChunk(int); void WriteOutChunks(); void End(); - void GenerateArtificialDeadtime(int64_t, const std::unique_ptr&); - void AddFragmentToBuffer(std::string&, uint32_t, int); + void GenerateArtificialDeadtime(int64_t, const std::shared_ptr&); + void AddFragmentToBuffer(std::string, uint32_t, int); std::experimental::filesystem::path GetFilePath(const std::string&, bool=false); std::experimental::filesystem::path GetDirectoryPath(const std::string&, bool=false); @@ -107,7 +106,7 @@ private: std::thread::id fThreadId; std::condition_variable fCV; std::mutex fBufferMutex; - std::list fBuffer; + std::list> fBuffer; }; #endif diff --git a/V1724.cc 
b/V1724.cc index 4b790dde..1388d613 100644 --- a/V1724.cc +++ b/V1724.cc @@ -8,7 +8,7 @@ #include #include "MongoLog.hh" #include "Options.hh" -#include "StraxInserter.hh" +#include "StraxFormatter.hh" #include #include #include @@ -16,7 +16,7 @@ #include -V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts){ +V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int link, int crate, int bid, unsigned address){ fBoardHandle=fLink=fCrate=fBID=-1; fBaseAddress=0; fLog = log; @@ -44,7 +44,7 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts){ fLog->Entry(MongoLog::Warning, "Board %i failed to init, error %i handle %i link %i bdnum %i", bid, a, fBoardHandle, link, crate); fBoardHandle = -1; - throw std::runtime_error(); + throw std::runtime_error("Board init failed"); } fLog->Entry(MongoLog::Debug, "Board %i initialized with handle %i (link/crate)(%i/%i)", bid, fBoardHandle, link, crate); @@ -61,11 +61,11 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts){ fBLTSafety = opts->GetDouble("blt_safety_factor", 1.5); BLT_SIZE = opts->GetInt("blt_size", 512*1024); // there's a more elegant way to do this, but I'm not going to write it - fClockPeriod = std::chrono::nanoseconds((1l<<31)*DataFormatDefinition["ns_per_clk"]); + fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle); if (Reset()) { fLog->Entry(MongoLog::Error, "Board %i unable to pre-load registers", fBID); - throw std::runtime_error(); + throw std::runtime_error("Board reset failed"); } else { fLog->Entry(MongoLog::Local, "Board %i reset", fBID); } @@ -73,12 +73,12 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts){ if (opts->GetInt("do_sn_check", 0) != 0) { if ((word = ReadRegister(fSNRegisterLSB)) == 0xFFFFFFFF) { fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN lsb", fBID); - throw std::runtime_error(); + throw std::runtime_error("Board access failed"); } my_bid |= word&0xFF; if ((word = ReadRegister(fSNRegisterMSB)) == 0xFFFFFFFF) { fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN msb", fBID); - throw std::runtime_error(); + throw std::runtime_error("Board access failed"); } my_bid |= ((word&0xFF)<<8); if (my_bid != fBID) { diff --git a/V1724.d.88260 b/V1724.d.88260 new file mode 100644 index 00000000..e69de29b diff --git a/V1724_MV.cc b/V1724_MV.cc index 71f1a74a..98d62d7f 100644 --- a/V1724_MV.cc +++ b/V1724_MV.cc @@ -2,8 +2,8 @@ #include "MongoLog.hh" #include "Options.hh" -V1724_MV::V1724_MV(std::shared_ptr& log, std::shared_ptr& options) : - V1724(log, options){ +V1724_MV::V1724_MV(std::shared_ptr& log, std::shared_ptr& opts, int link, int crate, int bid, unsigned address) : +V1724(log, opts, link, crate, bid, address) { // MV boards seem to have reg 0x1n80 for channel n threshold fChTrigRegister = 0x1080; } diff --git a/V1724_MV.hh b/V1724_MV.hh index e533055f..f1ccea98 100644 --- a/V1724_MV.hh +++ b/V1724_MV.hh @@ -6,10 +6,9 @@ class V1724_MV : public V1724 { public: - V1724_MV(std::shared_ptr&, std::shared_ptr&); + V1724_MV(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned); virtual ~V1724_MV(); - virtual std::tuple UnpackEventHeader(std::u32string_view); virtual std::tuple UnpackChannelHeader(std::u32string_view, long, uint32_t, uint32_t, int, int); }; diff --git a/V1730.cc b/V1730.cc index 0d68a6eb..dd36be67 100644 --- a/V1730.cc +++ b/V1730.cc @@ -2,8 +2,8 @@ #include "MongoLog.hh" #include "Options.hh" -V1730::V1730(std::shared_ptr& log, std::shared_ptr& options) - :V1724(log, options){ +V1730::V1730(std::shared_ptr& log, std::shared_ptr& options, 
int link, int crate, int bid, unsigned address) + :V1724(log, options, link, crate, bid, address){ fNChannels = 16; fSampleWidth = 2; fClockCycle = 2; diff --git a/V1730.hh b/V1730.hh index 29e32d8d..942f8a01 100644 --- a/V1730.hh +++ b/V1730.hh @@ -6,7 +6,7 @@ class V1730 : public V1724 { public: - V1730(std::shared_ptr&, std::shared_ptr&); + V1730(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned); virtual ~V1730(); virtual std::tuple UnpackEventHeader(std::u32string_view); diff --git a/ccontrol.cc b/ccontrol.cc index 0cbffbe7..c12c1e33 100644 --- a/ccontrol.cc +++ b/ccontrol.cc @@ -84,7 +84,7 @@ int main(int argc, char** argv){ std::cout<<"I dub thee "<(log_retention, log_dir, mongo_uri, dbname, "log", hostname, true); + auto logger = std::make_shared(log_retention, log_dir, mongo_uri, dbname, "log", hostname); // Options std::shared_ptr options; diff --git a/main.cc b/main.cc index ed45149c..5adf3a5a 100644 --- a/main.cc +++ b/main.cc @@ -35,13 +35,12 @@ void UpdateStatus(std::string suri, std::string dbname, std::shared_ptrGetBufferSize(); insert_doc << "host" << hostname << "time" << bsoncxx::types::b_date(system_clock::now())<< "rate" << controller->GetDataSize()/1e6 << "status" << controller->status() << - "buffer_length" << controller->GetBufferLength() << // TODO - "buffer_size" << controller->GetBufferSize()/1e6 << - "strax_buffer" << controller->GetStraxBufferSize()/1e6 << + "buffer_size" << (buf.first + buf.second)/1e6 << "run_mode" << controller->run_mode() << "channels" << bsoncxx::builder::stream::open_document << [&](bsoncxx::builder::stream::key_context<> doc){ @@ -133,7 +132,7 @@ int main(int argc, char** argv){ // The DAQController object is responsible for passing commands to the // boards and tracking the status auto controller = std::make_shared(logger, hostname); - std::thread status_update(&UpdateStatus, suri, dbname, controller); + std::thread status_update(&UpdateStatus, suri, dbname, std::ref(controller)); using namespace std::chrono; // Main program loop. Scan the database and look for commands addressed // to this hostname. 
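A brief reading note on the clock arithmetic in the V1724 constructor above (this note and the snippet are not part of the patch): the board's trigger time tag is a 31-bit counter, so with the V1724's 10 ns tick (fClockCycle = 10) it wraps every 2^31 x 10 ns, roughly 21.47 s, which is what fClockPeriod = nanoseconds((1l<<31)*fClockCycle) encodes; the V1730's 2 ns tick wraps in about 4.29 s. Later hunks rebuild a full timestamp as ((rollovers<<31) + tag) * fClockCycle, so the software rollover counter supplies the high bits the hardware counter lacks. A minimal sketch of that reconstruction, with a hypothetical helper name not taken from the code:

  #include <cstdint>

  // Not from the patch: shows how a 31-bit time tag plus a software
  // rollover count become a full nanosecond timestamp.
  int64_t full_time_ns(int64_t rollovers, uint32_t tag31, int ns_per_tick) {
    return ((rollovers << 31) + tag31) * ns_per_tick;  // 2^31 * 10 ns ~= 21.47 s per rollover on a V1724
  }
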
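One more aside, on the std::ref change in main.cc just above: std::thread decay-copies its arguments into the new thread, so a function parameter declared as a non-const lvalue reference (as the controller's shared_ptr appears to be here) cannot bind to that internal copy, and wrapping the argument in std::ref is the standard fix. A self-contained sketch of the idea, using placeholder names (Controller, worker) rather than anything from this codebase:

  #include <functional>
  #include <memory>
  #include <thread>

  struct Controller {};

  void worker(std::shared_ptr<Controller>& ctrl) {  // parameter taken by non-const reference
    (void)ctrl;
  }

  int main() {
    auto ctrl = std::make_shared<Controller>();
    // std::thread t(worker, ctrl);         // ill-formed: the thread's copy cannot bind to a non-const reference
    std::thread t(worker, std::ref(ctrl));  // OK: reference_wrapper converts back to the reference
    t.join();
    return 0;
  }

Passing the shared_ptr by value instead would also work without std::ref; taking it by reference just avoids touching the reference count from the status thread.
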
From af50494c84bb598973428fdc2416523d218a9981 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 11:12:56 +0200 Subject: [PATCH 03/57] .d file somehow snuck in there --- V1724.d.88260 | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 V1724.d.88260 diff --git a/V1724.d.88260 b/V1724.d.88260 deleted file mode 100644 index e69de29b..00000000 From f388480ba4bde305e05f0f74c456047f509a16ba Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 12:03:02 +0200 Subject: [PATCH 04/57] Odd crash --- DAQController.cc | 6 +++--- V1724.cc | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 9c217f63..bc21db42 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -312,11 +312,11 @@ void DAQController::CloseThreads(){ const std::lock_guard lg(fMutex); for (auto& sf : fFormatters) sf->Close(board_fails); // give threads time to finish - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - for (auto& sf : fFormatters) sf.reset(); - fFormatters.clear(); + std::this_thread::sleep_for(std::chrono::seconds(1)); for (auto& t : fProcessingThreads) if (t.joinable()) t.join(); fProcessingThreads.clear(); + for (auto& sf : fFormatters) sf.reset(); + fFormatters.clear(); if (std::accumulate(board_fails.begin(), board_fails.end(), 0, [=](int tot, auto& iter) {return std::move(tot) + iter.second;})) { diff --git a/V1724.cc b/V1724.cc index 1388d613..e5f42da2 100644 --- a/V1724.cc +++ b/V1724.cc @@ -172,6 +172,8 @@ int V1724::GetClockCounter(uint32_t timestamp){ if (timestamp < fLastClock) { // actually rolled over fRolloverCounter++; + fLog->Entry(MongoLog::Local, "Board %i rollover %i (%x/%x)", + fBID, fRolloverCounter, fLastClock, timestamp); } else { // not a rollover } From c8478e70e67e094d68aee4730bd66bdb41146f40 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 13:46:59 +0200 Subject: [PATCH 05/57] If in doubt, print it out --- StraxFormatter.cc | 3 ++- V1724.cc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index bc0ecbfa..ff63b8d6 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -126,7 +126,7 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ if((*it)>>28 == 0xA){ missed = true; // it works out clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); - words = (*it)&0x7FFFFFFF; + words = (*it)&0xFFFFFFF; std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); @@ -161,6 +161,7 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff, // returns {words this event, channel mask, board fail, header timestamp} auto [words, channel_mask, fail, event_time] = dp->digi->UnpackEventHeader(buff); + fLog->Entry(MongoLog::Local, "SF %i %x %x", dp->digi->bid(), event_time, dp->header_time); if(fail){ // board fail GenerateArtificialDeadtime(((dp->clock_counter<<31) + dp->header_time), dp->digi); diff --git a/V1724.cc b/V1724.cc index e5f42da2..0ecf9bf3 100644 --- a/V1724.cc +++ b/V1724.cc @@ -262,6 +262,7 @@ int V1724::Read(std::unique_ptr& outptr){ } fBLTCounter[count]++; auto [ht, cc] = GetClockInfo(s); + fLog->Entry(MongoLog::Local, "Bd %i start %x %i", fBID, ht, cc); outptr = std::make_unique(std::move(s), ht, cc); } for (auto b : xfer_buffers) delete[] b.first; From d9fd69112bb92f231d85a79cb9aeb6f50d980eeb Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 14:02:37 +0200 Subject: [PATCH 
06/57] More logging --- StraxFormatter.cc | 3 ++- V1724.cc | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index ff63b8d6..f78d3985 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -161,7 +161,6 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff, // returns {words this event, channel mask, board fail, header timestamp} auto [words, channel_mask, fail, event_time] = dp->digi->UnpackEventHeader(buff); - fLog->Entry(MongoLog::Local, "SF %i %x %x", dp->digi->bid(), event_time, dp->header_time); if(fail){ // board fail GenerateArtificialDeadtime(((dp->clock_counter<<31) + dp->header_time), dp->digi); @@ -206,6 +205,8 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, // Failing to discern which channel we're getting data from seems serious enough to throw if(global_ch==-1) throw std::runtime_error("Failed to parse channel map. I'm gonna just kms now."); + fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), ch, + global_ch, timestamp, dp->header_time, event_time, dp->clock_counter); int num_frags = std::ceil(1.*samples_in_pulse/samples_per_frag); frags += num_frags; diff --git a/V1724.cc b/V1724.cc index 0ecf9bf3..e5f42da2 100644 --- a/V1724.cc +++ b/V1724.cc @@ -262,7 +262,6 @@ int V1724::Read(std::unique_ptr& outptr){ } fBLTCounter[count]++; auto [ht, cc] = GetClockInfo(s); - fLog->Entry(MongoLog::Local, "Bd %i start %x %i", fBID, ht, cc); outptr = std::make_unique(std::move(s), ht, cc); } for (auto b : xfer_buffers) delete[] b.first; From 830261c6c3c8929ecc172038aa85649db350a8c6 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 14:04:53 +0200 Subject: [PATCH 07/57] Typo --- StraxFormatter.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index f78d3985..51e75812 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -205,7 +205,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, // Failing to discern which channel we're getting data from seems serious enough to throw if(global_ch==-1) throw std::runtime_error("Failed to parse channel map. I'm gonna just kms now."); - fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), ch, + fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), channel, global_ch, timestamp, dp->header_time, event_time, dp->clock_counter); int num_frags = std::ceil(1.*samples_in_pulse/samples_per_frag); From 0a30b3e66c4e692618a888ac1f91f4f0f1cf8c70 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 14:35:01 +0200 Subject: [PATCH 08/57] Insidious --- StraxFormatter.cc | 4 ++-- StraxFormatter.hh | 4 ++-- V1724.cc | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 51e75812..a5b6461c 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -205,8 +205,8 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, // Failing to discern which channel we're getting data from seems serious enough to throw if(global_ch==-1) throw std::runtime_error("Failed to parse channel map. 
I'm gonna just kms now."); - fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), channel, - global_ch, timestamp, dp->header_time, event_time, dp->clock_counter); + //fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), channel, + // global_ch, timestamp, dp->header_time, event_time, dp->clock_counter); int num_frags = std::ceil(1.*samples_in_pulse/samples_per_frag); frags += num_frags; diff --git a/StraxFormatter.hh b/StraxFormatter.hh index 142a8877..c9029b4b 100644 --- a/StraxFormatter.hh +++ b/StraxFormatter.hh @@ -22,7 +22,7 @@ class V1724; struct data_packet{ data_packet() : clock_counter(0), header_time(0) {} - data_packet(std::u32string s, uint32_t cc, uint32_t ht) : + data_packet(std::u32string s, uint32_t ht, long cc) : buff(std::move(s)), clock_counter(cc), header_time(ht) {} data_packet(const data_packet& rhs)=delete; data_packet(data_packet&& rhs) : buff(std::move(rhs.buff)), @@ -39,7 +39,7 @@ struct data_packet{ } std::u32string buff; - uint32_t clock_counter; + long clock_counter; uint32_t header_time; std::shared_ptr digi; }; diff --git a/V1724.cc b/V1724.cc index e5f42da2..e168c45a 100644 --- a/V1724.cc +++ b/V1724.cc @@ -346,3 +346,4 @@ std::tuple V1724::UnpackChannelHead else if (ch_time < 5e8 && header_time > 15e8) rollovers++; return {((rollovers<<31)+ch_time)*fClockCycle, words, 0, sv.substr(2, words-2)}; } + From f3df61e85c9c73f2f54777cb8779955423f0c72b Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 14:39:54 +0200 Subject: [PATCH 09/57] Forgot to notify --- StraxFormatter.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index a5b6461c..1f13ca64 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -80,6 +80,7 @@ StraxFormatter::~StraxFormatter(){ void StraxFormatter::Close(std::map& ret){ fActive = false; for (auto& iter : fFailCounter) ret[iter.first] += iter.second; + fCV.notify_one(); } void StraxFormatter::GetDataPerChan(std::map& ret) { From 18c833e5100020c023f6bcba97fff922dd53310d Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 14:49:31 +0200 Subject: [PATCH 10/57] Looking for misses --- StraxFormatter.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 1f13ca64..5b7b60ba 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -128,6 +128,8 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ missed = true; // it works out clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; + fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x", dp->digi->bid(), + std::distance(dp->buff.begin(), it), words, dp->buff.size()); std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); From d29cd1765fd1bb717d8e192ed68e6c08312a6007 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 15:04:21 +0200 Subject: [PATCH 11/57] Segfault fix? 
--- DAQController.cc | 2 ++ StraxFormatter.cc | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index bc21db42..295bc291 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -313,8 +313,10 @@ void DAQController::CloseThreads(){ for (auto& sf : fFormatters) sf->Close(board_fails); // give threads time to finish std::this_thread::sleep_for(std::chrono::seconds(1)); + fLog->Entry(MongoLog::Local, "Joining processing threads"); for (auto& t : fProcessingThreads) if (t.joinable()) t.join(); fProcessingThreads.clear(); + fLog->Entry(MongoLog::Local, "Destroying formatters"); for (auto& sf : fFormatters) sf.reset(); fFormatters.clear(); diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 5b7b60ba..2d3de2fd 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -406,8 +406,9 @@ void StraxFormatter::WriteOutChunks() { } void StraxFormatter::End() { - for (auto& p : fChunks) - WriteOutChunk(p.first); + // this line is awkward, but iterators don't always like it when you're + // changing the container while looping over its contents + while (fChunks.size() > 0) WriteOutChunk(fChunks.begin()->first); fChunks.clear(); auto end_dir = GetDirectoryPath("THE_END"); if(!fs::exists(end_dir)){ From 16870575b3179ff048061948514034dbc347308f Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 15:42:09 +0200 Subject: [PATCH 12/57] More --- StraxFormatter.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 2d3de2fd..d74cc721 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -128,9 +128,9 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ missed = true; // it works out clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; - fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x", dp->digi->bid(), - std::distance(dp->buff.begin(), it), words, dp->buff.size()); std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); + fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x/%x", dp->digi->bid(), + std::distance(dp->buff.begin(), it), words, sv.size(), dp->buff.size()); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); fProcTimeEv += timespec_subtract(ev_end, ev_start); @@ -138,8 +138,8 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ it += words; } else { if (missed) { - fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %i", - dp->digi->bid(), std::distance(dp->buff.begin(), it)); + fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %x/%x", + dp->digi->bid(), std::distance(dp->buff.begin(), it), dp->buff.size()); missed = false; } it++; From d6727a05f22cec3c8681c022ff059bba12b568de Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 24 Sep 2020 15:51:36 +0200 Subject: [PATCH 13/57] Silly subtle typo --- StraxFormatter.cc | 8 ++++---- V1724.cc | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index d74cc721..b1858f5a 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -129,8 +129,8 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); - fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x/%x", dp->digi->bid(), - std::distance(dp->buff.begin(), it), words, sv.size(), dp->buff.size()); + fLog->Entry(MongoLog::Local, "Bd %i 
%x/%x/%x", dp->digi->bid(), + std::distance(dp->buff.begin(), it), words, dp->buff.size()); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); fProcTimeEv += timespec_subtract(ev_end, ev_start); @@ -138,8 +138,8 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ it += words; } else { if (missed) { - fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %x/%x", - dp->digi->bid(), std::distance(dp->buff.begin(), it), dp->buff.size()); + fLog->Entry(MongoLog::Warning, "Missed an event from %i at idx %x/%x (%x)", + dp->digi->bid(), std::distance(dp->buff.begin(), it), dp->buff.size(), *it); missed = false; } it++; diff --git a/V1724.cc b/V1724.cc index e168c45a..36631799 100644 --- a/V1724.cc +++ b/V1724.cc @@ -241,7 +241,7 @@ int V1724::Read(std::unique_ptr& outptr){ count++; blt_words+=nb/sizeof(char32_t); - xfer_buffers.emplace_back(std::make_pair(thisBLT, nb)); + xfer_buffers.emplace_back(std::make_pair(thisBLT, nb/sizeof(char32_t))); }while(ret != cvBusError); From bc35c70327125398f291d9ffc48c78b91f60c065 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Fri, 25 Sep 2020 09:32:07 +0200 Subject: [PATCH 14/57] Less logging --- StraxFormatter.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index b1858f5a..66da038f 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -129,8 +129,8 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); - fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x", dp->digi->bid(), - std::distance(dp->buff.begin(), it), words, dp->buff.size()); + //fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x", dp->digi->bid(), + // std::distance(dp->buff.begin(), it), words, dp->buff.size()); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); fProcTimeEv += timespec_subtract(ev_end, ev_start); @@ -208,8 +208,6 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, // Failing to discern which channel we're getting data from seems serious enough to throw if(global_ch==-1) throw std::runtime_error("Failed to parse channel map. 
I'm gonna just kms now."); - //fLog->Entry(MongoLog::Local, "%i/%i (%i) %lx %x %x %i", dp->digi->bid(), channel, - // global_ch, timestamp, dp->header_time, event_time, dp->clock_counter); int num_frags = std::ceil(1.*samples_in_pulse/samples_per_frag); frags += num_frags; From e1b1cefc48e2ff52b5bf057bf484e5e948368c10 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Mon, 28 Sep 2020 10:16:41 +0200 Subject: [PATCH 15/57] Unified executable --- CControl_Handler.cc | 66 +++++++++++------------------- CControl_Handler.hh | 25 ++++-------- DAQController.cc | 84 ++++++++++++++++++-------------------- DAQController.hh | 28 +++++-------- StraxFormatter.cc | 9 ++-- dispatcher/MongoConnect.py | 2 +- main.cc | 75 ++++++++++++++++------------------ 7 files changed, 123 insertions(+), 166 deletions(-) diff --git a/CControl_Handler.cc b/CControl_Handler.cc index 119ec158..44ebf503 100644 --- a/CControl_Handler.cc +++ b/CControl_Handler.cc @@ -1,18 +1,9 @@ #include "CControl_Handler.hh" -#include "DAXHelpers.hh" -#include "Options.hh" -#include "MongoLog.hh" #include "V2718.hh" #include "DDC10.hh" #include "V1495.hh" -#include -#include -#include - -CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string procname){ - fOptions = nullptr; - fLog = log; - fProcname = procname; + +CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string procname) : DAQController(log, procname){ fCurrentRun = fBID = fBoardHandle-1; fV2718 = nullptr; fV1495 = nullptr; @@ -25,15 +16,20 @@ CControl_Handler::~CControl_Handler(){ } // Initialising various devices namely; V2718 crate controller, V1495, DDC10... -int CControl_Handler::DeviceArm(int run, std::shared_ptr& opts){ +int CControl_Handler::Arm(std::shared_ptr& opts){ fStatus = DAXHelpers::Arming; // Just in case clear out any remaining objects from previous runs DeviceStop(); - fCurrentRun = run; fOptions = opts; + try{ + fCurrentRun = std::stoi(opts->GetInt("run_identifier", "none")); + }catch(std::exception& e) { + fLog->Entry(MongoLog::Warning, "No run number specified in config?? 
%s", e.what()); + return -1; + } // Pull options for V2718 CrateOptions copts; @@ -47,27 +43,25 @@ int CControl_Handler::DeviceArm(int run, std::shared_ptr& opts){ // Getting the link and crate for V2718 std::vector bv = fOptions->GetBoards("V2718"); if(bv.size() != 1){ - fLog->Entry(MongoLog::Message, "Require one V2718 to be defined or we can't start the run"); + fLog->Entry(MongoLog::Message, "Require one V2718 to be defined"); fStatus = DAXHelpers::Idle; return -1; } BoardType cc_def = bv[0]; - fV2718 = std::make_unique(fLog); - if (fV2718->CrateInit(copts, cc_def.link, cc_def.crate)!=0){ - fLog->Entry(MongoLog::Error, "Failed to initialize V2718 crate controller"); + try{ + fV2718 = std::make_unique(fLog, copts, cc_def.link, cc_def.crate); + }catch(std::exception& e){ + fLog->Entry(MongoLog::Error, "Failed to initialize V2718 crate controller: %s", e.what()); fStatus = DAXHelpers::Idle; return -1; - }else{ - fBoardHandle = fV2718->GetHandle(); - fLog->Entry(MongoLog::Local, "V2718 Initialised"); } + fBoardHandle = fV2718->GetHandle(); + fLog->Entry(MongoLog::Local, "V2718 Initialized"); // Getting options for DDC10 HEV module - HEVOptions hopts; - std::vector dv = fOptions->GetBoards("DDC10"); - // Init DDC10 only when included in config - only for TPC if (dv.size() == 1){ + HEVOptions hopts; if(fOptions->GetHEVOpt(hopts) == 0){ fDDC10 = std::make_unique(); if(fDDC10->Initialize(hopts) != 0){ @@ -81,11 +75,8 @@ int CControl_Handler::DeviceArm(int run, std::shared_ptr& opts){ fLog->Entry(MongoLog::Error, "Failed to pull DDC10 options from file"); } } else { - //fLog->Entry(MongoLog::Debug, "No HEV"); } - - // Getting options for the V1495 board std::vector mv = fOptions->GetBoards("V1495"); if (mv.size() == 1){ BoardType mv_def = mv[0]; @@ -102,16 +93,13 @@ int CControl_Handler::DeviceArm(int run, std::shared_ptr& opts){ } } }else{ - //fLog->Entry(MongoLog::Debug, "No V1495"); } - //fLog->Entry(MongoLog::Local, "Arm sequence finished"); fStatus = DAXHelpers::Armed; return 0; -} // end devicearm +} -// Send the start signal from crate controller -int CControl_Handler::DeviceStart(){ +int CControl_Handler::Start(){ if(fStatus != DAXHelpers::Armed){ fLog->Entry(MongoLog::Warning, "V2718 attempt to start without arming. Maybe unclean shutdown"); return 0; @@ -123,15 +111,11 @@ int CControl_Handler::DeviceStart(){ } fStatus = DAXHelpers::Running; - //fLog->Entry(MongoLog::Local, "Start sequence completed"); return 0; } // Stopping the previously started devices; V2718, V1495, DDC10... -int CControl_Handler::DeviceStop(){ - //fLog->Entry(MongoLog::Local, "Beginning stop sequence"); - - // If V2718 here then send stop signal +int CControl_Handler::Stop(){ if(fV2718){ if(fV2718->SendStopSignal() != 0){ fLog->Entry(MongoLog::Warning, "Failed to stop V2718"); @@ -147,13 +131,10 @@ int CControl_Handler::DeviceStop(){ } // Reporting back on the status of V2718, V1495, DDC10 etc... 
-bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ - using namespace std::chrono; - - // Updating the status doc +void CControl_Handler::StatusUpdate(mongocxx::collection* collection){ bsoncxx::builder::stream::document builder{}; builder << "host" << hostname << "status" << fStatus << - "time" << bsoncxx::types::b_date(system_clock::now()); + "time" << bsoncxx::types::b_date(std::chrono::system_clock::now()); auto in_array = builder << "active" << bsoncxx::builder::stream::open_array; if(fV2718){ @@ -169,7 +150,8 @@ bsoncxx::document::value CControl_Handler::GetStatusDoc(std::string hostname){ << bsoncxx::builder::stream::close_document; } auto after_array = in_array << bsoncxx::builder::stream::close_array; - return after_array << bsoncxx::builder::stream::finalize; + collection->insert_one(after_array << bsoncxx::builder::stream::finalize); + return; /* // DDC10 parameters might change for future updates of the XENONnT HEV if(fDDC10){ diff --git a/CControl_Handler.hh b/CControl_Handler.hh index 62b02c80..01ae26af 100644 --- a/CControl_Handler.hh +++ b/CControl_Handler.hh @@ -1,25 +1,21 @@ #ifndef _CCONTROL_HANDLER_HH_ #define _CCONTROL_HANDLER_HH_ -#include -#include +#include "DAQController.hh" -class MongoLog; -class Options; class V2718; class DDC10; class V1495; -class CControl_Handler{ - +class CControl_Handler : public DAQController{ public: - CControl_Handler(std::shared_ptr& log, std::string procname); - ~CControl_Handler(); + CControl_Handler(std::shared_ptr&, std::string); + virtual ~CControl_Handler(); - bsoncxx::document::value GetStatusDoc(std::string hostname); - int DeviceArm(int run, std::shared_ptr& opts); - int DeviceStart(); - int DeviceStop(); + virtual void GetStatusDoc(mongocxx::collection*); + virtual int Arm(std::shared_ptr&); + virtual int Start(); + virtual int Stop(); private: @@ -31,9 +27,6 @@ private: int fCurrentRun; int fBID; int fBoardHandle; - std::string fProcname; - std::shared_ptr fOptions; - std::shared_ptr fLog; }; -#endif +#endif // _CCONTROL_HANDLER_HH_ defined diff --git a/DAQController.cc b/DAQController.cc index 295bc291..5113af1e 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -15,6 +15,9 @@ #include #include +#include +#include + // Status: // 0-idle // 1-arming @@ -37,22 +40,9 @@ DAQController::~DAQController(){ CloseThreads(); } -std::string DAQController::run_mode(){ - if(fOptions == NULL) - return "None"; - try{ - return fOptions->GetString("name", "None"); - } - catch(const std::exception &e){ - return "None"; - } -} - -int DAQController::InitializeElectronics(std::shared_ptr& options){ - - End(); - +int DAQController::Arm(std::shared_ptr& options){ fOptions = options; + fLog->SetRunId(fOptions->GetInt("number", -1)); fNProcessingThreads = fOptions->GetNestedInt("processing_threads."+fHostname, 8); fLog->Entry(MongoLog::Local, "Beginning electronics initialization with %i threads", fNProcessingThreads); @@ -184,15 +174,7 @@ int DAQController::Stop(){ } } } - fLog->Entry(MongoLog::Debug, "Stopped digitizers"); - - fStatus = DAXHelpers::Idle; - return 0; -} - -void DAQController::End(){ - Stop(); - fLog->Entry(MongoLog::Local, "Closing Processing Threads"); + fLog->Entry(MongoLog::Debug, "Stopped digitizers, closing threads"); CloseThreads(); fLog->Entry(MongoLog::Local, "Closing Digitizers"); for(auto& link : fDigitizers ){ @@ -205,7 +187,10 @@ void DAQController::End(){ fDigitizers.clear(); fStatus = DAXHelpers::Idle; + fLog->SetRunId(-1); std::cout<<"Finished end"<Entry(MongoLog::Local, "RO thread %i 
returning", link); } -std::map DAQController::GetDataPerChan(){ - // Return a map of data transferred per channel since last update - // Clears the private maps in the StraxFormatters - const std::lock_guard lg(fMutex); - std::map retmap; - for (auto& p : fFormatters) - p->GetDataPerChan(retmap); - return retmap; -} - -std::pair DAQController::GetBufferSize() { - const std::lock_guard lg(fMutex); - std::pair ret{0l,0l}; - for (const auto& p : fFormatters) { - auto x = p->GetBufferSize(); - ret.first += x.first; - ret.second += x.second; - } - return ret; -} - int DAQController::OpenThreads(){ const std::lock_guard lg(fMutex); fProcessingThreads.reserve(fNProcessingThreads); @@ -330,6 +294,36 @@ void DAQController::CloseThreads(){ for (auto& t : fReadoutThreads) if (t.joinable()) t.join(); } +void DAQController::StatusUpdate(mongocxx::collection* collection) { + auto insert_doc = bsoncxx::builder::stream::document{}; + std::map retmap; + std::pair buf{0,0}; + int rate = fDataRate; + fDataRate = 0; + { + const std::lock_guard lg(fMutex); + for (auto& p : fFormatters) { + p->GetDataPerChan(retmap); + auto x = p->GetBufferSize(); + buf.first += x.first; + buf.second += x.second; + } + } + insert_doc << "host" << fHostname << + "time" << bsoncxx::types::b_date(std::chrono::system_clock::now())<< + "rate" << rate/1e6 << + "status" << fStatus << + "buffer_size" << (buf.first + buf.second)/1e6 << + "run_mode" << (fOptions ? fOptions->GetString("name", "none") : "none") << + "channels" << bsoncxx::builder::stream::open_document << + [&](bsoncxx::builder::stream::key_context<> doc){ + for( auto const& pair : retmap) + doc << std::to_string(pair.first) << short(pair.second>>10); // KB not MB + } << bsoncxx::builder::stream::close_document; + collection->insert_one(insert_doc << bsoncxx::builder::stream::finalize); + return; +} + void DAQController::InitLink(std::vector>& digis, std::map>>& cal_values, int& ret) { std::string BL_MODE = fOptions->GetString("baseline_dac_mode", "fixed"); diff --git a/DAQController.hh b/DAQController.hh index 713124a3..85065a0e 100644 --- a/DAQController.hh +++ b/DAQController.hh @@ -14,6 +14,7 @@ class StraxFormatter; class MongoLog; class Options; class V1724; +class mongocxx::collection; class DAQController{ /* @@ -23,21 +24,18 @@ class DAQController{ public: DAQController(std::shared_ptr&, std::string hostname="DEFAULT"); - ~DAQController(); + virtual ~DAQController(); - int InitializeElectronics(std::shared_ptr&); + virtual int Arm(std::shared_ptr&); + virtual int Start(); + virtual int Stop(); + virtual void StatusUpdate(mongocxx::collection*); - int status(){return fStatus;} - int GetBufferLength(); - std::string run_mode(); - - int Start(); - int Stop(); - void End(); - - int GetDataSize(){int ds = fDataRate; fDataRate=0; return ds;} - std::map GetDataPerChan(); - std::pair GetBufferSize(); +protected: + std::string fHostname; + std::shared_ptr fLog; + std::shared_ptr fOptions; + int fStatus; private: void ReadData(int link); @@ -55,11 +53,7 @@ private: std::atomic_bool fReadLoop; std::map fRunning; - int fStatus; int fNProcessingThreads; - std::string fHostname; - std::shared_ptr fLog; - std::shared_ptr fOptions; // For reporting to frontend std::atomic_int fDataRate; diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 66da038f..d5608b73 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -129,8 +129,6 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; 
std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); - //fLog->Entry(MongoLog::Local, "Bd %i %x/%x/%x", dp->digi->bid(), - // std::distance(dp->buff.begin(), it), words, dp->buff.size()); ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); fProcTimeEv += timespec_subtract(ev_end, ev_start); @@ -216,7 +214,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, fragment.reserve(fFragmentBytes + fStraxHeaderSize); // How long is this fragment? - uint32_t samples_this_frag = samples_per_frag; + int32_t samples_this_frag = samples_per_frag; if (frag_i == num_frags-1) samples_this_frag = samples_in_pulse - frag_i*samples_per_frag; @@ -233,7 +231,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, fragment.append((char*)wf.data(), samples_this_frag*sizeof(uint16_t)); wf.remove_prefix(samples_this_frag*sizeof(uint16_t)/sizeof(char32_t)); uint16_t zero_filler = 0; - while((int)fragment.size()clock_counter); @@ -406,6 +404,7 @@ void StraxFormatter::WriteOutChunks() { void StraxFormatter::End() { // this line is awkward, but iterators don't always like it when you're // changing the container while looping over its contents + if (fChunks.size() > 0) CreateEmpty(fChunks.begin()->first); while (fChunks.size() > 0) WriteOutChunk(fChunks.begin()->first); fChunks.clear(); auto end_dir = GetDirectoryPath("THE_END"); @@ -417,7 +416,7 @@ void StraxFormatter::End() { catch(...){}; } std::ofstream outfile(GetFilePath("THE_END"), std::ios::out); - outfile<<"...my only friend"; + outfile<<"...my only friend\n"; outfile.close(); return; } diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py index 747f7f91..4624a0e1 100644 --- a/dispatcher/MongoConnect.py +++ b/dispatcher/MongoConnect.py @@ -392,7 +392,7 @@ def SendCommand(self, command, hosts, user, detector, mode="", delay=0): "user": user, "detector": detector, "mode": mode, - "options_override": {"run_identifier": n_id}, + "options_override": {"run_identifier": n_id, "number": number}, "number": number, "createdAt": datetime.datetime.utcnow() } diff --git a/main.cc b/main.cc index 5adf3a5a..92210967 100644 --- a/main.cc +++ b/main.cc @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -33,21 +34,7 @@ void UpdateStatus(std::string suri, std::string dbname, std::shared_ptrGetBufferSize(); - insert_doc << "host" << hostname << - "time" << bsoncxx::types::b_date(system_clock::now())<< - "rate" << controller->GetDataSize()/1e6 << - "status" << controller->status() << - "buffer_size" << (buf.first + buf.second)/1e6 << - "run_mode" << controller->run_mode() << - "channels" << bsoncxx::builder::stream::open_document << - [&](bsoncxx::builder::stream::key_context<> doc){ - for( auto const& pair : controller->GetDataPerChan() ) - doc << std::to_string(pair.first) << (pair.second>>10); // KB not MB - } << bsoncxx::builder::stream::close_document; - status.insert_one(insert_doc << bsoncxx::builder::stream::finalize); + status.insert_one(&status); }catch(const std::exception &e){ std::cout<<"Can't connect to DB to update."<: id number of this readout instance, required\n" << "--uri : full MongoDB URI, required\n" << "--db : name of the database to use, default \"daq\"\n" - << "--logdir : where to write the logs\n" + << "--logdir : where to write the logs, default pwd\n" + << "--reader: this instance is a reader\n" + << "--cc: this instance is a crate controller\n" << "--help: print this message\n" << "\n"; return 
1; } int main(int argc, char** argv){ - // Need to create a mongocxx instance and it must exist for // the entirety of the program. So here seems good. mongocxx::instance instance{}; @@ -79,6 +67,7 @@ int main(int argc, char** argv){ std::string current_run_id="none", log_dir = ""; std::string dbname = "daq", suri = "", sid = ""; + bool reader = false, cc = false; int log_retention = 7; // days int c, opt_index; struct option longopts[] = { @@ -86,7 +75,9 @@ int main(int argc, char** argv){ {"uri", required_argument, 0, 1}, {"db", required_argument, 0, 2}, {"logdir", required_argument, 0, 3}, - {"help", no_argument, 0, 4} + {"reader", no_argument, 0, 4}, + {"cc", no_argument, 0, 5}, + {"help", no_argument, 0, 6} }; while ((c = getopt_long(argc, argv, "", longopts, &opt_index)) != -1) { switch(c) { @@ -99,18 +90,26 @@ int main(int argc, char** argv){ case 3: log_dir = optarg; break; case 4: + reader = true; break; + case 5: + cc = true; break; + case 6: default: std::cout<<"Received unknown arg\n"; return PrintUsage(); } } if (suri == "" || sid == "") return PrintUsage(); + if (reader == cc) { + std::cout<<"Specify --reader OR --cc\n"; + return 1; + } // We will consider commands addressed to this PC's ID char chostname[HOST_NAME_MAX]; gethostname(chostname, HOST_NAME_MAX); hostname=chostname; - hostname+= "_reader_" + sid; + hostname+= (reader ? "_reader_" : "_controller_") + sid; std::cout<<"Reader starting with ID: "<(logger, hostname); std::thread status_update(&UpdateStatus, suri, dbname, std::ref(controller)); + + // Sort oldest to newest + auto order = bsoncxx::builder::stream::document{} << + "_id" << 1 < querydoc; - try{ - - // Sort oldest to newest - auto order = bsoncxx::builder::stream::document{} << - "_id" << 1 <Start()!=0){ continue; } + auto now = system_clock::now(); + fLog->Entry(MongoLog::Local, "Ack to start took %i us", + duration_cast(now-ack_time).count()); } else logger->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state"); @@ -198,22 +198,18 @@ int main(int argc, char** argv){ if(controller->Stop()!=0) logger->Entry(MongoLog::Error, "DAQ failed to stop. 
Will continue clearing program memory."); - - controller->End(); + auto now = system_clock::now(); + fLog->Entry(MongoLog::Local, "Ack to stop took %i us", + duration_cast(now-ack_time).count()); } else if(command == "arm"){ // Can only arm if we're in the idle, arming, or armed state if(controller->status() >= 0 || controller->status() <= 2){ - - // Join readout threads if they still are out there controller->Stop(); - // Clear up any previously failed things - if(controller->status() != 0) - controller->End(); // Get an override doc from the 'options_override' field if it exists std::string override_json = ""; try{ - bsoncxx::document::view oopts = (doc)["options_override"].get_document().view(); + bsoncxx::document::view oopts = doc["options_override"].get_document().view(); override_json = bsoncxx::to_json(oopts); } catch(const std::exception &e){ @@ -222,18 +218,17 @@ int main(int argc, char** argv){ // Mongocxx types confusing so passing json strings around fOptions = std::make_shared(logger, (doc)["mode"].get_utf8().value.to_string(), hostname, suri, dbname, override_json); - if(controller->InitializeElectronics(fOptions) != 0){ + if(controller->Arm(fOptions) != 0){ logger->Entry(MongoLog::Error, "Failed to initialize electronics"); - controller->End(); + controller->Stop(); }else{ - logger->SetRunId(fOptions->GetString("run_identifier","none")); logger->Entry(MongoLog::Debug, "Initialized electronics"); } } // if status is ok else logger->Entry(MongoLog::Warning, "Cannot arm DAQ while not 'Idle'"); } else if (command == "quit") b_run = false; - } + } // for doc in cursor } catch(const std::exception &e){ std::cout< Date: Tue, 29 Sep 2020 08:30:44 +0200 Subject: [PATCH 16/57] Time to start compiling --- Makefile | 21 +-- V1724.cc | 66 ++++---- V1724.hh | 6 +- V1724_MV.hh | 1 + f1724.cc | 462 ++++++++++++++++++++++++++++++++++++++++++++++++++++ f1724.hh | 95 +++++++++++ 6 files changed, 597 insertions(+), 54 deletions(-) create mode 100644 f1724.cc create mode 100644 f1724.hh diff --git a/Makefile b/Makefile index cf48f2de..ba38c4e1 100644 --- a/Makefile +++ b/Makefile @@ -6,28 +6,16 @@ CPPFLAGS := $(CFLAGS) LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config --libs libmongocxx) $(shell pkg-config --libs libbsoncxx) LDFLAGS_CC = ${LDFLAGS} -lexpect -ltcl8.6 -SOURCES_SLAVE = DAQController.cc main.cc Options.cc MongoLog.cc \ - StraxFormatter.cc V1724.cc V1724_MV.cc V1730.cc +SOURCES_SLAVE = $(wildcard *.cc) OBJECTS_SLAVE = $(SOURCES_SLAVE:%.cc=%.o) DEPS_SLAVE = $(OBJECTS_SLAVE:%.o=%.d) -EXEC_SLAVE = main - -SOURCES_CC = ccontrol.cc Options.cc V2718.cc \ - CControl_Handler.cc DDC10.cc V1495.cc MongoLog.cc -OBJECTS_CC = $(SOURCES_CC:%.cc=%.o) -DEPS_CC = $(OBJECTS_CC:%.o=%.d) -EXEC_CC = ccontrol +EXEC_SLAVE = redax all: $(EXEC_SLAVE) $(EXEC_SLAVE) : $(OBJECTS_SLAVE) $(CC) $(OBJECTS_SLAVE) $(CFLAGS) $(LDFLAGS) -o $(EXEC_SLAVE) -ccontrol: $(EXEC_CC) - -$(EXEC_CC) : $(OBJECTS_CC) - $(CC) $(OBJECTS_CC) $(CFLAGS) $(LDFLAGS_CC) -o $(EXEC_CC) - %.d : %.cc @set -e; rm -f $@; \ $(CC) -MM $(CFLAGS) $< > $@.$$$$; \ @@ -37,9 +25,6 @@ $(EXEC_CC) : $(OBJECTS_CC) %.o : %.cc %.d $(CC) $(CFLAGS) -o $@ -c $< -include $(DEPS_SLAVE) -include $(DEPS_CC) - .PHONY: clean clean: @@ -47,3 +32,5 @@ clean: rm -f $(EXEC_SLAVE) rm -f $(EXEC_CC) +include $(DEPS_SLAVE) + diff --git a/V1724.cc b/V1724.cc index 36631799..e9405a26 100644 --- a/V1724.cc +++ b/V1724.cc @@ -1,24 +1,17 @@ #include "V1724.hh" -#include -#include -#include -#include -#include -#include -#include #include "MongoLog.hh" #include 
"Options.hh" #include "StraxFormatter.hh" +#include +#include #include #include #include #include -#include V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int link, int crate, int bid, unsigned address){ - fBoardHandle=fLink=fCrate=fBID=-1; - fBaseAddress=0; + fBoardHandle=fBID=-1; fLog = log; fAqCtrlRegister = 0x8100; @@ -38,34 +31,46 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int fSampleWidth = 10; fClockCycle = 10; + fBID = bid; + fBaseAddress=address; + fRolloverCounter = 0; + fLastClock = 0; + fBLTSafety = opts->GetDouble("blt_safety_factor", 1.5); + BLT_SIZE = opts->GetInt("blt_size", 512*1024); + // there's a more elegant way to do this, but I'm not going to write it + fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle); + if (Init(link, crate)) { + throw std::runtime_error("Board init failed"); + } +} + +V1724::~V1724(){ + End(); + if (fBLTCounter.empty()) return; + std::stringstream msg; + msg << "BLT report for board " << fBID << " (BLT " << BLT_SIZE << ")"; + for (auto p : fBLTCounter) msg << " | " << p.first << " " << int(std::log2(p.second)); + fLog->Entry(MongoLog::Local, msg.str()); +} + +int V1724::Init(int link, int crate) { int a = CAENVME_Init(cvV2718, link, crate, &fBoardHandle); if(a != cvSuccess){ fLog->Entry(MongoLog::Warning, "Board %i failed to init, error %i handle %i link %i bdnum %i", bid, a, fBoardHandle, link, crate); fBoardHandle = -1; - throw std::runtime_error("Board init failed"); + return -1; } fLog->Entry(MongoLog::Debug, "Board %i initialized with handle %i (link/crate)(%i/%i)", bid, fBoardHandle, link, crate); - fLink = link; - fCrate = crate; - fBID = bid; - fBaseAddress=address; - fRolloverCounter = 0; - fLastClock = 0; uint32_t word(0); int my_bid(0); - fBLTSafety = opts->GetDouble("blt_safety_factor", 1.5); - BLT_SIZE = opts->GetInt("blt_size", 512*1024); - // there's a more elegant way to do this, but I'm not going to write it - fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle); - if (Reset()) { fLog->Entry(MongoLog::Error, "Board %i unable to pre-load registers", fBID); - throw std::runtime_error("Board reset failed"); + return -1; } else { fLog->Entry(MongoLog::Local, "Board %i reset", fBID); } @@ -73,12 +78,12 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int if (opts->GetInt("do_sn_check", 0) != 0) { if ((word = ReadRegister(fSNRegisterLSB)) == 0xFFFFFFFF) { fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN lsb", fBID); - throw std::runtime_error("Board access failed"); + return -1; } my_bid |= word&0xFF; if ((word = ReadRegister(fSNRegisterMSB)) == 0xFFFFFFFF) { fLog->Entry(MongoLog::Error, "Board %i couldn't read its SN msb", fBID); - throw std::runtime_error("Board access failed"); + return -1; } my_bid |= ((word&0xFF)<<8); if (my_bid != fBID) { @@ -88,15 +93,6 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int } } -V1724::~V1724(){ - End(); - if (fBLTCounter.empty()) return; - std::stringstream msg; - msg << "BLT report for board " << fBID << " (BLT " << BLT_SIZE << ")"; - for (auto p : fBLTCounter) msg << " | " << p.first << " " << int(std::log2(p.second)); - fLog->Entry(MongoLog::Local, msg.str()); -} - int V1724::SINStart(){ fLastClockTime = std::chrono::high_resolution_clock::now(); return WriteRegister(fAqCtrlRegister,0x105); @@ -291,7 +287,7 @@ int V1724::SetThresholds(std::vector vals) { int V1724::End(){ if(fBoardHandle>=0) CAENVME_End(fBoardHandle); - fBoardHandle=fLink=fCrate=-1; + fBoardHandle=-1; fBaseAddress=0; return 0; } diff --git 
a/V1724.hh b/V1724.hh index 229da860..31cd5021 100644 --- a/V1724.hh +++ b/V1724.hh @@ -7,6 +7,7 @@ #include #include #include +#include class MongoLog; class Options; @@ -70,11 +71,12 @@ protected: int BLT_SIZE; std::map fBLTCounter; + virtual int Init(int, int); bool MonitorRegister(uint32_t reg, uint32_t mask, int ntries, int sleep, uint32_t val=1); virtual std::tuple GetClockInfo(std::u32string_view); - int GetClockCounter(uint32_t); + virtual int GetClockCounter(uint32_t); int fBoardHandle; - int fLink, fCrate, fBID; + int fBID; unsigned int fBaseAddress; // Stuff for clock reset tracking diff --git a/V1724_MV.hh b/V1724_MV.hh index f1ccea98..8e813467 100644 --- a/V1724_MV.hh +++ b/V1724_MV.hh @@ -11,6 +11,7 @@ public: virtual std::tuple UnpackChannelHeader(std::u32string_view, long, uint32_t, uint32_t, int, int); +protected: }; #endif diff --git a/f1724.cc b/f1724.cc new file mode 100644 index 00000000..8ca3ba12 --- /dev/null +++ b/f1724.cc @@ -0,0 +1,462 @@ +#include "f1724.hh" +#include "MongoLog.hh" +#include +#include + +using std::vector; +constexpr double PI() {return std::acos(-1.);} + +// redeclare all the static members +std::thread f1724::sGeneratorThread; +std::mutex f1724::sMutex; +std::random_device f1724::sRD; +std::mt19937_64 f1724::sGen; +std::uniform_real_distribution<> f1724::sFlatDist; +long f1724::sClock; +int f1724::sEventCounter; +std::atomic_bool f1724::sRun, f1724::sReady; +fax_options_t f1724::sFaxOptions; +int f1724::sNumPMTs; +vector f1724::sRegistry; +vector> f1724::sPMTxy; +std::condition_variable f1724::sCV; +std::shared_ptr f1724::sLog; + +f1724::pmt_pos_t f1724::PMTiToXY(int i) { + pmt_pos_t ret; + if (i == 0) { + ret.x = ret.y = 0; + return ret; + } + if (i < 7) { + ret.x = std::cos((i-1)*PI()/3.); + ret.y = std::sin((i-1)*PI()/3.); + return ret; + } + int ring = 2; + // how many total PMTs are contained in a radius r? 
aka which ring is this PMT in + while (i > 3*ring*(ring+1)) ring++; + int i_in_ring = i - (1 + 3*ring*(ring-1)); + int side = i_in_ring / ring; + int side_i = i_in_ring % ring; + + double ref_angle = PI()/3*side; + double offset_angle = ref_angle + 2*PI()/3; + double x_c = ring*std::cos(ref_angle), y_c = ring*std::sin(ref_angle); + ret.x = x_c + side_i*std::cos(offset_angle); + ret.y = y_c + side_i*std::sin(offset_angle); + return ret; +} + +f1724::f1724(std::shared_ptr& opts, std::shared_ptr& log, int, int, int bid, unsigned) : V1724(opts, log, 0, 0, bid, 0){ + //fLog->Entry(MongoLog::Warning, "Initializing fax digitizer"); + fSPEtemplate = {0.0, 0.0, 0.0, 2.81e-2, 7.4, 6.07e1, 3.26e1, 1.33e1, 7.60, 5.71, + 7.75, 4.46, 3.68, 3.31, 2.97, 2.74, 2.66, 2.48, 2.27, 2.15, 2.03, 1.93, 1.70, + 1.68, 1.26, 7.86e-1, 5.36e-1, 4.36e-1, 3.11e-1, 2.15e-1}; + fEventCounter = 0; + fSeenUnder5 = true; + fSeenOver15 = false; +} + +f1724::~f1724() { + End(); +} + +int f1724::Init(int, int) { + if (fOptions->GetFaxOptions(fFaxOptions)) { + return -1; + } + fGen = std::mt19937_64(fRD()); + fFlatDist = std::uniform_real_distribution<>(0., 1.); + GlobalInit(fFaxOptions, fLog); + Reset(); + sRegistry.emplace_back(this); + unsigned n_chan = GetNumChannels(); + fBLoffset = fBLslope = fNoiseRMS = fBaseline = vector(n_chan, 0); + std::generate_n(fBLoffset.begin(), n_chan, [&]{return 17000 + 400*fFlatDist(fGen);}); + std::generate_n(fBLslope.begin(), n_chan, [&]{return -0.27 + 0.01*fFlatDist(fGen);}); + std::exponential_distribution<> noise(1); + std::generate_n(fNoiseRMS.begin(), n_chan, [&]{return 4*noise(fGen);}); + std::generate_n(fBaseline.begin(), n_chan, [&]{return 13600 + 50*fFlatDist(fGen);}); + return 0; +} + +void f1724::End() { + AcquisitionStop(true); +} + +int f1724::WriteRegister(unsigned int reg, unsigned int val) { + if (reg == 0x8020 || (reg & 0x1020) == 0x1020) { // min record length + + } else if (reg == 0x8038 || (reg & 0x1038) == 0x1038) { // pre-trigger + + } else if (reg == 0x8060 || (reg & 0x1060) == 0x1060) { // trigger threshold + + } else if (reg == 0x8078 || (reg & 0x1078) == 0x1078) { // samples under threshold + + } else if (reg == 0x807C || (reg & 0x107C) == 0x107C) { // max tail + + } else if (reg == 0x8098 || (reg & 0x1098) == 0x1098) { // DC offset + if (reg == 0x8098) std::fill_n(fBaseline.begin(), fBaseline.size(), val&0xFFFF); + else fBaseline[(reg>>8)&0xF] = (val&0xFFFF); + } + return 0; +} + +unsigned int f1724::ReadRegister(unsigned int) { + return 0; +} + +int f1724::Read(std::unique_ptr& outptr) { + if (fBufferSize == 0) return 0; + const std::lock_guard lk(fBufferMutex); + int retwords = fBuffer.size(); + auto [ht, cc] = GetClockInfo(fBuffer); + outptr = std::make_unique(std::move(fBuffer), ht, cc); + fBufferSize = 0; + return retwords; +} + +int f1724::SWTrigger() { + ConvertToDigiFormat(GenerateNoise(fSPEtemplate.size(), 0xFF), 0xFF, sClock); + return 0; +} + +void f1724::GlobalInit(fax_options_t& fax_options, std::shared_ptr& log) { + if (sReady == false) { + sGen = std::mt19937_64(sRD()); + sFlatDist = std::uniform_real_distribution<>(0., 1.); + sFaxOptions = fax_options; + sLog = log; + sLog->Entry(MongoLog::Local, "f1724 global init"); + + sReady = true; + sRun = false; + sClock = 0; + sEventCounter = 0; + sNumPMTs = (1+3*fax_options.tpc_size*(fax_options.tpc_size+1))*2; + int PMTsPerArray = sNumPMTs/2; + sPMTxy.reserve(sNumPMTs); + for (int p = 0; p < sNumPMTs; p++) + sPMTxy.emplace_back(PMTiToXY(p % PMTsPerArray)); + + sGeneratorThread = 
std::thread(&f1724::GlobalRun); + } else + sLog->Entry(MongoLog::Local, "f1724 global already init"); +} + +void f1724::GlobalDeinit() { + if (sGeneratorThread.joinable()) { + sLog->Entry(MongoLog::Local, "f1724::deinit"); + sRun = sReady = false; + sCV.notify_one(); + sGeneratorThread.join(); + sLog.reset(); + sRegistry.clear(); + sPMTxy.clear(); + } +} + +uint32_t f1724::GetAcquisitionStatus() { + uint32_t ret = 0; + ret |= 0x4*(sRun == true); // run status + ret |= 0x8*(fBufferSize > 0); // event ready + ret |= 0x80; // no PLL unlock + ret |= 0x100*(sRun == true || sReady == true); // board is ready + ret |= 0x8000*(sRun == true); // S-IN + + return ret; +} + +int f1724::SoftwareStart() { + fLastClockTime = std::chrono::high_resolution_clock::now(); + if (sReady == true) { + sRun = true; + sReady = false; + sCV.notify_one(); + } + fGeneratorThread = std::thread(&f1724::Run, this); + return 0; +} + +int f1724::SINStart() { + return SoftwareStart(); +} + +int f1724::AcquisitionStop(bool i_mean_it) { + if (!i_mean_it) return 0; + GlobalDeinit(); + sRun = false; + fCV.notify_one(); + if (fGeneratorThread.joinable()) fGeneratorThread.join(); + Reset(); + return 0; +} + +int f1724::Reset() { + const std::lock_guard lg(fBufferMutex); + fBuffer.clear(); + fEventCounter = 0; + fBufferSize = 0; + return 0; +} + +int f1724::GetClockCounter(uint32_t timestamp) { + // Waveform generation is asynchronous, so we need different logic here + // from a hardware digitizer + if (timestamp > fLastClock) { + // Case 1. This is over 15s but fSeenUnder5 is true. Give 1 back + if(timestamp >= 15e8 && fSeenUnder5 && fRolloverCounter != 0) + return fRolloverCounter-1; + + // Case 2. This is over 5s and fSeenUnder5 is true. + else if(fSeenUnder5 && 5e8 <= timestamp && timestamp < 15e8){ + fSeenUnder5 = false; + fLastClock = timestamp; + return fRolloverCounter; + } + + // Case 3. This is over 15s and fSeenUnder5 is false + else if(timestamp >= 15e8 && !fSeenUnder5){ + fSeenOver15 = true; + fLastClock = timestamp; + return fRolloverCounter; + } + + // Case 4. Anything else where the clock is progressing correctly + else{ + fLastClock = timestamp; + return fRolloverCounter; + } + } + + // Second, is this number less than the previous? + else if(timestamp < fLastClock){ + + // Case 1. Genuine clock reset. under 5s is false and over 15s is true + if(timestamp < 5e8 && !fSeenUnder5 && fSeenOver15){ + fSeenUnder5 = true; + fSeenOver15 = false; + fLog->Entry(MongoLog::Local, "Bd %i rollover %i (%x/%x)", + fBID, fRolloverCounter, timestamp, fLastClock); + fLastClock = timestamp; + fRolloverCounter++; + return fRolloverCounter; + } + + // Case 2: Any other jitter within the 21 seconds, just return + else{ + return fRolloverCounter; + } + } + // timestamps are the same??? 
+ else + return fRolloverCounter; +} + +std::tuple f1724::GenerateEventLocation() { + double offset = 0.5; // min number of PMTs between S1 and S2 to prevent overlap + double z = -1.*sFlatDist(sGen)*((2*sFaxOptions.tpc_size+1)-offset)-offset; + double r = sFlatDist(sGen)*sFaxOptions.tpc_size; // no, this isn't uniform + double theta = sFlatDist(sGen)*2*PI(); + return {r*std::cos(theta), r*std::sin(theta), z}; +} + +vector f1724::GenerateEventSize(double, double, double z) { + int s1 = sFlatDist(sGen)*19+11; + std::normal_distribution<> s2_over_s1{100, 20}; + double elivetime_loss_fraction = std::exp(z/sFaxOptions.e_absorbtion_length); + int s2 = s1*s2_over_s1(sGen)*elivetime_loss_fraction; + return {0, s1, s2}; +} + +vector f1724::MakeHitpattern(int s_i, int photons, double x, double y, double z) { + double signal_width = s_i == 1 ? 40 : 1000.+200.*std::sqrt(std::abs(z)); + vector ret(photons); + vector hit_prob(sNumPMTs, 0.); + std::discrete_distribution<> hitpattern; + double top_fraction(0); + int TopPMTs = sNumPMTs/2; + int tpc_length = 2*sFaxOptions.tpc_size + 1; + if (s_i == 1) { + top_fraction = (0.4-0.1)/(0+tpc_length)*z+0.4; // 10% at bottom, 40% at top + std::fill_n(hit_prob.begin(), TopPMTs, top_fraction/TopPMTs); + } else { + top_fraction = 0.65; + // let's go with a Gaussian probability, because why not + double gaus_width = 2*1.3*1.3; // PMTs wide + auto gen = [&](auto& p){ + return std::exp(-(std::pow(p.x-x, 2)+std::pow(p.y-y, 2))/gaus_width); + }; + + std::transform(sPMTxy.begin(), sPMTxy.begin()+TopPMTs, hit_prob.begin(), gen); + // normalize + double total_top_prob = std::accumulate(hit_prob.begin(),hit_prob.begin()+TopPMTs,0.); + std::transform(hit_prob.begin(), hit_prob.begin()+TopPMTs, hit_prob.begin(), + [&](double x){return top_fraction*x/total_top_prob;}); + } + // bottom array probability simpler to calculate + std::fill(hit_prob.begin()+TopPMTs, hit_prob.end(), (1.-top_fraction)/TopPMTs); + + hitpattern.param(std::discrete_distribution<>::param_type(hit_prob.begin(), hit_prob.end())); + + std::generate_n(ret.begin(), photons, + [&]{return hit_t{hitpattern(sGen), f1724::sFlatDist(sGen)*signal_width};}); + return ret; +} + +void f1724::SendToWorkers(const vector& hits) { + vector> hits_per_board(sRegistry.size()); + int n_boards = sRegistry.size(); + for (auto& hit : hits) { + hits_per_board[hit.pmt_i%n_boards].emplace_back(hit_t{hit.pmt_i/n_boards, hit.time}); + } + for (unsigned i = 0; i < sRegistry.size(); i++) + if (hits_per_board[i].size() > 0) + sRegistry[i]->ReceiveFromGenerator(std::move(hits_per_board[i])); + return; +} + +void f1724::ReceiveFromGenerator(vector hits) { + { + std::lock_guard lk(fMutex); + fProtoPulse = std::move(hits); + } + fCV.notify_one(); +} + +void f1724::MakeWaveform(std::vector& hits) { + int mask = 0; + double last_hit_time = 0, first_hit_time = 1e9; + for (auto& hit : hits) { + mask |= (1< pmt_to_ch(GetNumChannels(), -1); + int j = 0; + for(unsigned ch = 0; ch < pmt_to_ch.size(); ch++) { + if (mask & (1< hit_scale{1., 0.15}; + int offset = 0, sample_width = GetSampleWidth(); + double scale; + for (auto& hit : hits) { + offset = hit.time/sample_width; + scale = hit_scale(fGen); + for (unsigned i = 0; i < fSPEtemplate.size(); i++) { + wf[pmt_to_ch[hit.ch_i]][offset+i] -= fSPEtemplate[i]*scale; + } + } + return ConvertToDigiFormat(wf, mask, timestamp); +} + +void f1724::ConvertToDigiFormat(const vector>& wf, int mask, long ts) { + fEventCounter++; + const int overhead_per_channel = 2, overhead_per_event = 4; + std::u32string buffer; + 
char32_t word = 0; + for (auto& ch : wf) word += ch.size(); // samples + char32_t words_this_event = word/2 + overhead_per_channel*wf.size() + overhead_per_event; + buffer.reserve(words_this_event); + word = words_this_event | 0xA0000000; + buffer += word; + buffer += (char32_t)mask; + word = fEventCounter.load(); + buffer += word; + char32_t timestamp = (ts/fClockCycle)&0x7FFFFFFF; + //fLog->Entry(MongoLog::Local, "Bd %i ts %lx/%08x", fBID, ts, timestamp); + buffer += timestamp; + int32_t sample; + for (auto& ch_wf : wf) { + word = ch_wf.size()/2 + overhead_per_channel; // size is in samples + buffer += word; + buffer += timestamp; + for (unsigned i = 0; i < ch_wf.size(); i += 2) { + sample = std::max(ch_wf[i], 0.); + word = sample & 0x3FFF; + sample = std::max(ch_wf[i+1], 0.); + word |= (sample << 16)&0x3FFF0000; + buffer += word; + } // loop over samples + } // loop over channels + { + const std::lock_guard lg(fBufferMutex); + fBuffer.append(buffer); + fBufferSize = fBuffer.size(); + } + return; +} + +vector> f1724::GenerateNoise(int length, int mask) { + vector> ret; + unsigned n_chan = GetNumChannels(); + ret.reserve(n_chan); + for (unsigned i = 0; i < n_chan; i++) { + if (mask & (1< noise{fBaseline[i], fNoiseRMS[i]}; + std::generate(ret.back().begin(), ret.back().end(), [&]{return noise(fGen);}); + } + } + return ret; +} + +void f1724::Run() { + while (sRun == true) { + std::unique_lock lk(fMutex); + fCV.wait(lk, []{return fProtoPulse.size() > 0 || sRun == false;}); + if (fProtoPulse.size() > 0 && sRun == true) { + MakeWaveform(fProtoPulse); + fProtoPulse.clear(); + } else { + } + lk.unlock(); + } +} + +void f1724::GlobalRun() { + std::exponential_distribution<> rate(sFaxOptions.rate/1e9); // Hz to /ns + double x, y, z, t_max; + long time_to_next; + vector photons; // S1 = 1 + vector hits; + sClock = (0.5+sFlatDist(sGen))*10000; + sEventCounter = 0; + { + std::unique_lock lg(sMutex); + sCV.wait(lg, []{return sReady == false;}); + } + sLog->Entry(MongoLog::Local, "f1724::GlobalRun"); + auto t_start = std::chrono::high_resolution_clock::now(); + while (sRun == true) { + std::tie(x,y,z) = GenerateEventLocation(); + photons = GenerateEventSize(x, y, z); + for (const auto s_i : {1,2}) { + hits = MakeHitpattern(s_i, photons[s_i], x, y, z); + // split hitpattern and issue to digis + SendToWorkers(hits); + t_max = 0; + for (auto& hit : hits) { + t_max = std::max(t_max, hit.time); + } + + time_to_next = (s_i == 1 ? 
std::abs(z/sFaxOptions.drift_speed) : rate(sGen)) + t_max; + sClock += time_to_next; + std::this_thread::sleep_for(std::chrono::nanoseconds(time_to_next)); + } + sEventCounter++; + } + auto t_end = std::chrono::high_resolution_clock::now(); + sLog->Entry(MongoLog::Local, "Generation lasted %lx/%lx", sClock, + std::chrono::duration_cast(t_end-t_start).count()); + sLog->Entry(MongoLog::Local, "f1724::GlobalRun finished"); +} + diff --git a/f1724.hh b/f1724.hh new file mode 100644 index 00000000..a839f7bb --- /dev/null +++ b/f1724.hh @@ -0,0 +1,95 @@ +#ifndef _F1724_HH_ +#define _F1724_HH_ + +#include "V1724.hh" +#include "Options.hh" +#include +#include + +class f1724 : public V1724 { +public: + f1724(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned); + virtual ~f1724(); + + virtual int Read(std::shared_ptr&); + virtual int WriteRegister(unsigned, unsigned); + virtual unsigned ReadRegister(unsigned); + virtual int End(); + + virtual int SINStart(); + virtual int SoftwareStart(); + virtual int AcquisitionStop(bool); + virtual int SWTrigger(); + virtual int Reset(); + virtual bool EnsureReady(int, int) {return sRun || sReady;} + virtual bool EnsureStarted(int, int) {return sRun == true;} + virtual bool EnsureStopped(int, int) {return sRun == false;} + virtual int CheckErrors() {return 0;} + virtual uint32_t GetAcquisitionStatus(); + +protected: + struct hit_t { + int pmt_i; + double time; + }; + struct pmt_pos_t { + double x; + double y; + int array; + }; + + static void GlobalRun(); + static void GlobalInit(fax_options_t&, std::shared_ptr&); + static void GlobalDeinit(); + static std::tuple GenerateEventLocation(); + static std::vector GenerateEventSize(double, double, double); + static std::vector MakeHitpattern(int, int, double, double, double); + static void SendToWorkers(const std::vector&); + static pmt_pos_t PMTiToXY(int); + + static std::thread sGeneratorThread; + static std::mutex sMutex; + static std::random_device sRD; + static std::mt19937_64 sGen; + static std::uniform_real_distribution<> sFlatDist; + static long sClock; + static int sEventCounter; + static std::atomic_bool sRun; + static std::atomic_bool sReady; + static fax_options_t sFaxOptions; + static int sNumPMTs; + static std::vector sRegistry; + static std::vector sPMTxy; + static std::condition_variable sCV; + static std::shared_ptr sLog; + + virtual int Init(int, int); + void Run(); + virtual int GetClockCounter(uint32_t); + void MakeWaveform(std::vector&); + void ConvertToDigiFormat(const std::vector>&, int, long); + std::vector> GenerateNoise(int, int=0xFF); + void ReceiveFromGenerator(std::vector); + + std::u32string fBuffer; + std::mutex fBufferMutex; + std::atomic_int fBufferSize; + std::random_device fRD; + std::mt19937_64 fGen; + std::uniform_real_distribution<> fFlatDist; + std::vector fSPEtemplate; + std::vector fBLoffset, fBLslope; + std::vector fNoiseRMS; + std::vector fBaseline; + fax_options_t fFaxOptions; + std::atomic_long fTimestamp; + std::atomic_int fEventCounter; + std::vector fProtoPulse; + std::condition_variable fCV; + std::mutex fWFMutex; + std::thread fGeneratorThread; + + bool fSeenUnder5, fSeenOver15; +}; + +#endif // _F1724_HH_ defined From 6f9eea68e503003ccb03f0e9e7e2ddc4a5ede362 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Tue, 29 Sep 2020 09:23:58 +0200 Subject: [PATCH 17/57] Compiles --- .gitignore | 5 +- CControl_Handler.cc | 10 ++- CControl_Handler.hh | 2 +- DAQController.cc | 1 - DAQController.hh | 3 +- Makefile | 5 +- MongoLog.cc | 2 +- MongoLog.hh | 4 +- Options.cc | 
14 ++++ Options.hh | 8 ++ V1724.cc | 9 +- V1724.hh | 2 +- V2718.cc | 18 ++-- V2718.hh | 5 +- ccontrol.cc | 198 -------------------------------------------- f1724.cc | 40 +++++---- f1724.hh | 17 ++-- main.cc | 10 +-- 18 files changed, 89 insertions(+), 264 deletions(-) delete mode 100644 ccontrol.cc diff --git a/.gitignore b/.gitignore index 04a5a076..41b0d7c2 100644 --- a/.gitignore +++ b/.gitignore @@ -6,9 +6,8 @@ *.swp *~ -# executables -main -ccontrol +# executable +redax # DAQ logs *.log diff --git a/CControl_Handler.cc b/CControl_Handler.cc index 44ebf503..d94bba83 100644 --- a/CControl_Handler.cc +++ b/CControl_Handler.cc @@ -1,7 +1,9 @@ #include "CControl_Handler.hh" +#include "DAXHelpers.hh" #include "V2718.hh" #include "DDC10.hh" #include "V1495.hh" +#include CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string procname) : DAQController(log, procname){ fCurrentRun = fBID = fBoardHandle-1; @@ -12,7 +14,7 @@ CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string p } CControl_Handler::~CControl_Handler(){ - DeviceStop(); + Stop(); } // Initialising various devices namely; V2718 crate controller, V1495, DDC10... @@ -21,11 +23,11 @@ int CControl_Handler::Arm(std::shared_ptr& opts){ fStatus = DAXHelpers::Arming; // Just in case clear out any remaining objects from previous runs - DeviceStop(); + Stop(); fOptions = opts; try{ - fCurrentRun = std::stoi(opts->GetInt("run_identifier", "none")); + fCurrentRun = opts->GetInt("run_identifier", -1); }catch(std::exception& e) { fLog->Entry(MongoLog::Warning, "No run number specified in config?? %s", e.what()); return -1; @@ -133,7 +135,7 @@ int CControl_Handler::Stop(){ // Reporting back on the status of V2718, V1495, DDC10 etc... void CControl_Handler::StatusUpdate(mongocxx::collection* collection){ bsoncxx::builder::stream::document builder{}; - builder << "host" << hostname << "status" << fStatus << + builder << "host" << fHostname << "status" << fStatus << "time" << bsoncxx::types::b_date(std::chrono::system_clock::now()); auto in_array = builder << "active" << bsoncxx::builder::stream::open_array; diff --git a/CControl_Handler.hh b/CControl_Handler.hh index 01ae26af..37753fae 100644 --- a/CControl_Handler.hh +++ b/CControl_Handler.hh @@ -12,7 +12,7 @@ public: CControl_Handler(std::shared_ptr&, std::string); virtual ~CControl_Handler(); - virtual void GetStatusDoc(mongocxx::collection*); + virtual void StatusUpdate(mongocxx::collection*); virtual int Arm(std::shared_ptr&); virtual int Start(); virtual int Stop(); diff --git a/DAQController.cc b/DAQController.cc index 5113af1e..5d6dcf8d 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -16,7 +16,6 @@ #include #include -#include // Status: // 0-idle diff --git a/DAQController.hh b/DAQController.hh index 85065a0e..4f7a7e1b 100644 --- a/DAQController.hh +++ b/DAQController.hh @@ -9,12 +9,12 @@ #include #include #include +#include class StraxFormatter; class MongoLog; class Options; class V1724; -class mongocxx::collection; class DAQController{ /* @@ -30,6 +30,7 @@ public: virtual int Start(); virtual int Stop(); virtual void StatusUpdate(mongocxx::collection*); + int status() {return fStatus;} protected: std::string fHostname; diff --git a/Makefile b/Makefile index ba38c4e1..4b694930 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,8 @@ CC = g++ CXX = g++ CFLAGS = -Wall -Wextra -pedantic -pedantic-errors -g -DLINUX -std=c++17 -pthread $(shell pkg-config --cflags libmongocxx) CPPFLAGS := $(CFLAGS) -LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config 
--libs libmongocxx) $(shell pkg-config --libs libbsoncxx) -LDFLAGS_CC = ${LDFLAGS} -lexpect -ltcl8.6 +LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config --libs libmongocxx) $(shell pkg-config --libs libbsoncxx) -lexpect -ltcl8.6 +#LDFLAGS_CC = ${LDFLAGS} -lexpect -ltcl8.6 SOURCES_SLAVE = $(wildcard *.cc) OBJECTS_SLAVE = $(SOURCES_SLAVE:%.cc=%.o) @@ -30,7 +30,6 @@ $(EXEC_SLAVE) : $(OBJECTS_SLAVE) clean: rm -f *.o *.d rm -f $(EXEC_SLAVE) - rm -f $(EXEC_CC) include $(DEPS_SLAVE) diff --git a/MongoLog.cc b/MongoLog.cc index 39443b23..da88ad01 100644 --- a/MongoLog.cc +++ b/MongoLog.cc @@ -16,7 +16,7 @@ MongoLog::MongoLog(int DeleteAfterDays, std::string log_dir, std::string connect std::cout<<"Configured WITH local file logging to " << log_dir << std::endl; fFlush = true; fFlushThread = std::thread(&MongoLog::Flusher, this); - fRunId = "none"; + fRunId = -1; try{ mongocxx::uri uri{connection_uri}; diff --git a/MongoLog.hh b/MongoLog.hh index 1544f8a2..bc95eeeb 100644 --- a/MongoLog.hh +++ b/MongoLog.hh @@ -68,7 +68,7 @@ public: const static int Local = -1; // Write to local (file) log only int Entry(int priority,std::string message, ...); - void SetRunId(const std::string& runid) {fRunId = runid;} + void SetRunId(const int runid) {fRunId = runid;} private: void Flusher(); @@ -90,7 +90,7 @@ private: std::thread fFlushThread; std::atomic_bool fFlush; int fFlushPeriod; - std::string fRunId; + int fRunId; }; #endif diff --git a/Options.cc b/Options.cc index a089480d..f9b8ef2f 100644 --- a/Options.cc +++ b/Options.cc @@ -311,6 +311,20 @@ int Options::GetHEVOpt(HEVOptions &ret){ return 0; } +int Options::GetFaxOptions(fax_options_t& opts) { + try { + auto doc = bson_options["fax_options"]; + opts.rate = doc["rate"].get_double().value; + opts.tpc_size = doc["tpc_size"].get_int32().value; + opts.drift_speed = doc["drift_speed"].get_double().value; + opts.e_absorbtion_length = doc["e_absorbtion_length"].get_double().value; + } catch (std::exception& e) { + fLog->Entry(MongoLog::Warning, "Error getting fax options: %s", e.what()); + return -1; + } + return 0; +} + int Options::GetDAC(std::map>>& board_dacs, std::vector& bids) { board_dacs.clear(); diff --git a/Options.hh b/Options.hh index 828052c8..4267ba1a 100644 --- a/Options.hh +++ b/Options.hh @@ -55,6 +55,13 @@ struct HEVOptions{ }; +struct fax_options_t { + int tpc_size; + double rate; + double drift_speed; + double e_absorbtion_length; +}; + class MongoLog; class Options{ @@ -77,6 +84,7 @@ public: int16_t GetChannel(int, int); int GetNestedInt(std::string, int); std::vector GetThresholds(int); + int GetFaxOptions(fax_options_t&); void UpdateDAC(std::map>>&); void SaveBenchmarks(std::map>&, long, std::string, diff --git a/V1724.cc b/V1724.cc index e9405a26..1eaea60a 100644 --- a/V1724.cc +++ b/V1724.cc @@ -40,7 +40,7 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int // there's a more elegant way to do this, but I'm not going to write it fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle); - if (Init(link, crate)) { + if (Init(link, crate, opts)) { throw std::runtime_error("Board init failed"); } } @@ -54,16 +54,16 @@ V1724::~V1724(){ fLog->Entry(MongoLog::Local, msg.str()); } -int V1724::Init(int link, int crate) { +int V1724::Init(int link, int crate, std::shared_ptr& opts) { int a = CAENVME_Init(cvV2718, link, crate, &fBoardHandle); if(a != cvSuccess){ fLog->Entry(MongoLog::Warning, "Board %i failed to init, error %i handle %i link %i bdnum %i", - bid, a, fBoardHandle, link, crate); + fBID, a, fBoardHandle, 
link, crate); fBoardHandle = -1; return -1; } fLog->Entry(MongoLog::Debug, "Board %i initialized with handle %i (link/crate)(%i/%i)", - bid, fBoardHandle, link, crate); + fBID, fBoardHandle, link, crate); uint32_t word(0); int my_bid(0); @@ -91,6 +91,7 @@ int V1724::Init(int link, int crate) { link, crate, fBID, my_bid); } } + return 0; } int V1724::SINStart(){ diff --git a/V1724.hh b/V1724.hh index 31cd5021..28c60a58 100644 --- a/V1724.hh +++ b/V1724.hh @@ -71,7 +71,7 @@ protected: int BLT_SIZE; std::map fBLTCounter; - virtual int Init(int, int); + virtual int Init(int, int, std::shared_ptr&); bool MonitorRegister(uint32_t reg, uint32_t mask, int ntries, int sleep, uint32_t val=1); virtual std::tuple GetClockInfo(std::u32string_view); virtual int GetClockCounter(uint32_t); diff --git a/V2718.cc b/V2718.cc index 3c8c3db2..80ae6872 100644 --- a/V2718.cc +++ b/V2718.cc @@ -2,17 +2,9 @@ #include "MongoLog.hh" #include -V2718::V2718(std::shared_ptr& log){ +V2718::V2718(std::shared_ptr& log, CrateOptions c_opts, int link, int crate){ fLog = log; - fBoardHandle=fLink=fCrate=-1; - fCopts.s_in = fCopts.neutron_veto = fCopts.muon_veto = -1; - fCopts.led_trigger = fCopts.pulser_freq = -1; -} - -V2718::~V2718(){ -} - -int V2718::CrateInit(CrateOptions c_opts, int link, int crate){ + fBoardHandle=-1; fCrate = crate; fLink = link; @@ -22,10 +14,12 @@ int V2718::CrateInit(CrateOptions c_opts, int link, int crate){ int a = CAENVME_Init(cvV2718, fLink, fCrate, &fBoardHandle); if(a != cvSuccess){ fLog->Entry(MongoLog::Error, "Failed to init V2718 with CAEN error: %i", a); - return -1; + throw std::runtime_error("Could not init CC"); } SendStopSignal(false); - return 0; +} + +V2718::~V2718(){ } int V2718::SendStartSignal(){ diff --git a/V2718.hh b/V2718.hh index 2bf02c8c..e4bd09fb 100644 --- a/V2718.hh +++ b/V2718.hh @@ -7,12 +7,11 @@ class MongoLog; class V2718{ public: - V2718(std::shared_ptr&); + V2718(std::shared_ptr&, CrateOptions, int, int); virtual ~V2718(); - virtual int CrateInit(CrateOptions c_opts, int link, int crate); virtual int SendStartSignal(); - virtual int SendStopSignal(bool end=true); + virtual int SendStopSignal(bool end=true); CrateOptions GetCrateOptions(){ return fCopts;}; int GetHandle(){return fBoardHandle;}; diff --git a/ccontrol.cc b/ccontrol.cc deleted file mode 100644 index c12c1e33..00000000 --- a/ccontrol.cc +++ /dev/null @@ -1,198 +0,0 @@ -#include "CControl_Handler.hh" -#include "Options.hh" -#include "MongoLog.hh" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -std::atomic_bool b_run = true; - -void SignalHandler(int signum) { - std::cout << "Received signal "<: id number of this controller instance, required\n" - << "--uri : full MongoDB URI, required\n" - << "--db : name of the database to use, default \"daq\"\n" - << "--logdir : where to write the logs\n" - << "--help: print this message\n" - << "\n"; - return 1; -} - -int main(int argc, char** argv){ - - mongocxx::instance instance{}; - std::string mongo_uri = "", dbname="daq", sid="", log_dir = ""; - int log_retention = 7, c, opt_index; - struct option longopts[] = { - {"id", required_argument, 0, 0}, - {"uri", required_argument, 0, 1}, - {"db", required_argument, 0, 2}, - {"logdir", required_argument, 0, 3}, - {"help", no_argument, 0, 4} - }; - while ((c = getopt_long(argc, argv, "", longopts, &opt_index)) != -1) { - switch(c) { - case 0: - sid = optarg; break; - case 1: - mongo_uri = optarg; break; - case 2: - dbname = optarg; break; - case 3: - 
log_dir = optarg; break; - case 4: - default: - std::cout<<"Received unknown arg\n"; - return PrintUsage(); - } - } - if (mongo_uri== "" || sid == "") return PrintUsage(); - - // Control and status DB connectivity - // We're going to poll control for commands - // and update status with a heartbeat - mongocxx::uri uri(mongo_uri.c_str()); - mongocxx::client client(uri); - mongocxx::database db = client[dbname]; - mongocxx::collection control = db["control"]; - mongocxx::collection status = db["status"]; - mongocxx::collection options_collection = db["options"]; - mongocxx::collection dac_collection = db["dac_calibration"]; - - // Build a unique name for this process - // we trust that the user has given {ID} uniquely - char chostname[HOST_NAME_MAX]; - gethostname(chostname, HOST_NAME_MAX); - std::string hostname = chostname; - hostname += "_controller_"; - hostname += sid; - std::cout<<"I dub thee "<(log_retention, log_dir, mongo_uri, dbname, "log", hostname); - - // Options - std::shared_ptr options; - - // Holds session data - auto fHandler = std::make_shared(logger, hostname); - using namespace std::chrono; - - while(b_run){ - - auto order = bsoncxx::builder::stream::document{} << - "_id" << 1 <(ack_time.time_since_epoch()).count() << - bsoncxx::builder::stream::close_document << - bsoncxx::builder::stream::finalize - ); - - // Strip data from the supplied doc - int run = -1; - std::string command = ""; - try{ - command = doc["command"].get_utf8().value.to_string(); - if(command == "arm") - run = doc["number"].get_int32(); - } catch(const std::exception E){ - logger->Entry(MongoLog::Warning, - "ccontrol: Received a document from the dispatcher missing [command|number]"); - continue; - } - - // If the command is arm gonna use the options file to load the V2718, DDC10, etc...settings - std::string mode = ""; - if(command == "arm"){ - try{ - mode = doc["mode"].get_utf8().value.to_string(); - } catch(const std::exception E){ - logger->Entry(MongoLog::Warning, "ccontrol: Received an arm document with no run mode"); - continue; - } - - // Get an override doc from the 'options_override' field if it exists - std::string override_json = ""; - try{ - bsoncxx::document::view oopts = doc["options_override"].get_document().view(); - override_json = bsoncxx::to_json(oopts); - } catch(const std::exception E){ - logger->Entry(MongoLog::Local, "No override options provided"); - } - - //Here are our options - options = std::make_shared(logger, mode, hostname, mongo_uri, dbname, override_json); - - // Initialise the V2178, V1495 and DDC10...etc. 
- if(fHandler->DeviceArm(run, options) != 0){ - logger->Entry(MongoLog::Warning, "Failed to initialize devices"); - } - - } // end if "arm" command - else if(command == "start"){ - if((fHandler->DeviceStart()) != 0){ - logger->Entry(MongoLog::Warning, "Failed to start devices"); - } - auto start_time = system_clock::now(); - logger->Entry(MongoLog::Local, "Ack to Start took %i us", - duration_cast(start_time-ack_time).count()); - } - else if(command == "stop"){ - if((fHandler->DeviceStop()) != 0){ - logger->Entry(MongoLog::Warning, "Failed to stop devices"); - } - auto start_time = system_clock::now(); - logger->Entry(MongoLog::Local, "Ack to Stop took %i us", - duration_cast(start_time-ack_time).count()); - } - } //end for - } catch (const std::exception& e) { - std::cout<<"Can't access db, I'll keep doing my thing\n"; - continue; - } - - // Report back on what we are doing - try{ - status.insert_one(fHandler->GetStatusDoc(hostname)); - } catch(const std::exception& e) { - std::cout<<"Couldn't update database, I'll keep doing my thing\n"; - } - std::this_thread::sleep_for(seconds(1)); - } // while run - options.reset(); - fHandler.reset(); - logger.reset(); - return 0; -} - diff --git a/f1724.cc b/f1724.cc index 8ca3ba12..a2af56d6 100644 --- a/f1724.cc +++ b/f1724.cc @@ -1,5 +1,6 @@ #include "f1724.hh" #include "MongoLog.hh" +#include "StraxFormatter.hh" #include #include @@ -18,7 +19,7 @@ std::atomic_bool f1724::sRun, f1724::sReady; fax_options_t f1724::sFaxOptions; int f1724::sNumPMTs; vector f1724::sRegistry; -vector> f1724::sPMTxy; +vector f1724::sPMTxy; std::condition_variable f1724::sCV; std::shared_ptr f1724::sLog; @@ -48,7 +49,7 @@ f1724::pmt_pos_t f1724::PMTiToXY(int i) { return ret; } -f1724::f1724(std::shared_ptr& opts, std::shared_ptr& log, int, int, int bid, unsigned) : V1724(opts, log, 0, 0, bid, 0){ +f1724::f1724(std::shared_ptr& log, std::shared_ptr& opts, int, int, int bid, unsigned) : V1724(log, opts, 0, 0, bid, 0){ //fLog->Entry(MongoLog::Warning, "Initializing fax digitizer"); fSPEtemplate = {0.0, 0.0, 0.0, 2.81e-2, 7.4, 6.07e1, 3.26e1, 1.33e1, 7.60, 5.71, 7.75, 4.46, 3.68, 3.31, 2.97, 2.74, 2.66, 2.48, 2.27, 2.15, 2.03, 1.93, 1.70, @@ -62,8 +63,8 @@ f1724::~f1724() { End(); } -int f1724::Init(int, int) { - if (fOptions->GetFaxOptions(fFaxOptions)) { +int f1724::Init(int, int, std::shared_ptr& opts) { + if (opts->GetFaxOptions(fFaxOptions)) { return -1; } fGen = std::mt19937_64(fRD()); @@ -81,8 +82,9 @@ int f1724::Init(int, int) { return 0; } -void f1724::End() { +int f1724::End() { AcquisitionStop(true); + return 0; } int f1724::WriteRegister(unsigned int reg, unsigned int val) { @@ -270,7 +272,7 @@ vector f1724::GenerateEventSize(double, double, double z) { return {0, s1, s2}; } -vector f1724::MakeHitpattern(int s_i, int photons, double x, double y, double z) { +vector f1724::MakeHitpattern(int s_i, int photons, double x, double y, double z) { double signal_width = s_i == 1 ? 
40 : 1000.+200.*std::sqrt(std::abs(z)); vector ret(photons); vector hit_prob(sNumPMTs, 0.); @@ -305,7 +307,7 @@ vector f1724::MakeHitpattern(int s_i, int photons, double x, double y, do return ret; } -void f1724::SendToWorkers(const vector& hits) { +void f1724::SendToWorkers(const vector& hits, long ts) { vector> hits_per_board(sRegistry.size()); int n_boards = sRegistry.size(); for (auto& hit : hits) { @@ -313,23 +315,24 @@ void f1724::SendToWorkers(const vector& hits) { } for (unsigned i = 0; i < sRegistry.size(); i++) if (hits_per_board[i].size() > 0) - sRegistry[i]->ReceiveFromGenerator(std::move(hits_per_board[i])); + sRegistry[i]->ReceiveFromGenerator(std::move(hits_per_board[i]), ts); return; } -void f1724::ReceiveFromGenerator(vector hits) { +void f1724::ReceiveFromGenerator(vector hits, long ts) { { std::lock_guard lk(fMutex); fProtoPulse = std::move(hits); + fTimestamp = ts; } fCV.notify_one(); } -void f1724::MakeWaveform(std::vector& hits) { +void f1724::MakeWaveform(std::vector& hits, long timestamp) { int mask = 0; double last_hit_time = 0, first_hit_time = 1e9; for (auto& hit : hits) { - mask |= (1<& hits) { wf_length += wf_length % 2 ? 1 : 2; // ensure an even number of samples with room auto wf = GenerateNoise(wf_length, mask); std::normal_distribution<> hit_scale{1., 0.15}; - int offset = 0, sample_width = GetSampleWidth(); + int offset = 0; double scale; for (auto& hit : hits) { - offset = hit.time/sample_width; + offset = hit.time/fSampleWidth; scale = hit_scale(fGen); for (unsigned i = 0; i < fSPEtemplate.size(); i++) { - wf[pmt_to_ch[hit.ch_i]][offset+i] -= fSPEtemplate[i]*scale; + wf[pmt_to_ch[hit.pmt_i]][offset+i] -= fSPEtemplate[i]*scale; } } - return ConvertToDigiFormat(wf, mask, timestamp); + ConvertToDigiFormat(wf, mask, timestamp); + return; } void f1724::ConvertToDigiFormat(const vector>& wf, int mask, long ts) { @@ -412,9 +416,9 @@ vector> f1724::GenerateNoise(int length, int mask) { void f1724::Run() { while (sRun == true) { std::unique_lock lk(fMutex); - fCV.wait(lk, []{return fProtoPulse.size() > 0 || sRun == false;}); + fCV.wait(lk, [&]{return fProtoPulse.size() > 0 || sRun == false;}); if (fProtoPulse.size() > 0 && sRun == true) { - MakeWaveform(fProtoPulse); + MakeWaveform(fProtoPulse, fTimestamp.load()); fProtoPulse.clear(); } else { } @@ -442,7 +446,7 @@ void f1724::GlobalRun() { for (const auto s_i : {1,2}) { hits = MakeHitpattern(s_i, photons[s_i], x, y, z); // split hitpattern and issue to digis - SendToWorkers(hits); + SendToWorkers(hits, sClock); t_max = 0; for (auto& hit : hits) { t_max = std::max(t_max, hit.time); diff --git a/f1724.hh b/f1724.hh index a839f7bb..16e73d6c 100644 --- a/f1724.hh +++ b/f1724.hh @@ -5,13 +5,16 @@ #include "Options.hh" #include #include +#include +#include +#include class f1724 : public V1724 { public: - f1724(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned); + f1724(std::shared_ptr&, std::shared_ptr&, int, int, int, unsigned); virtual ~f1724(); - virtual int Read(std::shared_ptr&); + virtual int Read(std::unique_ptr&); virtual int WriteRegister(unsigned, unsigned); virtual unsigned ReadRegister(unsigned); virtual int End(); @@ -44,7 +47,7 @@ protected: static std::tuple GenerateEventLocation(); static std::vector GenerateEventSize(double, double, double); static std::vector MakeHitpattern(int, int, double, double, double); - static void SendToWorkers(const std::vector&); + static void SendToWorkers(const std::vector&, long); static pmt_pos_t PMTiToXY(int); static std::thread sGeneratorThread; @@ -63,13 
+66,13 @@ protected: static std::condition_variable sCV; static std::shared_ptr sLog; - virtual int Init(int, int); + virtual int Init(int, int, std::shared_ptr&); void Run(); virtual int GetClockCounter(uint32_t); - void MakeWaveform(std::vector&); + void MakeWaveform(std::vector&, long); void ConvertToDigiFormat(const std::vector>&, int, long); std::vector> GenerateNoise(int, int=0xFF); - void ReceiveFromGenerator(std::vector); + void ReceiveFromGenerator(std::vector, long); std::u32string fBuffer; std::mutex fBufferMutex; @@ -86,7 +89,7 @@ protected: std::atomic_int fEventCounter; std::vector fProtoPulse; std::condition_variable fCV; - std::mutex fWFMutex; + std::mutex fMutex; std::thread fGeneratorThread; bool fSeenUnder5, fSeenOver15; diff --git a/main.cc b/main.cc index 92210967..63f617a5 100644 --- a/main.cc +++ b/main.cc @@ -34,7 +34,7 @@ void UpdateStatus(std::string suri, std::string dbname, std::shared_ptrStatusUpdate(&status); }catch(const std::exception &e){ std::cout<<"Can't connect to DB to update."<(system_clock::now().time_since_epoch()).count() << + (long)duration_cast(ack_time.time_since_epoch()).count() << bsoncxx::builder::stream::close_document << bsoncxx::builder::stream::finalize ); @@ -188,7 +188,7 @@ int main(int argc, char** argv){ continue; } auto now = system_clock::now(); - fLog->Entry(MongoLog::Local, "Ack to start took %i us", + logger->Entry(MongoLog::Local, "Ack to start took %i us", duration_cast(now-ack_time).count()); } else @@ -199,7 +199,7 @@ int main(int argc, char** argv){ logger->Entry(MongoLog::Error, "DAQ failed to stop. Will continue clearing program memory."); auto now = system_clock::now(); - fLog->Entry(MongoLog::Local, "Ack to stop took %i us", + logger->Entry(MongoLog::Local, "Ack to stop took %i us", duration_cast(now-ack_time).count()); } else if(command == "arm"){ // Can only arm if we're in the idle, arming, or armed state From 5686ccdddfc2228b4d3ec49b1722f6882701decd Mon Sep 17 00:00:00 2001 From: xedaq Date: Tue, 29 Sep 2020 10:10:00 +0200 Subject: [PATCH 18/57] Makefile modification, some jank involved --- CControl_Handler.cc | 10 +++++++++- CControl_Handler.hh | 8 ++++++-- Makefile | 16 ++++++++++++++-- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/CControl_Handler.cc b/CControl_Handler.cc index d94bba83..f9b7382f 100644 --- a/CControl_Handler.cc +++ b/CControl_Handler.cc @@ -1,7 +1,9 @@ #include "CControl_Handler.hh" #include "DAXHelpers.hh" #include "V2718.hh" +#ifdef HASDDC10 #include "DDC10.hh" +#endif #include "V1495.hh" #include @@ -9,7 +11,9 @@ CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string p fCurrentRun = fBID = fBoardHandle-1; fV2718 = nullptr; fV1495 = nullptr; +#ifdef HASDDC10 fDDC10 = nullptr; +#endif fStatus = DAXHelpers::Idle; } @@ -60,6 +64,7 @@ int CControl_Handler::Arm(std::shared_ptr& opts){ fBoardHandle = fV2718->GetHandle(); fLog->Entry(MongoLog::Local, "V2718 Initialized"); +#ifdef HASDDC10 // Getting options for DDC10 HEV module std::vector dv = fOptions->GetBoards("DDC10"); if (dv.size() == 1){ @@ -78,6 +83,7 @@ int CControl_Handler::Arm(std::shared_ptr& opts){ } } else { } +#endif // HASDDC10 std::vector mv = fOptions->GetBoards("V1495"); if (mv.size() == 1){ @@ -124,9 +130,11 @@ int CControl_Handler::Stop(){ } fV2718.reset(); } + fV1495.reset(); +#ifdef HASDDC10 // Don't need to stop the DDC10 but just clean up a bit fDDC10.reset(); - fV1495.reset(); +#endif fStatus = DAXHelpers::Idle; return 0; diff --git a/CControl_Handler.hh b/CControl_Handler.hh index 
37753fae..47bbeb0e 100644 --- a/CControl_Handler.hh +++ b/CControl_Handler.hh @@ -4,8 +4,10 @@ #include "DAQController.hh" class V2718; -class DDC10; class V1495; +#ifdef HASDDC10 +class DDC10; +#endif class CControl_Handler : public DAQController{ public: @@ -20,8 +22,10 @@ public: private: std::unique_ptr fV2718; - std::unique_ptr fDDC10; std::unique_ptr fV1495; +#ifdef HASDDC10 + std::unique_ptr fDDC10; +#endif int fStatus; int fCurrentRun; diff --git a/Makefile b/Makefile index 4b694930..ae6be457 100644 --- a/Makefile +++ b/Makefile @@ -3,14 +3,26 @@ CC = g++ CXX = g++ CFLAGS = -Wall -Wextra -pedantic -pedantic-errors -g -DLINUX -std=c++17 -pthread $(shell pkg-config --cflags libmongocxx) CPPFLAGS := $(CFLAGS) -LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config --libs libmongocxx) $(shell pkg-config --libs libbsoncxx) -lexpect -ltcl8.6 +IS_READER0 := false +ifeq "$(shell hostname)" "reader0" + IS_READER0 = true +endif +LDFLAGS = -lCAENVME -lstdc++fs -llz4 -lblosc $(shell pkg-config --libs libmongocxx) $(shell pkg-config --libs libbsoncxx) #LDFLAGS_CC = ${LDFLAGS} -lexpect -ltcl8.6 -SOURCES_SLAVE = $(wildcard *.cc) +SOURCES_SLAVE = CControl_Handler.cc DAQController.cc f1724.cc main.cc MongoLog.cc \ + Options.cc StraxFormatter.cc V1495.cc V1724.cc V1724_MV.cc \ + V1730.cc V2718.cc OBJECTS_SLAVE = $(SOURCES_SLAVE:%.cc=%.o) DEPS_SLAVE = $(OBJECTS_SLAVE:%.o=%.d) EXEC_SLAVE = redax +ifeq "$(IS_READER0)" "true" + SOURCES_SLAVE += DDC10.cc + CFLAGS += -DHASDDC10 + LDFLAGS += -lexpect -ltcl8.6 +endif + all: $(EXEC_SLAVE) $(EXEC_SLAVE) : $(OBJECTS_SLAVE) From 7774b4e2caf77a467599a41d7300e31a9278a6a9 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Tue, 29 Sep 2020 10:31:56 +0200 Subject: [PATCH 19/57] Some typos --- CControl_Handler.hh | 1 - DAQController.cc | 1 - helpers/make_fax_config.py | 137 +++++++++++++++++++++++++++++++++++++ main.cc | 12 +++- 4 files changed, 146 insertions(+), 5 deletions(-) create mode 100644 helpers/make_fax_config.py diff --git a/CControl_Handler.hh b/CControl_Handler.hh index 47bbeb0e..825e8b58 100644 --- a/CControl_Handler.hh +++ b/CControl_Handler.hh @@ -27,7 +27,6 @@ private: std::unique_ptr fDDC10; #endif - int fStatus; int fCurrentRun; int fBID; int fBoardHandle; diff --git a/DAQController.cc b/DAQController.cc index 5d6dcf8d..2bcc5a64 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -41,7 +41,6 @@ DAQController::~DAQController(){ int DAQController::Arm(std::shared_ptr& options){ fOptions = options; - fLog->SetRunId(fOptions->GetInt("number", -1)); fNProcessingThreads = fOptions->GetNestedInt("processing_threads."+fHostname, 8); fLog->Entry(MongoLog::Local, "Beginning electronics initialization with %i threads", fNProcessingThreads); diff --git a/helpers/make_fax_config.py b/helpers/make_fax_config.py new file mode 100644 index 00000000..91620806 --- /dev/null +++ b/helpers/make_fax_config.py @@ -0,0 +1,137 @@ +import os +from pymongo import MongoClient +import argparse +import math + +PMTsPerBoard = 8 +BoardsPerLink = 8 + +def main(): + parser = argparse.ArgumentParser() + default_size = 2 + default_rate=10 + default_lifetime = 1.5 + default_speed = 1e-4 + parser.add_argument('--size', type=int, default=default_size, + help='How many rings of PMTs, default %i' % default_size) + parser.add_argument('--rate', type=float, default=default_rate, + help='Rate in Hz, default %.1f' % default_rate) + parser.add_argument('--e-lifetime', type=float, default=default_lifetime, + help='Electron livetime/TPC lengths, default %.1f' % default_lifetime) + 
parser.add_argument('--drift-speed', type=float, default=default_speed, + help='Drift speed in PMTs/ns, default %.1e' % default_speed) + parser.add_argument('--name', type=str, help='Name of config', default='UNDEFINED') + args = parser.parse_args() + + if args.name == 'UNDEFINED': + print('Please specify a config name') + return + + boards = [] + channel_map = {} + thresholds = {} + n_pmts = 2*(1+3*args.size*(args.size+1)) + n_boards = math.ceil(n_pmts/PMTsPerBoard) + n_links = math.ceil(n_boards/BoardsPerLink) + boards_per_link = [] + for l in range(n_links): + boards_per_link.append(n_boards//n_links) + if l < n_boards % n_links: + boards_per_link[l] += 1 + + bid = 0 + for l in range(n_links): + for b in range(boards_per_link[l]): + boards.append({ + "type": "V1724_fax", + "host": "reader4_reader_0", + "link": l, + "crate": b, + "vme_address": '0', + "board": bid + }) + thresholds[str(bid)] = [15]*PMTsPerBoard + bid += 1 + for ch in range(n_pmts): + bid = str(ch%n_boards) + ch_i = ch // n_boards + if bid not in channel_map: + channel_map[bid] = [2*n_pmts]*PMTsPerBoard + channel_map[bid][ch_i] = ch + + print('%i links/%i boards/%i PMTs' % (n_links, n_boards, n_pmts)) + + board_doc = { + "name": "fax_%i_boards" % n_boards, + "user": "darryl", + "description": "fax subconfig", + "detector": "include", + "boards": boards, + "registers": [] + } + channel_doc = { + "name": "fax_%i_channels" % n_pmts, + "user": "darryl", + "description": "fax subconfig", + "detector": "include", + "channels": channel_map, + "thresholds": thresholds, + } + generic_doc = { + "name": "fax_common_opts", + "user": "darryl", + "description": "fax subconfig", + "detector": "include", + "processing_threads": {"reader4_reader_0": "auto"}, + "baseline_dac_mode": "fixed", + "run_start": 1, + "detectors": {"reader4_reader_0": "fax"}, + } + strax_opts = { + "name": "fax_strax_options", + "user": "darryl", + "description": "fax subconfig", + "detector": "include", + "strax_chunk_length": 5.0, + "strax_chunk_overlap": 0.5, + "strax_fragment_payload_bytes": 40, + "compressor": "lz4", + "strax_buffer_num_chunks": 2, + "strax_chunk_phase_limit": 1, + "strax_output_path": "/live_data/test", + "output_files": {"reader4_reader_0": 4} + } + doc = { + "name": args.name, + "user": "darryl", + "desription": "fax config, size %i" % args.size, + "fax_options": { + "rate": args.rate, + "tpc_size": args.size, + "e_absorbtion_length": args.e_lifetime, + "drift_speed": args.drift_speed, + }, + "run_start": 0, + "detector": "fax", + "includes": [ + board_doc['name'], + channel_doc['name'], + generic_doc['name'], + strax_opts['name'], + ] + } + with MongoClient("mongodb://daq:%s@xenon1t-daq:27017/admin" % os.environ['MONGO_PASSWORD_DAQ']) as client: + coll = client["testdb"]["options"] + coll.update_one({"name": args.name}, {"$set": doc}, upsert=True) + coll.update_one({"name": strax_opts['name']}, {'$set': strax_opts}, upsert=True) + coll.update_one({'name': generic_doc['name']}, {'$set': generic_doc}, upsert=True) + coll.update_one({'name': board_doc['name']}, {'$set': board_doc}, upsert=True) + coll.update_one({'name': channel_doc['name']}, {'$set': channel_doc}, upsert=True) + + print('Documents inserted') + + return + +if __name__ == '__main__': + main() + diff --git a/main.cc b/main.cc index 63f617a5..6e979e3a 100644 --- a/main.cc +++ b/main.cc @@ -3,6 +3,7 @@ #include #include #include "DAQController.hh" +#include "CControl_Handler.hh" #include #include #include "MongoLog.hh" @@ -27,7 +28,7 @@ void SignalHandler(int signum) { return; } 
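For orientation, the geometry bookkeeping in helpers/make_fax_config.py above boils down to a few formulas. The following standalone sketch (not part of the patch itself) simply repeats them — PMTS_PER_BOARD and BOARDS_PER_LINK mirror the constants of 8 defined at the top of that script:

```python
import math

PMTS_PER_BOARD = 8   # PMTsPerBoard in make_fax_config.py
BOARDS_PER_LINK = 8  # BoardsPerLink in make_fax_config.py

def fax_geometry(size):
    """PMT/board/link counts for a given --size (number of PMT rings)."""
    # two identical hexagonal arrays: a centre PMT plus 3*size*(size+1) ring PMTs each
    n_pmts = 2 * (1 + 3 * size * (size + 1))
    n_boards = math.ceil(n_pmts / PMTS_PER_BOARD)
    n_links = math.ceil(n_boards / BOARDS_PER_LINK)
    # boards are spread as evenly as possible over the links
    boards_per_link = [n_boards // n_links + (1 if l < n_boards % n_links else 0)
                       for l in range(n_links)]
    return n_pmts, n_boards, boards_per_link

for size in (1, 2, 3):
    print(size, fax_geometry(size))
# the default --size 2 gives 38 PMTs on 5 boards sharing a single optical link
```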
-void UpdateStatus(std::string suri, std::string dbname, std::shared_ptr& controller) { +void UpdateStatus(std::string suri, std::string dbname, std::unique_ptr& controller) { mongocxx::uri uri(suri); mongocxx::client c(uri); mongocxx::collection status = c[dbname]["status"]; @@ -130,7 +131,11 @@ int main(int argc, char** argv){ // The DAQController object is responsible for passing commands to the // boards and tracking the status - auto controller = std::make_shared(logger, hostname); + std::unique_ptr controller; + if (cc) + controller = std::make_unique(logger, hostname); + else + controller = std::make_unique(logger, hostname); std::thread status_update(&UpdateStatus, suri, dbname, std::ref(controller)); // Sort oldest to newest @@ -192,7 +197,7 @@ int main(int argc, char** argv){ duration_cast(now-ack_time).count()); } else - logger->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state"); + logger->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state (%i)", controller->status()); }else if(command == "stop"){ // "stop" is also a general reset command and can be called any time if(controller->Stop()!=0) @@ -218,6 +223,7 @@ int main(int argc, char** argv){ // Mongocxx types confusing so passing json strings around fOptions = std::make_shared(logger, (doc)["mode"].get_utf8().value.to_string(), hostname, suri, dbname, override_json); + logger->SetRunId(fOptions->GetInt("number", -1)); if(controller->Arm(fOptions) != 0){ logger->Entry(MongoLog::Error, "Failed to initialize electronics"); controller->Stop(); From 9d65866c3e6d60c8fc7c14093e896a23ca5b4876 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Wed, 30 Sep 2020 10:30:20 +0200 Subject: [PATCH 20/57] Improved cleanup --- DAQController.cc | 4 ++-- StraxFormatter.cc | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 2bcc5a64..6a80a10f 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -167,8 +167,8 @@ int DAQController::Stop(){ // Ensure digitizer is stopped if(digi->EnsureStopped(1000, 1000) != true){ fLog->Entry(MongoLog::Warning, - "Timed out waiting for acquisition to stop after SW stop sent"); - return -1; + "Timed out waiting for %i to stop after SW stop sent", digi->bid()); + //return -1; } } } diff --git a/StraxFormatter.cc b/StraxFormatter.cc index d5608b73..197a9036 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -305,6 +305,8 @@ void StraxFormatter::Process() { if (fBytesProcessed > 0) End(); fRunning = false; + if (fBuffer.size() > 0) + fLog->Entry(MongoLog::Warning, "%i DPs unprocessed", fBuffer.size()); } // Can tune here as needed, these are defaults from the LZ4 examples From 27aacabb559f26ab112d1832b4d6ff130e71192a Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 1 Oct 2020 10:12:02 +0200 Subject: [PATCH 21/57] Don't forget to update the docs --- DAQController.cc | 3 ++ Options.cc | 2 +- docs/_data/navigation.yml | 2 + docs/daq_options.md | 41 +++++++++++------ docs/databases.md | 65 +++++++++++++------------- docs/fax.md | 94 ++++++++++++++++++++++++++++++++++++++ docs/how_to_run.md | 9 ++-- docs/index.md | 2 + docs/installation.md | 52 ++++++++++----------- docs/prerequisites.md | 68 +++------------------------ helpers/make_fax_config.py | 5 +- 11 files changed, 203 insertions(+), 140 deletions(-) create mode 100644 docs/fax.md diff --git a/DAQController.cc b/DAQController.cc index 6a80a10f..9da47b6a 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -3,6 +3,7 @@ #include "V1724.hh" #include "V1724_MV.hh" 
#include "V1730.hh" +#include "f1724.hh" #include "DAXHelpers.hh" #include "Options.hh" #include "StraxFormatter.hh" @@ -57,6 +58,8 @@ int DAQController::Arm(std::shared_ptr& options){ digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); else if(d.type == "V1730") digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); + else if(d.type == "f1724") + digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, 0); else digi = std::make_shared(fLog, fOptions, d.link, d.crate, d.board, d.vme_address); fDigitizers[d.link].emplace_back(digi); diff --git a/Options.cc b/Options.cc index f9b8ef2f..d6d2a0a2 100644 --- a/Options.cc +++ b/Options.cc @@ -188,7 +188,7 @@ std::vector Options::GetBoards(std::string type){ std::vector types; if(type == "V17XX") - types = {"V1724", "V1730", "V1724_MV"}; + types = {"V1724", "V1730", "V1724_MV", "f1724"}; else types.push_back(type); diff --git a/docs/_data/navigation.yml b/docs/_data/navigation.yml index eca8c548..c20cece2 100644 --- a/docs/_data/navigation.yml +++ b/docs/_data/navigation.yml @@ -13,3 +13,5 @@ docs: url: /docs/daq_options.md - title: "Simple use case" url: /docs/how_to_run.md + - title: "Waveform simulator" + url: /docs/fax.md diff --git a/docs/daq_options.md b/docs/daq_options.md index 2aee6e46..0c655ea6 100644 --- a/docs/daq_options.md +++ b/docs/daq_options.md @@ -5,6 +5,7 @@ * [Installation](installation.md) * [Options reference](daq_options.md) * [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) # DAQ Options Reference @@ -94,11 +95,19 @@ Electronics are defined in the 'boards' field as follows: "link": 1, "host": "daq0_controller_0", "type": "V2718" + }, + { + "vme_address": "44440000", + "board": 145, + "crate": -1, + "link": -1, + "host": "daq0_controller_0", + "type": "V1495" } ] ``` -This is an example that might be used for reading out 3 boards and defining one crate controller in the electronics +This is an example that might be used for reading out 3 boards and defining one crate controller with V1495 in the electronics setup given in the [previous chapter](installation.md). Each subdocument contains the following: |Option |Description | @@ -108,7 +117,8 @@ setup given in the [previous chapter](installation.md). Each subdocument contain |vme_address |It is planned to support readout via a V2718 crate controller over the VME backplane. In this case board addressing is via VME address only and crate would refer to the location of the crate controller in the daisy chain. This feature is not yet implemented so the option is placeholder (but must be included). | |link |Defines the optical link index this board is connected to. This is simple in case of one optical link, though like plugging in USB-A there's always a 50-50 chance to guesss it backwards. It becomes a bit more complicated when you include multiple A3818s on one server. There's a good diagram in CAEN's A3818 documentation. | |host |This is the DAQ name of the process that should control the board. Multiple processes cannot share one optical link (but one process can control one optical link). | -|type |Either V1724, V1724_MV, or V1730 for digitizers or V2718 for crate controllers. If more board types are supported they will be added. Limited V1495 support exists. | +|type |Either V1724, V1724_MV, or V1730 for digitizers, V2718 for crate controllers, or V1495 for the FGPA. If more board types are supported they will be added. 
| +Note that the "crate" and "link" fields for the V1495 don't have meaning and can take any value. ## Register Definitions @@ -153,13 +163,16 @@ The V2718 crate controller has a few options to configure. Note that they must b "muon_veto": 0, "led_trigger": 0, "s_in": 1 + }, + "muon_veto": { + ... } } ``` | Field | Description | | ------ | ----------- | -| pulser_freq | Float. The frequency to pulse the trigger/LED pulser in Hz. Supports from 1 Hz up to several kHz. Keep in mind this may not be implemented exactly since the CAEN API doesn't support every possible frequency exactly, but the software will attempt to match the desired frequency as closely as possible. | +| pulser_freq | Float. The frequency to pulse the trigger/LED pulser in Hz. Supports from <1 Hz up to some MHz. Keep in mind this may not be implemented exactly since the CAEN API doesn't support every possible frequency exactly, but the software will attempt to match the desired frequency as closely as possible. | | neutron_veto | Should the S-IN signal be propogated to the neutron veto? 1-yes, 0-no | | muon_veto | Should the S-IN signal be propogated to the muon veto? 1-yes, 0-no | | led_trigger | Should the LED pulse be propagated to the LED driver? 1-yes, 0-no | @@ -192,11 +205,11 @@ Various options that tell redax how to run. |Option | Description | | -------- | ---------- | | run_start | Tells the DAQ whether to start the run via register or S-in. 0 for register, 1 for S-in. Note that starting by register means that the digitizer clocks will not be synchronized. This can be fine if you run with an external trigger and use the trigger time as synchronization signal. If running in triggerless mode you need to run with '1' and have your hardware set up accordingly. | -| baseline_dac_mode | "cached"/"fixed"/"fit". This defines how the DAC-offset values per channel are set. If set to cached the program will load cached baselines from the run specified in *baseline_reference_run*. If it can't find that run it will fall back to the value in *baseline_fixed_value*. If set to 'fixed' it will use *baseline_fixed_value* in any case. If set to 'fit' it will attempt to adjust the DAC offset values until the baseline for each channel matches the value in *baseline_value*. If using negative voltage signals the default value of 16000 is a good one. Baselines for each run are cached in the *dac_values* collection of the daq database. | +| baseline_dac_mode | cached/fixed/fit. This defines how the DAC-offset values per channel are set. If set to "cached" the program will load cached baselines from the run specified in *baseline_reference_run*. If it can't find that run it will fall back to the value in *baseline_fixed_value*. If set to "fixed" it will use *baseline_fixed_value* in any case. If set to 'fit' it will attempt to adjust the DAC offset values until the baseline for each channel matches the value in *baseline_value*. If using negative voltage signals the default value of 16000 is a good one. Baselines for each run are cached in the *dac_values* collection of the daq database. | |baseline_reference_run | Int. If 'baseline_dac_mode' is set to 'cached' it will use the values from the run number defined here. | -|baseline_value | Int. If 'baseline_dac_mode' is set to 'fit' it will attempt to adjust the baselines until they hit the decimal value defined here, which must lie between 0 and 16386 for a 14-bit ADC. Default 16000. | +|baseline_value | Int. 
If 'baseline_dac_mode' is set to 'fit' it will attempt to adjust the baselines until they hit the decimal value defined here, which must lie between 0 and 16385 for a 14-bit ADC. Default 16000. | |baseline_fixed_value | Int. Use this to set the DAC offset register directly with this value. See CAEN documentation for more details. Default 4000. | -|processing_threads | Dict. The number of threads working on converting data between CAEN and strax format. Should be larger for processes responsible for more boards and can be smaller for processes only reading a few boards. | +|processing_threads | Dict. The number of threads working on converting data between CAEN and strax format. Should be larger for processes responsible for more boards and can be smaller for processes only reading a few boards. For example, 24 threads will very easily handle a data flow of 200 MB/s (uncompressed) through that instance, but if you aren't expecting that much data then smaller values are fine. The default value is 8, but not specifying this could cause issues with processing. | |detectors | Dict. Which detector a given instance is attached to. Used mainly in aggregating registers. Required | ## Strax Output Options @@ -208,7 +221,9 @@ There are various configuration options for the strax output that must be set. "strax_chunk_overlap": 0.5, "strax_output_path": "/data/xenon/raw/xenonnt", "strax_chunk_length": 5.0, - "strax_fragment_payload_bytes": 220 + "strax_fragment_payload_bytes": 220, + "strax_buffer_num_chunks": 2, + "srax_chunk_phase_limit": 1 } ``` @@ -216,8 +231,10 @@ There are various configuration options for the strax output that must be set. | ---- | ---- | | strax_chunk_overlap | Float. Defines the overlap period between strax chunks in seconds. Make is at least some few times larger than your typical event length. In any case it should be larger than your largest expected event. Default 0.5. | | strax_chunk_length | Float. Length of each strax chunk in seconds. There's some balance required here. It should be short enough that strax can process reasonably online, as it waits for each chunk to finish then loads it at once (the size should be digestable). But it shouldn't be so short that it needlessly micro-segments the data. Order of 5-15 seconds seems reasonable at the time of writing. Default 5. | -|strax_fragment_payload_bytes | Int. How long are the fragments? In general this should be long enough that it definitely covers the vast majority of your SPE pulses. Our SPE pulses are ~100 samples, so the default value of 220 bytes (2 bytes per sample) provides a small amount of overhead. Undefined behavior if the value is odd. | -|strax_output_path | String. Where should we write data? This must be a locally mounted data store. Redax will handle sub-directories so just provide the top-level directory where all the live data should go. | +|strax_fragment_payload_bytes | Int. How long are the fragments? In general this should be long enough that it definitely covers the vast majority of your SPE pulses. Our SPE pulses are ~100 samples, so the default value of 220 bytes (2 bytes per sample) provides a small amount of overhead. Undefined behavior if the value is odd, possibly undefined if it isn't a multiple of 4. | +|strax_output_path | String. Where should we write data? This must be a locally mounted data store. Redax will handle sub-directories so just provide the top-level directory where all the live data should go (e.g. `/data/live`). | +|strax_buffer_num_chunks | Int. 
How many full chunks should get buffered? Setting this at 1 or lower may cause data loss, and greater than 2 just means you need more memory in your readout machine. For instance, if 5 and 6 are buffered, as soon as something in chunk 7 shows up, chunk 5 is dumped to disk. | +|strax_chunk_phase_limit | Int. Sometimes pulses will show up at the processing stage late (or somehow behind the rest of them). If a pulse is this many chunks behind (or out of phase with) the chunks currently being buffered, log a warning to the database. | ## Channel Map @@ -261,7 +278,7 @@ Redax assigns trigger thresholds using a syntax identical to that of the channel ## Lower-level diagnostic options -Redax accepts a variety of options that control various low-level operations. The default values should be fine, and really should only be adjusted if you know what's going on. +Redax accepts a variety of options that control various low-level operations. The default values should be fine, and really should only be adjusted if you know what's going on or don't mind dealing with strange behavior. |Option | Description | | ---- | ---- | @@ -277,7 +294,5 @@ Redax accepts a variety of options that control various low-level operations. Th | baseline_ms_between_triggers | Int. How long between software triggers. Default 10. | | blt_size | Int. How many bytes to read from the digitizer during each BLT readout. Default 0x80000. | | blt_safety_factor | Float. Sometimes the digitizer returns more bytes during a BLT readout than you ask for (it depends on the number and size of events in the digitizer's memory). This value is how much extra memory to allocate so you don't overrun the readout buffer. Default 1.5. | -| buffer_safety_factor | Float. Same, except for the longer-lived buffer that redax carries until data is processed and queued for compression. Default 1.1. | | do_sn_check | 0/1. Whether or not to have each board check its serial number during initialization. Default 0. | -| buffer_type | "single"/"dual". The StraxInserter can either ask the DAQController for one event at a time to process (buffer_type = 'single') or it can ask for several events to store in its own buffer (buffer_type = 'dual'). All accesses to the DAQController buffer are mutexed, so in high-rate modes it's better to use the dual-buffer setup. Default 'dual' | -| max_events_per_thread | Int. The maximum number of data packets to give to a single processing thread at once. Default 1024. | + diff --git a/docs/databases.md b/docs/databases.md index 6948dd9b..c044c291 100644 --- a/docs/databases.md +++ b/docs/databases.md @@ -5,6 +5,7 @@ * [Installation](installation.md) * [Options reference](daq_options.md) * [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) # Configuration of Backend Databases @@ -24,26 +25,26 @@ fields can be present as well. This is especially pertinent to run documents. ### db.status -The status collection should be configured as a capped collection (see the 'helpers' directory for a script to set up the necessary capped collections). Each readout client report's it's status to this DB every few seconds. The dispatcher -queries the newest document from each node (considering also the time stamp of the document) to determine the aggregate -DAQ state. +The status collection should be configured as either a capped collection or with a TTL index on the "time" field (see the 'helpers' directory for a script to set up the necessary capped collections). +Each readout client reports its status to this DB every second. 
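The setup helper referred to above is not reproduced in this patch series, but either collection layout is a one-liner with pymongo; in this sketch the URI, database name, and sizes are placeholder assumptions:

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017/")   # assumed URI
db = client["daq"]                                   # assumed database name

# Option 1: a capped collection, so old heartbeats age out by size
if "status" not in db.list_collection_names():
    db.create_collection("status", capped=True, size=50 * 2**20)  # e.g. 50 MB cap

# Option 2: a normal collection with a TTL index on the "time" field,
# so documents expire after e.g. one week
# db["status"].create_index("time", expireAfterSeconds=7 * 24 * 3600)
```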
+The dispatcher queries the newest document from each node (considering also the time stamp of the document) to determine the aggregate DAQ state. The form of the document is the following: ```python { "host": "xedaq00_reader_0", # DAQ name of client + "time": , # the time when the document was made "status": 0, # status enum - "rate": 23, # data rate in MB since last update - "buffer_length" : 4, # current buffer utilization in MB + "rate": 13.37, # data rate in MB since last update + "buffer_size" : 4.3, # current buffer utilization in MB "run_mode" : "background_stable", # current run mode - "current_run_id" : "006363", # current run number "channels" : {0 : 67, # Rate per channel on this host in kB since last update 19 : 16, ... }, } ``` -The status enum has the following values: +Note that documents from a Crate Controller instance will also have a "run_number" field. The status enum has the following values: |Value |State | | ----- | ----- | @@ -73,8 +74,8 @@ The document format is similar to status: "readers" : 2, // number of readers connected "time" : ISODate("2018-09-20T13:35:05.642Z"), // time document inserted "buff" : 0, // total buffered data from all readers float - "mode" : null // run mode string (null because Idle here) -} + "mode" : null // run mode string (null because Idle here) +} ``` ### db.detector_control @@ -97,23 +98,21 @@ Because some of these fields require slightly more explanation a table has been "comment" : "", "link_mv" : "false", "link_nv" : "false", - "diagnosis": "goal", - "human_readable_status": "Idle" + "remote" : "false" } ``` |Field |Description | | ----- | ----- | |detector |Either 'tpc', 'muon_veto', or 'neutron_veto'. Or whatever funny thing you've got in your lab. | -|active |The user can set whether this detector is 'active' or not. If it's not active then we don't care about it's status. In fact we can't care since some readers will be reused when running in combined modes and may not longer belong to their original detectors.| -|stop_after |How many minutes (or seconds? check code) until the run automatically restarts. This is a global DAQ state setting, not the setting for a single run. So if you want to run for an hour you set this to 60 minutes, put the detector active, and the dispatcher should handle giving you the 1 hour runs. | -|finish_run_on_stop |How to deal with a run in progress if you set active to 'false'. If 'finish_run_on_stop' is true, we wait for the run to finish due to stop_after (but no new one is started). If false, we stop the run. Has no effect if active is 'true'. | +|active |The user can set whether this detector is 'active' or not. If it's not active then we don't care about its status. In fact we can't care since some readers will be reused when running in combined modes and may not longer belong to their original detectors.| +|stop_after |How many minutes until the run automatically restarts. This is a global DAQ state setting, not the setting for a single run. So if you want to run for an hour you set this to 60 minutes, put the detector active, and the dispatcher should handle giving you the 1 hour runs. | +|finish_run_on_stop |How to deal with a run in progress if you set active to 'false'. If 'finish_run_on_stop' is true, we wait for the run to finish due to stop_after (but no new one is started). If false, we stop the run. Has no effect if active is 'true'. | |mode |The options mode we're running the DAQ in. Should correspond to the 'name' field of one of the documents in the options collection. 
| |user |Who gets credit/blame for starting these runs? This is the user who last changed this command doc and it will be recorded in the run documents of all runs recorded while this command is active. | |Comment |You can automatically connect a comment to all runs started with this setting by setting this field. The comment is put in the run doc for all runs started while the command is active. | |link_mv, link_nv |These are used by the frontend for detector=tpc only. They simply indicate if the neutron or muon veto are included as part of 'tpc' for this run (for running in combined mode). To the backend this makes no difference. A reader is a reader. To the frontend it can limit the options modes given to the user or help in setting visual cues in the web interface so the operator can figure out what's going on. | -|diagnosis |The dispatcher's take on what's going on. It's 'goal' if the program thinks everything is OK. It's 'error' if there's an error. It's 'processing' if the dispatcher issued a command and is waiting for this to be implemented. In case the command takes too long the dispatcher can set this field to 'timeout'. | -|human_readable_status |Just translates the status enum to something people can read. Useful if displaying on a web page or someone calling the API who doesn't want to learn the codes. | +|remote |If this detector is controllable via the API. Set to "true" to disable control from the website and enable control via the API. | ### db.control The control database is used to propagate commands from the dispatcher to the reader and crate controller nodes. It is used purely internally by the dispatcher. Users wanting to set the DAQ state should set the detector control doc instead (preferably using the web interface). The exception to this is if you're running a small setup with a custom dispatcher and want to issue commands to your readout nodes manually. @@ -125,21 +124,22 @@ The control database is used to propagate commands from the dispatcher to the re "mode" : "two_links", "user" : "web", "host" : ["fdaq00_reader_0"], - "acknowledged" : [ - "fdaq00_reader_0" - ], - "command" : "arm" -} + "acknowledged" : { + "fdaq00_reader_0" : 1601469970934 + }, + "command" : "arm", + "createdAt": +} ``` |Field |Description | | ----- | ----- | -|options_override |Override specific options in the options ini document. Mostly used to set custom output paths so that we're writing to the right place for each run. | +|options_override |Override specific options in the options ini document. Mostly used to set the run identifier | |mode |Options file to use for this run. Corresponds to the 'name' field of the options doc. | |user |Who started the run? Corresponds to the last person to change the detector_status doc during normal operation. Exceptional stop commands can be automatically issued by various subsystems as well in case of errors. |host |List of all hosts to which this command is directed. Readers and crate controllers will only process commands addressed to them. | -|acknowledged |Before attempting to process a command all reader and crate controller processes will first acknowledge the command as received. This does not indicate that processing the command was successful! It just indicates the thing tried. The dispatcher has to watch for the appropriate state change of the slave nodes in order to determine if the command achieved its goal. | -|command |This is the actual command. 'arm' gets the DAQ ready to start. 'start' starts readout by sending the S-in signal. 
'send_stop_signal' puts the s-in to zero. 'stop' resets readout processes. | +|acknowledged |Before attempting to process a command all reader and crate controller processes will first acknowledge the command as received. This does not indicate that processing the command was successful! It just indicates the thing tried. The dispatcher has to watch for the appropriate state change of the slave nodes in order to determine if the command achieved its goal. This is a dictionary, with values set to the timestamp (in ms) of when the acknowledgement happened. | +|command |This is the actual command. 'arm' gets the DAQ ready to start. 'start' and 'stop' do what they say on the tin. 'stop' can also be used as a general reset command for a given instance. | ### db.options @@ -155,7 +155,8 @@ Basic log documents have the following simple format: { "message" : "Received arm command from user web for mode test", "priority" : 1, - "user" : "fdaq00_reader_0" + "user" : "fdaq00_reader_0", + "runid": 42 } ``` Where the 'user' is an identifier for which process sent the message, or in case of messages sent by a user it can @@ -164,7 +165,7 @@ gives the standard priorities: |Priority |Value |Use | | ----- | ----- | ------ | -|0 |DEBUG |Debug output for developers. Can either be silenced in production or added with a TTL expiry index (see next section). | +|0 |DEBUG |Low-level information. This is only written to disk, it will never show up in the database.| |1 |MESSAGE |Normal log output that is important to propagate during normal operation, but does not indicate any exceptional state. | |2 |WARNING |Inform the user of an exceptional situation. However this flag is reserved for minor issues that the system will handle on its own and should require no user input. | |3 |ERROR |An exceptional situation with major operational impact that requires intervention from the user. | @@ -193,23 +194,21 @@ Here are the fields set by the DAQ. There are additional fields set at various l "start": ISODate("2018-09-20T13:35:05.642Z"), # time that the run was started "end": ISODate("2018-09-20T13:55:05.642Z"), # time that the run was ended. d.n.e. if run not ended "detectors": ["tpc", "muon_veto", "neutron_veto"], # subdetectors in run - "reader": { - "ini": {DOCUMENT} # the entire options doc used for readout - }, + "daq_config": {DOCUMENT}, # the entire options doc used for readout "source": { "type": "none" # the source type used. (i.e. LED, Rn220). }, - "strax": {DOCUMENT}, # override settings for strax (see strax docs) "data": [ # all locations where data for this run might be found { "type": "live", # raw/processed/reduced/etc. live means pre-trigger. "host": "daq", # usually host of the machine but 'daq' just means pre-trigger - "path": "/mnt/cephfs/pre_trigger/run_10000" + "location": "/live_data/xenonnt/10000" "status": "transferring" # transferring/transferred/error } - ] + ], + "status": "eb_ready_to_upload" # A high-level indication of the status of this run } - + ``` ### db.users, db.shift_rules, db.shifts, etc diff --git a/docs/fax.md b/docs/fax.md new file mode 100644 index 00000000..9de52d12 --- /dev/null +++ b/docs/fax.md @@ -0,0 +1,94 @@ +## Contents +* [Intro](index.md) +* [Pre-install](prerequisites.md) +* [DB config](databases.md) +* [Installation](installation.md) +* [Options reference](daq_options.md) +* [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) + +# Hardware-free DAQ + +Redax comes with a simple waveform simulator. + +## Wait, what? 
+
+Some issues only arise when you're passing a lot of data around between a lot of threads.
+You might have one or two spare digitizers sitting around, but you may not have enough PMTs or a running TPC you can use to test things.
+You can hook up your board(s) to an external pulser, but a constant external trigger frequency won't always test everything you want.
+Also, adding new boards usually means a trip to the lab, which may or may not be close to your desk, and you'll probably go past the coffee machine on your way, and before you realize it, it's lunchtime.
+Far simpler to have a testing solution that doesn't require you to leave your office.
+
+## The caveats
+
+The redax waveform simulator is bare-bones.
+The data it produces should be written straight to /dev/null (but can't, because you can't make subfolders there).
+It takes CPU cycles away from the rest of the program that's trying to reformat the data for output.
+It was envisioned as a method to test new features by assuming digitizers with "perfect" performance.
+
+## What it assumes
+
+The redax waveform simulator assumes you're using V1724 boards and a "small" fake TPC with the usual aspect ratio.
+The PMTs are arranged in two identical arrays of hexagonal rings.
+PMT0 is in the center of the top array, and PMTs are incrementally numbered in counter-clockwise oriented rings beginning from the positive x-axis.
+The bottom array is identical with an offset in the numbering.
+
+## How it works, "physics"-y
+
+All coordinates are assumed to be in units of PMTs, so a depth of 3.5 means 3.5 PMTs.
+First, redax generates an event location.
+This is flat in z with a minimum depth of 0.5 to prevent overlap between the S1 and S2, and also flat in theta and r.
+Note that it is not flat in r-squared.
+Next, redax generates an S1 between 11 and 30 PE.
+Then it generates the S2 from the S1, assuming the S2/S1 gain is normally distributed around 100 with a standard deviation of 20, with electron loss in proportion to the depth and the electron absorption length.
+Now that we have coordinates and photon numbers, the hitpattern is generated.
+The S1 top fraction is 0.1 at the bottom and 0.4 at the top, and linear in between.
+The S2 top fraction is 0.65, and the S2 hitpattern is Gaussian with a width of 1.3 PMTs.
+S1s are 40ns wide, and the S2 width is 1us at the top and increases with 200ns/sqrt(z).
+PMT numbers and hit times are randomly selected, given these weights and widths.
+These values are then used to generate PMT waveforms, which are converted to the digitizer's internal format and made ready for "readout".
+
+## Implementation details
+
+The first three steps (location, size, and hitpattern) are performed in a static thread, which sleeps both between the S1 and S2, and between the S2 and the next event.
+This makes pulses show up in the digitizers at quasi-realistic intervals.
+Once hits (PMT id and time) are generated, they are sorted into digitizer-specific parts and sent to each digitizer.
+Each digitizer has its own thread that it uses to convert PMT ids and times into waveforms, for which it takes a single-photon model and randomly generates a scale factor (normally distributed about 1 with a width of 0.15).
+This is then converted into the expected format and added to the digitizer's internal buffer.
+When the main readout thread "reads" from the digitizer, it just takes whatever contents are in this buffer (technically it takes the whole buffer via std::move).
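The hand-off in that last step can be pictured with a minimal, self-contained sketch. The names below (FakeDigitizer, AddHit, Read, fBuffer) are illustrative only and are not the actual redax symbols; the point is the per-digitizer buffer guarded by a mutex, filled by the generator thread, and emptied wholesale by the readout thread.

```cpp
#include <mutex>
#include <random>
#include <utility>
#include <vector>

// Illustrative stand-in for a fax digitizer's internal buffer (not the real redax class).
class FakeDigitizer {
public:
  // Generator thread: scale a single-photon model by a factor drawn from N(1, 0.15)
  // and append the resulting pulse to the internal buffer.
  void AddHit(const std::vector<double>& spe_model) {
    std::normal_distribution<double> scale(1.0, 0.15);
    const double s = scale(fGen);
    std::vector<double> pulse(spe_model);
    for (auto& sample : pulse) sample *= s;
    std::lock_guard<std::mutex> lg(fMutex);
    fBuffer.emplace_back(std::move(pulse));
  }

  // Readout thread: take everything currently buffered in one go.
  std::vector<std::vector<double>> Read() {
    std::lock_guard<std::mutex> lg(fMutex);
    auto out = std::move(fBuffer); // the whole buffer changes hands here
    fBuffer.clear();               // leave the moved-from vector in a known state
    return out;
  }

private:
  std::mutex fMutex;
  std::mt19937_64 fGen{std::random_device{}()};
  std::vector<std::vector<double>> fBuffer;
};
```

The real implementation packs samples into the digitizer's native 32-bit word format rather than keeping doubles, but the locking and the std::move hand-off are the parts that matter for the threading behaviour described above.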
+From this point, the fax pulses are "indistinguishable" from real pulses and exhibit all the usual digitizer features like saturation and clock rollovers (though rollovers are tracked differently because of the asynchronous event generation).
+
+## Known limitations
+
+The zero-padding pre- and post-threshold crossing isn't implemented, so pulses are rather shorter than those from hardware digitizers.
+Dark counts aren't a thing, and probably can't be added without an overhaul of how the internal buffer works.
+This is due to how overlaps are treated, which is to say that they aren't.
+I haven't looked at the sum waveform or hitpattern of any pulse, and I don't particularly intend to.
+The waveform simulator requires an overhead of (n_boards + 1) threads, so if you want to simulate high rates the processing performance won't be strictly representative, unless you have an absurd number of threads on your readout machine.
+There are no electron trains (not sure if this is a limitation or a feature).
+
+## How to use
+
+The waveform simulator is great for testing redax features or a new deployment, without having to worry about hardware.
+
+### f1724
+
+The fax digitizer has the model number "f1724", because it's a fake V1724.
+Beyond this, the "link" and "crate" fields behave the same as in hardware.
+The "helpers/make_fax_config.py" script will generate all fax-specific options necessary,
+but before running it you should change things like hostnames, database names, output directories, etc.
+The script takes a handful of options:
+
+|Argument |Description | Default value |
+| ----- | ----- | ----- |
+|--size | How many rings of PMTs the detector has, i.e. 3 = the center PMT and 3 additional full rings. | 2 |
+|--rate | The rate of events, in Hz. Events are generated as a Poisson process at this rate. | 10 |
+|--e-lifetime | Electron absorption length, in units of TPC lengths, i.e. S2_actual = S2_real \* exp(z\*value) | 1.5 |
+|--drift-speed | Drift speed of electrons in units of PMTs/ns. | 1e-4 |
+|--name | The name of your option document | - |
+
+This script will divide boards as evenly as possible among as few links as are necessary to support the number of boards, so 7 boards will all go on one link, and 9 boards will end up with 4 on one link and 5 on another.
+PMTs are assigned as discussed above.
+As there is no f2718 (yet), the only way to start the process is by software, rather than an S-IN signal, so make sure that the config is set appropriately.
+There is only one thread that relies on real-time, so there is no impact on the synchronization of the "digitizers".
+
diff --git a/docs/how_to_run.md b/docs/how_to_run.md
index c1f1f5d8..3430b7a6 100644
--- a/docs/how_to_run.md
+++ b/docs/how_to_run.md
@@ -5,6 +5,7 @@
 * [Installation](installation.md)
 * [Options reference](daq_options.md)
 * [Example operation](how_to_run.md)
+* [Waveform simulator](fax.md)
 
 # Examples of Running the Program
 
@@ -16,7 +17,7 @@ Note: this is going to be a minor pain in the neck. This software is designed to
 
 ## 1. Create and insert an options file
 
-Refer to the example script helpers/set_run_mode.py
+Refer to the example script helpers/set_run_mode.py. These settings are unlikely to cause crashes, but also unlikely to be exactly what you want. This is provided as an example, it is not a "complete" options doc.
```python from pymongo import MongoClient @@ -133,6 +134,7 @@ idoc = { "comment" : "", "link_mv" : "false", "link_nv" : "false", + "remote": "false" } collection.insert(idoc) ``` @@ -145,10 +147,11 @@ Stop the run by updating the document. Either in a python script or just in shel ```javascript use daq_db_name -db.detector_control.update({"detector": "my_detector"}, {"$set": {"active": false}}) +db.detector_control.update({detector: "my_detector"}, {$set: {active: "false"}}) ``` The daq should cease acquisition in window (2) and go idle. You can also update the mode to a different mode (if you defined one) or change 'stop_after'. -## 5. Just for fun, check you runs DB +## 5. Just for fun, check your runs DB You must have defined a runs DB in your dispatcher settings. Have a look at the run collection and make sure you see entries for the data that was just taken. + diff --git a/docs/index.md b/docs/index.md index 4e6ff70d..1d6a1bbf 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,6 +5,7 @@ * [Installation](installation.md) * [Options reference](daq_options.md) * [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) # redax Docs D. Coderre, D. Masson, 24. January 2020 @@ -37,6 +38,7 @@ configuration of the final system. Use these links to navigate to the sub-pages. 3. [Installing software and helper programs](installation.md) 4. [DAQ Configuration options](daq_options.md) 5. [Examples of running the readout](how_to_run.md) + 6. [The hardware-free DAQ](fax.md) A brief overview of the complete system follows. Please refer to Figure 1. diff --git a/docs/installation.md b/docs/installation.md index 05d0dfce..59271195 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -5,6 +5,7 @@ * [Installation](installation.md) * [Options reference](daq_options.md) * [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) # Redax installation and initial configuration @@ -19,13 +20,14 @@
PC-0 is the more straightforward case. It runs one readout process which is responsible for reading out the digitizers -connected to A3818 optical link zero. It additionally runs one crate controller process that controls a V2718 crate -controller connected to A3818 optical link one. +connected to A3818 optical link 0. It additionally runs one crate controller process that controls a V2718 crate +controller connected to A3818 optical link 1. PC-1, on the other hand, showcases the ability of redax to read out multiple optical links. From the perspective of raw readout speed there is no disadvantage to this since each optical link is read internally in its own thread. On the other hand, a second process is employed on the same PC reading out the third link. This is also allowed since the -command addressing and dispatcher logic refers to the 'daq name' of the process, not just the hostname of the PC. +command addressing and dispatcher logic refers to the 'daq name' of the process, not just the hostname of the PC. +One downside of this configuration is that both processes log to the database, and for XENONnT the status documents from the processes is about 1 GB/day. ## Reader installation: Run on both PCs @@ -33,36 +35,39 @@ If you already have the prerequities described in a [previous chapter](prerequis software and running it. ``` -cd redax -make +$ cd redax +$ make -j ``` -You then need to start the process, which takes two important command line arguments. +You then need to start the process, which takes three important command line arguments and a few other optional ones. ``` -/main {ID} {MONGO_URI} +$ ./redax --id {ID} --uri {MONGO_URI} [--db {DATABASE}] [--logidr ] [--reader | --cc] ``` - -Here **ID** is an integer that will designate this process and be used in addressing. If you run multiple processes of -the same type on one host you *must* provide them with unique ID numbers. **URI** is the complete URI to your DAQ -MongoDB database, including username, password, and authentication database as required. +|Argument|Description|Required| +| ----- | ----- | ----- | +|--id | The identifier for this instance of redax on this host. Must not be the same as any other instance on this host, but can be the same as one on another host. Numbers are simplest, but in principle you could name them after people you (don't) like.| Yes | +| --uri | The full URI of the database you're connection to, in the form `mongodb://{username}:{password}@{host}:{port}/{authentication}`. | Yes | +| --reader, --cc | Which specific task you want this instance to do. `--reader` means you want to read from digitizers, `--cc` means you want to be the crate controller. You must specify one of these options | Yes | +|--db | The name of the database where everything is stored. The default is 'daq'. | No | +|--logdir | The directory where you want logfiles to be written. Multi-host management is much simpler if all logfiles are written to the same network-mounted folder, because then you don't need to log into 4 machines to see what they were all doing. Default is the working directory. | No | Assuming we have a database at database.com port 27017 with user daq, password alsodaq, and authentication database authdb then the process on host daq0 might be started with: ``` -./main 0 mongodb://daq:alsodaq@database.com:27017/authdb +./redax --id 0 --uri mongodb://daq:alsodaq@database.com:27017/authdb --reader --logdir /nfs/redax_logs ``` This will start a process that will be named daq0_reader_0. 
The process naming convention is `{HOSTNAME}_{PROCESS_TYPE}_{ID}`, where PROCESS_TYPE is either reader or ccontrol, HOSTNAME is the hostname of the PC, -and ID is the number provided by the operator on the command line. +and ID is the number provided by the operator on the command line. Also, the logfiles will be written to a central location. For host daq1 we want to start two processes like so: ``` -./main 0 mongodb://daq:alsodaq@database.com:27017/authdb -./main 1 mongodb://daq:alsodaq@database.com:27017/authdb +./main --id 0 --uri mongodb://daq:alsodaq@database.com:27017/authdb --reader --logdir /nfs/redax_logs +./main --id 1 --uri mongodb://daq:alsodaq@database.com:27017/authdb --reader --logdir /nfs/redax_logs ``` This will start processes with names daq1_reader_0 and daq1_reader_1. Note that these names are important for configuring @@ -71,20 +76,15 @@ the options documents in the next chapter! Once the processes are started they will immediately begin polling the database looking for commands and updating the database with their status. -## Crate Controller Installation, Run on daq0 +## Crate Controller, Run on daq0 The crate controller module is responsible for the V2718 crate controller. It will also be responsible for configuring the -V1495 general purpose module (if required) and the DDC-10 high energy veto module. +V1495 general purpose module (if required) and the DDC-10 high energy veto module (if required). -To compile it: -``` -cd redax -make ccontrol -``` Running it is the same as running the reader: ``` -./ccontrol 0 mongodb://daq:alsodaq@database.com:27017/authdb +./redax --id 0 --uri mongodb://daq:alsodaq@database.com:27017/authdb --cc --logdir /nfs/redax_logs ``` Assuming you run that command on daq0 you'll have a crate controller process running called daq0_ccontrol_0. Identically @@ -113,8 +113,7 @@ the status collection. ## Optional Process: System Monitor If using [nodiaq](https://github.com/coderdj/nodiaq) there is funtionality to display key system parameters like CPU, -memory, and disk usage of each host. An exceedingly simple system monitor is included, which dumps diagnostic data into -a capped collection for display. +memory, and disk usage of each host. An exceedingly simple system monitor is included, which dumps diagnostic data into a capped (or TTL) collection for display. This is simply run with: ``` @@ -122,4 +121,5 @@ cd redax/monitor python monitor.py ``` -Ideally you make this into a system service so you can always keep an eye on the health of your readout machines. +Ideally you would make this into a system service so you can always keep an eye on the health of your readout machines. + diff --git a/docs/prerequisites.md b/docs/prerequisites.md index 3d55807f..11ca60be 100644 --- a/docs/prerequisites.md +++ b/docs/prerequisites.md @@ -5,6 +5,7 @@ * [Installation](installation.md) * [Options reference](daq_options.md) * [Example operation](how_to_run.md) +* [Waveform simulator](fax.md) # Installation of Prerequisites @@ -14,14 +15,14 @@ This section lists all the prerequisites needed. We're assuming an Ubuntu 18.04 The most basic system will consist of a CAEN V1724 connected via optical link to a CAEN A3818 or A2818 PCI(e) card installed in the same PC where the software will run. More complex setups, for example using a V2718 crate controller to facilitate synchronized starting of multiple V1724, are of course also possible. 
-**Note:** the V1724 can be either used with the DPP_DAQ firmware or with the default firmware without 'zero-length-encoding' enabled. ZLE support may be included in a future release if it is needed. +**Note:** the V1724 can be either used with the DPP_DAW firmware or with the default firmware without 'zero-length-encoding' enabled. ZLE support may be included in a future release if it is needed. ## Libraries from the package repo * [LZ4](http://lz4.org) is needed as the primary compression algorithm. * [Blosc](http://blosc.org/) is the secondary for compression algorithm. - * Normal build libraries required. Note that we're using C++17 so require a relatively recent gcc, in case you're on an older OS. - + * Normal build libraries required. Support for C++17 is required. + Install with: `sudo apt-get install build-essential libblosc-dev liblz4` ## CAEN Libraries @@ -66,66 +67,11 @@ you need to install a database there are a few options. 1. Use a cloud-hosted DB. [Mlab](https://www.mlab.com) and [MongoDB Atlas](https://www.mongodb.com/cloud/atlas) are popular choices and feature a free tier, which is enough for testing. Note that MLab has been acquired by MongoDB. 2. Use your running cloud service. If you happen to be XENONnT we use a mongo cloud deployment that allows us to deploy new databases to our own servers at the click of a button. It is a service from MongoDB and costs a fee per data-bearing machine. Our production systems are managed in this way. -3. Install your own standalone database. This is easy to do and gives you full freedom to use your own hardware. Additionally, a cloud-based solution may not be fully appropriate for a DAQ deployment that is inexorably tied to specific physical hardware (i.e. the detector and electronics readout). Instructions for this follow. - -### Local Installtion - -The easiest way to do this is to google it. - -**Enable authentication** - -MongoDB has had a bit of a bad security reputation since it contains no security protection at all by default. This must -be manually configured. Therefore a bunch of smart people working at sometimes surprisingly professional-sounding places and -storing also surprisingly sensitive data (much more important than our temporary DAQ data) created databases that were -completely open to the outside world. Of course a bunch of obviously smarter but morally deficient people then gained -access to these machines, which is so easy you might do it by accident, and deleted all the data in demand of a ransom, -which was also never paid. It was a whole thing. Don't be one of those guys and put at least a password on your database. - -See the official docs [here](https://docs.mongodb.com/manual/tutorial/enable-authentication/). - -Log in to the database: -``` -mongo --host 127.0.0.1 --port 27017 -``` - -Create a user in database 'admin' with full control: -``` -use admin -db.createUser( - { - user: "user", - pwd: "password", - roles: [ { role: "userAdminAnyDatabase", db: "admin" }, - { role: "readWriteAnyDatabase", db: "admin" } ] - } -) -``` - -This is the bare minimum user configuration for a useful DB. For our full deployment we will create several database users and give each only the permissions it needs to operate. - -Open the file /etc/mongod.conf and change the config file to enable auth. The lines are: - -``` -security: - authorization: enabled -``` - -Restart the process: - -```sudo systemctl restart mongod``` - -Now try to log in like before… you should get in but any command (i.e. 'show dbs) should fail, right? 
If it doesn't fail something went wrong. If it does fail then you did it right and are now secure. I still wouldn't expose my database to the internet… but it's probably fine in your subnet. - -You can log in now by providing a user, password, and authentication database like so: - -```mongo -u user -p password --authenticationDatabase=admin --host=127.0.0.1 --port=27017``` - -Or using a connection string like so: +3. Install your own standalone database. This is easy to do and gives you full freedom to use your own hardware. Additionally, a cloud-based solution may not be fully appropriate for a DAQ deployment that is inexorably tied to specific physical hardware (i.e. the detector and electronics readout). -```mongo mongodb://user:password@127.0.0.1:27017/admin``` +### Local Installation -Another note on security. A database isn't really meant to be a public-facing thing like an api. So try to avoid any unnecessary exposure to the internet (certainly) or public/semipublic subnets. An API is recommended for for access to the database (in our case by the slow control system). It is easy to expand this -to do whatever you want. +The MongoDB documentation for installing a standalone database is really good and you should follow it. Don't forget to enable authentication. ## Anaconda Environment for the dispatcher and system monitor diff --git a/helpers/make_fax_config.py b/helpers/make_fax_config.py index 91620806..5e942265 100644 --- a/helpers/make_fax_config.py +++ b/helpers/make_fax_config.py @@ -43,7 +43,7 @@ def main(): for l in range(n_links): for b in range(boards_per_link[l]): boards.append({ - "type": "V1724_fax", + "type": "f1724", "host": "reader4_reader_0", "link": l, "crate": b, @@ -82,7 +82,7 @@ def main(): "user": "darryl", "description": "fax subconfig", "detector": "include", - "processing_threads": {"reader4_reader_0": "auto"}, + "processing_threads": {"reader4_reader_0": 4}, "baseline_dac_mode": "fixed", "run_start": 1, "detectors": {"reader4_reader_0": "fax"}, @@ -99,7 +99,6 @@ def main(): "strax_buffer_num_chunks": 2, "strax_chunk_phase_limit": 1, "strax_output_path": "/live_data/test", - "output_files": {"reader4_reader_0": 4} } doc = { "name": args.name, From 749a0cca75ba0d8b2cf4023ed901eb640c427817 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Fri, 2 Oct 2020 14:51:58 +0200 Subject: [PATCH 22/57] Removed unused imports, unifies run identification --- DAQController.cc | 4 +--- DAXHelpers.hh | 1 - StraxFormatter.cc | 16 +++++++++------- dispatcher/MongoConnect.py | 2 +- main.cc | 4 ---- 5 files changed, 11 insertions(+), 16 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 9da47b6a..41d69f00 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -1,5 +1,4 @@ #include "DAQController.hh" -#include #include "V1724.hh" #include "V1724_MV.hh" #include "V1730.hh" @@ -8,13 +7,11 @@ #include "Options.hh" #include "StraxFormatter.hh" #include "MongoLog.hh" -#include #include #include #include #include #include -#include #include @@ -42,6 +39,7 @@ DAQController::~DAQController(){ int DAQController::Arm(std::shared_ptr& options){ fOptions = options; + fLog->SetRunId(fOptions->GetInt("number", -1)); fNProcessingThreads = fOptions->GetNestedInt("processing_threads."+fHostname, 8); fLog->Entry(MongoLog::Local, "Beginning electronics initialization with %i threads", fNProcessingThreads); diff --git a/DAXHelpers.hh b/DAXHelpers.hh index 4344d311..bb4611a5 100644 --- a/DAXHelpers.hh +++ b/DAXHelpers.hh @@ -3,7 +3,6 @@ #include #include -#include class DAXHelpers{ 
/* diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 197a9036..2c952750 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -6,12 +6,8 @@ #include #include #include -#include -#include -#include #include #include -#include #include #include @@ -38,7 +34,13 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetString("compressor", "lz4"); fFullChunkLength = fChunkLength+fChunkOverlap; fHostname = fOptions->Hostname(); - std::string run_name = fOptions->GetString("run_identifier", "run"); + std::string run_name; + int run_num = fOptions->GetInt("number", -1); + if (run_num == -1) run_name = "run"; + else { + run_name = std::stoi(run_num); + if (run_name.size() < 6) run_name.insert(0, 6 - run_name.size(), "0"); + } fEmptyVerified = 0; fLog = log; @@ -323,11 +325,11 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_start); std::vector*> buffers = {&fChunks[chunk_i], &fOverlaps[chunk_i]}; - std::vector uncompressed_size(3, 0); + std::vector uncompressed_size(3, 0); std::string uncompressed; std::vector> out_buffer(3); std::vector wsize(3); - size_t max_compressed_size = 0; + long max_compressed_size = 0; for (int i = 0; i < 2; i++) { uncompressed_size[i] = buffers[i]->size()*(fFragmentBytes + fStraxHeaderSize); diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py index 4624a0e1..6d57e470 100644 --- a/dispatcher/MongoConnect.py +++ b/dispatcher/MongoConnect.py @@ -392,7 +392,7 @@ def SendCommand(self, command, hosts, user, detector, mode="", delay=0): "user": user, "detector": detector, "mode": mode, - "options_override": {"run_identifier": n_id, "number": number}, + "options_override": {"number": number}, "number": number, "createdAt": datetime.datetime.utcnow() } diff --git a/main.cc b/main.cc index 6e979e3a..1820576e 100644 --- a/main.cc +++ b/main.cc @@ -1,6 +1,4 @@ #include -#include -#include #include #include "DAQController.hh" #include "CControl_Handler.hh" @@ -8,7 +6,6 @@ #include #include "MongoLog.hh" #include "Options.hh" -#include #include #include #include @@ -223,7 +220,6 @@ int main(int argc, char** argv){ // Mongocxx types confusing so passing json strings around fOptions = std::make_shared(logger, (doc)["mode"].get_utf8().value.to_string(), hostname, suri, dbname, override_json); - logger->SetRunId(fOptions->GetInt("number", -1)); if(controller->Arm(fOptions) != 0){ logger->Entry(MongoLog::Error, "Failed to initialize electronics"); controller->Stop(); From 4d6504114d495d0f05a705bf3fccaee08e740511 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Wed, 7 Oct 2020 12:25:07 +0200 Subject: [PATCH 23/57] Time to test --- dispatcher/MongoConnect.py | 31 ++++++++++++++++++++++++++++--- dispatcher/dispatcher.py | 11 ++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py index 4624a0e1..24729943 100644 --- a/dispatcher/MongoConnect.py +++ b/dispatcher/MongoConnect.py @@ -257,9 +257,34 @@ def AggregateStatus(self): def GetWantedState(self): - # Pull the wanted state per detector from the DB and return a dict + # Aggregate the wanted state per detector from the DB and return a dict try: - for doc in self.collections['incoming_commands'].find(): + for doc in self.collections['incoming_commands'].aggregate([ + {'$sort': {'_id': -1}}, + {'$group': { + '_id': {'$concat': ['$detector', '.', '$field']}, + 'value': {'$first': '$value'}, + 'user': {'$first': '$user'}, + 'time': {'$first': '$time'}, + 'detector': {'$first': 
'$detector'}, + 'key': {'$first': '$field'} + }}, + {'$group': { + '_id': '$detector', + 'keys': {'$push': '$key'}, + 'values': {'$push': '$value'}, + 'users': {'$push': '$user'}, + 'times': {'$push': '$time'} + }}, + {'$project': { + 'detector': '$_id', + '_id': 0, + 'state': {'$arrayToObject': {'$zip': {'inputs': ['$keys', '$values']}}}, + 'user': {'$arrayElemAt': ['$users', {'$indexOfArray': ['$times', {'$max', '$times'}]}]} + }} + ]): + doc.update(doc['state']) + del doc['state'] self.latest_settings[doc['detector']]=doc return self.latest_settings except: @@ -392,7 +417,7 @@ def SendCommand(self, command, hosts, user, detector, mode="", delay=0): "user": user, "detector": detector, "mode": mode, - "options_override": {"run_identifier": n_id, "number": number}, + "options_override": {"number": number}, "number": number, "createdAt": datetime.datetime.utcnow() } diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index cf263731..c8ce8a9f 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -53,6 +53,12 @@ def main(): while(sh.event.is_set() == False): sh.event.wait(sleep_period) + # Print an update + for detector in latest_status.keys(): + logger.debug("Detector %s should be %sACTIVE and is %s"%( + detector, '' if goal_state[detector]['active'] == 'true' else 'IN', + latest_status[detector]['status'].name)) + # Get most recent check-in from all connected hosts if MongoConnector.GetUpdate(): continue @@ -68,11 +74,6 @@ def main(): # Time to report back MongoConnector.UpdateAggregateStatus() - # Print an update - for detector in latest_status.keys(): - logger.debug("Detector %s should be %sACTIVE and is %s"%( - detector, '' if goal_state[detector]['active'] == 'true' else 'IN', - latest_status[detector]['status'].name)) MongoConnector.Quit() return From 1e6ba25e1dcc41cd0082ab9129450e31013dffb5 Mon Sep 17 00:00:00 2001 From: xedaq Date: Wed, 7 Oct 2020 12:41:16 +0200 Subject: [PATCH 24/57] Typos --- StraxFormatter.cc | 4 ++-- main.cc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 2c952750..56cd428b 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -38,8 +38,8 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetInt("number", -1); if (run_num == -1) run_name = "run"; else { - run_name = std::stoi(run_num); - if (run_name.size() < 6) run_name.insert(0, 6 - run_name.size(), "0"); + run_name = std::to_string(run_num); + if (run_name.size() < 6) run_name.insert(0, 6 - run_name.size(), 48); // 48 == '0' } fEmptyVerified = 0; diff --git a/main.cc b/main.cc index 1820576e..14b53cea 100644 --- a/main.cc +++ b/main.cc @@ -104,6 +104,7 @@ int main(int argc, char** argv){ } // We will consider commands addressed to this PC's ID + const int HOST_NAME_MAX = 64; // should be #defined in unistd.h but isn't??? 
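  // HOST_NAME_MAX is specified by POSIX in <limits.h> (not unistd.h), and some
  // platforms only expose sysconf(_SC_HOST_NAME_MAX), so the hard-coded 64 above
  // (the usual glibc value) serves as a reasonable portable fallback.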
char chostname[HOST_NAME_MAX]; gethostname(chostname, HOST_NAME_MAX); hostname=chostname; From c30beadb87666e5979ebd176b4236b4c8b0861ea Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 8 Oct 2020 10:38:07 +0200 Subject: [PATCH 25/57] Subtle but major bug fix --- StraxFormatter.cc | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 56cd428b..0ea22910 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -39,7 +39,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptr>1; fragment.append((char*)&length, sizeof(length)); - int16_t sw = 10; + int16_t sw = digi->SampleWidth(); fragment.append((char*)&sw, sizeof(sw)); int16_t channel = 790; // TODO add MV and NV support fragment.append((char*)&channel, sizeof(channel)); @@ -111,7 +111,8 @@ void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std::sh fragment.append((char*)&fragment_i, sizeof(fragment_i)); int16_t baseline = 0; fragment.append((char*)&baseline, sizeof(baseline)); - int8_t zero = 0; + int16_t zero = 0; + while ((int)fragment.size() < fFragmentBytes+fStraxHeaderSize) fragment.append((char*)&zero, sizeof(zero)); AddFragmentToBuffer(std::move(fragment), 0, 0); @@ -201,7 +202,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, auto [timestamp, channel_words, baseline_ch, wf] = dp->digi->UnpackChannelHeader( buff, dp->clock_counter, dp->header_time, event_time, words_in_event, n_channels); - uint32_t samples_in_pulse = wf.size()*sizeof(uint16_t)/sizeof(char32_t); + uint32_t samples_in_pulse = wf.size()*2; // 2 16-bit samples per 32-bit word uint16_t sw = dp->digi->SampleWidth(); int samples_per_frag= fFragmentBytes>>1; int16_t global_ch = fOptions->GetChannel(dp->digi->bid(), channel); @@ -255,14 +256,14 @@ void StraxFormatter::AddFragmentToBuffer(std::string fragment, uint32_t ts, int max_chunk = (*max_iter).first; } + const short* channel = (const short*)(fragment.data()+14); if (min_chunk - chunk_id > fWarnIfChunkOlderThan) { - const short* channel = (const short*)(fragment.data()+14); fLog->Entry(MongoLog::Warning, "Thread %lx got data from ch %i that's in chunk %i instead of %i/%i (ts %lx), it might get lost (ts %lx ro %i)", fThreadId, *channel, chunk_id, min_chunk, max_chunk, timestamp, ts, rollovers); } else if (chunk_id - max_chunk > 1) { - fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s)", - fThreadId, chunk_id - max_chunk - 1); + fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s) (ch%i)", + fThreadId, chunk_id - max_chunk - 1, channel); } fOutputBufferSize += fragment.size(); From c40f5fcf8be9ab77fc78ec6456ff1be8070359f7 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 8 Oct 2020 11:21:06 +0200 Subject: [PATCH 26/57] Cleanup --- StraxFormatter.cc | 31 ++++++++++++++++++------------- StraxFormatter.hh | 1 + 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 0ea22910..66ed0c55 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -112,8 +112,7 @@ void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std::sh int16_t baseline = 0; fragment.append((char*)&baseline, sizeof(baseline)); int16_t zero = 0; - - while ((int)fragment.size() < fFragmentBytes+fStraxHeaderSize) + for (; length > 0; length--) fragment.append((char*)&zero, sizeof(zero)); AddFragmentToBuffer(std::move(fragment), 0, 0); } @@ -132,6 +131,7 @@ void StraxFormatter::ProcessDatapacket(std::unique_ptr 
dp){ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_start); words = (*it)&0xFFFFFFF; std::u32string_view sv(dp->buff.data() + std::distance(dp->buff.begin(), it), words); + // std::u32string_view sv(it, it+words); //c++20 :( ProcessEvent(sv, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ev_end); fProcTimeEv += timespec_subtract(ev_end, ev_start); @@ -183,8 +183,6 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff, ret = ProcessChannel(buff, words, channel_mask, event_time, frags, ch, dp, dpc); clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ch_end); fProcTimeCh += timespec_subtract(ch_end, ch_start); - if (ret == -1) - break; buff.remove_prefix(ret); } } @@ -202,7 +200,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event, auto [timestamp, channel_words, baseline_ch, wf] = dp->digi->UnpackChannelHeader( buff, dp->clock_counter, dp->header_time, event_time, words_in_event, n_channels); - uint32_t samples_in_pulse = wf.size()*2; // 2 16-bit samples per 32-bit word + uint32_t samples_in_pulse = wf.size()*sizeof(char32_t)/sizeof(uint16_t); uint16_t sw = dp->digi->SampleWidth(); int samples_per_frag= fFragmentBytes>>1; int16_t global_ch = fOptions->GetChannel(dp->digi->bid(), channel); @@ -266,7 +264,7 @@ void StraxFormatter::AddFragmentToBuffer(std::string fragment, uint32_t ts, int fThreadId, chunk_id - max_chunk - 1, channel); } - fOutputBufferSize += fragment.size(); + fOutputBufferSize += fFragmentBytes + fStraxHeaderSize; if(!overlap){ fChunks[chunk_id].emplace_back(std::move(fragment)); @@ -363,8 +361,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ out_buffer[2] = out_buffer[1]; wsize[2] = wsize[1]; uncompressed_size[2] = uncompressed_size[1]; - std::vector names {{GetStringFormat(chunk_i), - GetStringFormat(chunk_i)+"_post", GetStringFormat(chunk_i+1)+"_pre"}}; + auto names = GetChunkNames(chunk_i); for (int i = 0; i < 3; i++) { // write to *_TEMP auto output_dir_temp = GetDirectoryPath(names[i], true); @@ -409,8 +406,12 @@ void StraxFormatter::WriteOutChunks() { void StraxFormatter::End() { // this line is awkward, but iterators don't always like it when you're // changing the container while looping over its contents - if (fChunks.size() > 0) CreateEmpty(fChunks.begin()->first); - while (fChunks.size() > 0) WriteOutChunk(fChunks.begin()->first); + int max_chunk = -1; + while (fChunks.size() > 0) { + max_chunk = fChunks.begin()->first; + WriteOutChunk(max_chunk); + } + if (max_chunk != -1) CreateEmpty(max_chunk); fChunks.clear(); auto end_dir = GetDirectoryPath("THE_END"); if(!fs::exists(end_dir)){ @@ -447,9 +448,7 @@ fs::path StraxFormatter::GetFilePath(const std::string& id, bool temp){ void StraxFormatter::CreateEmpty(int back_from){ for(; fEmptyVerified names {{GetStringFormat(fEmptyVerified), - GetStringFormat(fEmptyVerified)+"_post", GetStringFormat(fEmptyVerified+1)+"_pre"}}; - for (auto& n : names) { + for (auto& n : GetChunkNames(fEmptyVerified)) { if(!fs::exists(GetFilePath(n))){ if(!fs::exists(GetDirectoryPath(n))) fs::create_directory(GetDirectoryPath(n)); @@ -460,3 +459,9 @@ void StraxFormatter::CreateEmpty(int back_from){ } // chunks } +std::vector StraxFormatter::GetChunkNames(int chunk) { + std::vector ret{{GetStringFormat(chunk), GetStringFormat(chunk)+"_post", + GetStringFormat(chunk+1)+"_pre"}}; + return ret; +} + diff --git a/StraxFormatter.hh b/StraxFormatter.hh index c9029b4b..f3142acd 100644 --- a/StraxFormatter.hh +++ b/StraxFormatter.hh @@ -71,6 +71,7 @@ private: void End(); void GenerateArtificialDeadtime(int64_t, const 
std::shared_ptr&); void AddFragmentToBuffer(std::string, uint32_t, int); + std::vector GetChunkNames(int); std::experimental::filesystem::path GetFilePath(const std::string&, bool=false); std::experimental::filesystem::path GetDirectoryPath(const std::string&, bool=false); From 6062693e900ca8bf8d0fa013114e9e661b1d3337 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Wed, 7 Oct 2020 12:25:07 +0200 Subject: [PATCH 27/57] Time to test --- dispatcher/MongoConnect.py | 29 +++++++++++++++++++++++++++-- dispatcher/dispatcher.py | 11 ++++++----- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py index 6d57e470..24729943 100644 --- a/dispatcher/MongoConnect.py +++ b/dispatcher/MongoConnect.py @@ -257,9 +257,34 @@ def AggregateStatus(self): def GetWantedState(self): - # Pull the wanted state per detector from the DB and return a dict + # Aggregate the wanted state per detector from the DB and return a dict try: - for doc in self.collections['incoming_commands'].find(): + for doc in self.collections['incoming_commands'].aggregate([ + {'$sort': {'_id': -1}}, + {'$group': { + '_id': {'$concat': ['$detector', '.', '$field']}, + 'value': {'$first': '$value'}, + 'user': {'$first': '$user'}, + 'time': {'$first': '$time'}, + 'detector': {'$first': '$detector'}, + 'key': {'$first': '$field'} + }}, + {'$group': { + '_id': '$detector', + 'keys': {'$push': '$key'}, + 'values': {'$push': '$value'}, + 'users': {'$push': '$user'}, + 'times': {'$push': '$time'} + }}, + {'$project': { + 'detector': '$_id', + '_id': 0, + 'state': {'$arrayToObject': {'$zip': {'inputs': ['$keys', '$values']}}}, + 'user': {'$arrayElemAt': ['$users', {'$indexOfArray': ['$times', {'$max', '$times'}]}]} + }} + ]): + doc.update(doc['state']) + del doc['state'] self.latest_settings[doc['detector']]=doc return self.latest_settings except: diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index cf263731..c8ce8a9f 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -53,6 +53,12 @@ def main(): while(sh.event.is_set() == False): sh.event.wait(sleep_period) + # Print an update + for detector in latest_status.keys(): + logger.debug("Detector %s should be %sACTIVE and is %s"%( + detector, '' if goal_state[detector]['active'] == 'true' else 'IN', + latest_status[detector]['status'].name)) + # Get most recent check-in from all connected hosts if MongoConnector.GetUpdate(): continue @@ -68,11 +74,6 @@ def main(): # Time to report back MongoConnector.UpdateAggregateStatus() - # Print an update - for detector in latest_status.keys(): - logger.debug("Detector %s should be %sACTIVE and is %s"%( - detector, '' if goal_state[detector]['active'] == 'true' else 'IN', - latest_status[detector]['status'].name)) MongoConnector.Quit() return From e568e1a3f37acf34119477c0bca7136c28a98e35 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 8 Oct 2020 11:25:31 +0200 Subject: [PATCH 28/57] Reordering prints --- dispatcher/dispatcher.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index c8ce8a9f..2d8a91a0 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -53,12 +53,6 @@ def main(): while(sh.event.is_set() == False): sh.event.wait(sleep_period) - # Print an update - for detector in latest_status.keys(): - logger.debug("Detector %s should be %sACTIVE and is %s"%( - detector, '' if goal_state[detector]['active'] == 'true' else 'IN', - 
latest_status[detector]['status'].name)) - # Get most recent check-in from all connected hosts if MongoConnector.GetUpdate(): continue @@ -68,6 +62,13 @@ def main(): goal_state = MongoConnector.GetWantedState() if goal_state is None: continue + + # Print an update + for detector in latest_status.keys(): + logger.debug("Detector %s should be %sACTIVE and is %s"%( + detector, '' if goal_state[detector]['active'] == 'true' else 'IN', + latest_status[detector]['status'].name)) + # Decision time. Are we actually in our goal state? If not what should we do? DAQControl.SolveProblem(latest_status, goal_state) From 4d82de0a63a47c4120d367bf49e7f436cf4ed250 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Fri, 9 Oct 2020 11:05:10 +0200 Subject: [PATCH 29/57] Tweaks --- DAQController.cc | 2 +- StraxFormatter.cc | 41 ++++++++++++++++++++++------------------- StraxFormatter.hh | 3 ++- V1724.cc | 1 + V1724.hh | 2 ++ V1724_MV.cc | 1 + V1730.cc | 1 + 7 files changed, 30 insertions(+), 21 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 41d69f00..edea75b6 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -241,7 +241,7 @@ void DAQController::ReadData(int link){ if (local_buffer.size() > 0) { fDataRate += local_size; int selector = (fCounter++)%fNProcessingThreads; - fFormatters[selector]->ReceiveDatapackets(local_buffer); + fFormatters[selector]->ReceiveDatapackets(local_buffer, local_size); local_size = 0; } readcycler++; diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 66ed0c55..3ddfac7a 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -31,6 +31,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetDouble("strax_chunk_length", 5)*1e9); // default 5s fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2); + FullFragmentSize = fFragmentBytes + fStraxHeaderSize; fCompressor = fOptions->GetString("compressor", "lz4"); fFullChunkLength = fChunkLength+fChunkOverlap; fHostname = fOptions->Hostname(); @@ -39,6 +40,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptr& ret) { void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std::shared_ptr& digi) { std::string fragment; - fragment.reserve(fFragmentBytes + fStraxHeaderSize); - timestamp *= digi->GetClockWidth(); - fragment.append((char*)×tamp, sizeof(timestamp)); + fragment.reserve(fFullFragmentSize); + timestamp *= digi->GetClockWidth(); // TODO nv int32_t length = fFragmentBytes>>1; + int16_t sw = digi->SampleWidth(), channel = digi->GetADChannel(), zero = 0; + fragment.append((char*)×tamp, sizeof(timestamp)); fragment.append((char*)&length, sizeof(length)); - int16_t sw = digi->SampleWidth(); fragment.append((char*)&sw, sizeof(sw)); - int16_t channel = 790; // TODO add MV and NV support fragment.append((char*)&channel, sizeof(channel)); fragment.append((char*)&length, sizeof(length)); - int16_t fragment_i = 0; - fragment.append((char*)&fragment_i, sizeof(fragment_i)); - int16_t baseline = 0; - fragment.append((char*)&baseline, sizeof(baseline)); - int16_t zero = 0; + fragment.append((char*)&zero, sizeof(zero)); // fragment_i + fragment.append((char*)&zero, sizeof(zero)); // baseline for (; length > 0; length--) - fragment.append((char*)&zero, sizeof(zero)); + fragment.append((char*)&zero, sizeof(zero)); // wf AddFragmentToBuffer(std::move(fragment), 0, 0); + return; } void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){ @@ -176,8 +175,9 @@ 
int StraxFormatter::ProcessEvent(std::u32string_view buff, buff.remove_prefix(event_header_words); int ret; int frags(0); + unsigned n_chan = dp->digi->GetNumChannels(); - for(unsigned ch=0; ch>& in) { +void StraxFormatter::ReceiveDatapackets(std::list>& in, int bytes) { { const std::lock_guard lk(fBufferMutex); fBufferCounter[in.size()]++; fBuffer.splice(fBuffer.end(), in); + fInputBufferSize += bytes; } fCV.notify_one(); } @@ -331,7 +334,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ long max_compressed_size = 0; for (int i = 0; i < 2; i++) { - uncompressed_size[i] = buffers[i]->size()*(fFragmentBytes + fStraxHeaderSize); + uncompressed_size[i] = buffers[i]->size()*fFullFragmentSize; uncompressed.reserve(uncompressed_size[i]); for (auto it = buffers[i]->begin(); it != buffers[i]->end(); it++) uncompressed += *it; diff --git a/StraxFormatter.hh b/StraxFormatter.hh index f3142acd..13bce697 100644 --- a/StraxFormatter.hh +++ b/StraxFormatter.hh @@ -58,7 +58,7 @@ public: void Process(); std::pair GetBufferSize() {return {fInputBufferSize.load(), fOutputBufferSize.load()};} void GetDataPerChan(std::map& ret); - void ReceiveDatapackets(std::list>&); + void ReceiveDatapackets(std::list>&, int); private: void ProcessDatapacket(std::unique_ptr dp); @@ -83,6 +83,7 @@ private: int64_t fChunkOverlap; // ns int fFragmentBytes; int fStraxHeaderSize; // bytes + int fFullFragmentSize; int fBufferNumChunks; int fWarnIfChunkOlderThan; unsigned fChunkNameLength; diff --git a/V1724.cc b/V1724.cc index 1eaea60a..d9a0ade7 100644 --- a/V1724.cc +++ b/V1724.cc @@ -39,6 +39,7 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int BLT_SIZE = opts->GetInt("blt_size", 512*1024); // there's a more elegant way to do this, but I'm not going to write it fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle); + fArtificialDeadtimeChannel = 790; if (Init(link, crate, opts)) { throw std::runtime_error("Board init failed"); diff --git a/V1724.hh b/V1724.hh index 28c60a58..2a9e3e5d 100644 --- a/V1724.hh +++ b/V1724.hh @@ -27,6 +27,7 @@ class V1724{ int bid() {return fBID;} uint16_t SampleWidth() {return fSampleWidth;} int GetClockWidth() {return fClockCycle;} + int16_t GetADChannel() {return fArtificialDeadtimeChannel;} virtual int LoadDAC(std::vector&); void ClampDACValues(std::vector&, std::map>&); @@ -90,6 +91,7 @@ protected: float fBLTSafety, fBufferSafety; int fSampleWidth, fClockCycle; + int16_t fArtificialDeadtimeChannel; }; diff --git a/V1724_MV.cc b/V1724_MV.cc index 98d62d7f..99a611f2 100644 --- a/V1724_MV.cc +++ b/V1724_MV.cc @@ -6,6 +6,7 @@ V1724_MV::V1724_MV(std::shared_ptr& log, std::shared_ptr& opt V1724(log, opts, link, crate, bid, address) { // MV boards seem to have reg 0x1n80 for channel n threshold fChTrigRegister = 0x1080; + fArtificialDeadtimeChannel = 791; } V1724_MV::~V1724_MV(){} diff --git a/V1730.cc b/V1730.cc index dd36be67..e82a3a18 100644 --- a/V1730.cc +++ b/V1730.cc @@ -7,6 +7,7 @@ V1730::V1730(std::shared_ptr& log, std::shared_ptr& options, fNChannels = 16; fSampleWidth = 2; fClockCycle = 2; + fArtificialDeadtimeChannel = 792; } V1730::~V1730(){} From 6a3b91f5437ffc17d2be19c0ecc3b74b6ee1f36f Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Fri, 9 Oct 2020 15:16:40 +0200 Subject: [PATCH 30/57] Forgot to dereference --- StraxFormatter.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 3ddfac7a..5569ee3a 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -263,7 +263,7 @@ void 
StraxFormatter::AddFragmentToBuffer(std::string fragment, uint32_t ts, int fThreadId, *channel, chunk_id, min_chunk, max_chunk, timestamp, ts, rollovers); } else if (chunk_id - max_chunk > 1) { fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s) (ch%i)", - fThreadId, chunk_id - max_chunk - 1, channel); + fThreadId, chunk_id - max_chunk - 1, *channel); } fOutputBufferSize += fFullFragmentSize; From f4b5abd46000b270122b09fcae50483d8fab5881 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Mon, 12 Oct 2020 15:18:50 +0200 Subject: [PATCH 31/57] Tweaks to logic --- DAQController.cc | 7 ++++--- StraxFormatter.cc | 28 ++++++++++++++++++---------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 41d69f00..78692b93 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -271,11 +271,13 @@ int DAQController::OpenThreads(){ } void DAQController::CloseThreads(){ + fLog->Entry(MongoLog::Local, "Ending RO threads"); + for (auto& t : fReadoutThreads) if (t.joinable()) t.join(); std::map board_fails; const std::lock_guard lg(fMutex); for (auto& sf : fFormatters) sf->Close(board_fails); - // give threads time to finish - std::this_thread::sleep_for(std::chrono::seconds(1)); + if (fFormatters.size() > 0) // give threads time to finish + std::this_thread::sleep_for(std::chrono::seconds(1)); fLog->Entry(MongoLog::Local, "Joining processing threads"); for (auto& t : fProcessingThreads) if (t.joinable()) t.join(); fProcessingThreads.clear(); @@ -290,7 +292,6 @@ void DAQController::CloseThreads(){ for (auto& iter : board_fails) msg << iter.first << ":" << iter.second << " | "; fLog->Entry(MongoLog::Warning, msg.str()); } - for (auto& t : fReadoutThreads) if (t.joinable()) t.join(); } void DAQController::StatusUpdate(mongocxx::collection* collection) { diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 66ed0c55..1ad8210f 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -331,10 +331,12 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ long max_compressed_size = 0; for (int i = 0; i < 2; i++) { + if (buffers[i]->size() == 0) continue; uncompressed_size[i] = buffers[i]->size()*(fFragmentBytes + fStraxHeaderSize); uncompressed.reserve(uncompressed_size[i]); for (auto it = buffers[i]->begin(); it != buffers[i]->end(); it++) - uncompressed += *it; + uncompressed += *it; // std::accumulate would be nice but 3x slower without -O2 + // (also only works on c++20 because std::move, but still) buffers[i]->clear(); if(fCompressor == "blosc"){ max_compressed_size = uncompressed_size[i] + BLOSC_MAX_OVERHEAD; @@ -377,8 +379,8 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ auto filename = GetFilePath(names[i]); // shenanigans or skulduggery? if(fs::exists(filename)) { - fLog->Entry(MongoLog::Warning, "Chunk %s from thread %lx already exists? %li vs %li bytes", - names[i].c_str(), fThreadId, fs::file_size(filename), wsize[i]); + fLog->Entry(MongoLog::Warning, "Chunk %s from thread %lx already exists? 
%li vs %li bytes (%lx)", + names[i].c_str(), fThreadId, fs::file_size(filename), wsize[i], uncompressed_size[i]); } // Move this chunk from *_TEMP to the same path without TEMP @@ -392,12 +394,18 @@ void StraxFormatter::WriteOutChunk(int chunk_i){ } void StraxFormatter::WriteOutChunks() { - if ((int)fChunks.size() < fBufferNumChunks) return; - auto [min_iter, max_iter] = std::minmax_element(fChunks.begin(), fChunks.end(), - [&](auto& a, auto& b){return a.first < b.first;}); - int max_chunk = (*max_iter).first; - int min_chunk = (*min_iter).first; - for (; min_chunk <= max_chunk - fBufferNumChunks; min_chunk++) + int min_chunk(999999), max_chunk(0), tot_frags(0), n_frags(0); + double average_chunk(0); + for (auto it = fChunks.begin(); it != fChunks.end(); it++) { + min_chunk = std::min(min_chunk, it->first); + max_chunk = std::max(max_chunk, it->first); + n_frags = it->second.size() + fOverlaps[it->first].size(); + tot_frags += n_frags; + average_chunk += it->first * n_frags; + } + if (tot_frags == 0) return; + average_chunk /= tot_frags; + for (; min_chunk < average_chunk - fBufferNumChunks; min_chunk++) WriteOutChunk(min_chunk); CreateEmpty(min_chunk); return; @@ -408,7 +416,7 @@ void StraxFormatter::End() { // changing the container while looping over its contents int max_chunk = -1; while (fChunks.size() > 0) { - max_chunk = fChunks.begin()->first; + max_chunk = std::max(max_chunk, fChunks.begin()->first); WriteOutChunk(max_chunk); } if (max_chunk != -1) CreateEmpty(max_chunk); From 175e19af906668ab5db56ef2ba20b53a144457dc Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Wed, 7 Oct 2020 12:25:07 +0200 Subject: [PATCH 32/57] Time to test --- dispatcher/MongoConnect.py | 29 +++++++++++++++++++++++++++-- dispatcher/dispatcher.py | 11 ++++++----- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py index 6d57e470..24729943 100644 --- a/dispatcher/MongoConnect.py +++ b/dispatcher/MongoConnect.py @@ -257,9 +257,34 @@ def AggregateStatus(self): def GetWantedState(self): - # Pull the wanted state per detector from the DB and return a dict + # Aggregate the wanted state per detector from the DB and return a dict try: - for doc in self.collections['incoming_commands'].find(): + for doc in self.collections['incoming_commands'].aggregate([ + {'$sort': {'_id': -1}}, + {'$group': { + '_id': {'$concat': ['$detector', '.', '$field']}, + 'value': {'$first': '$value'}, + 'user': {'$first': '$user'}, + 'time': {'$first': '$time'}, + 'detector': {'$first': '$detector'}, + 'key': {'$first': '$field'} + }}, + {'$group': { + '_id': '$detector', + 'keys': {'$push': '$key'}, + 'values': {'$push': '$value'}, + 'users': {'$push': '$user'}, + 'times': {'$push': '$time'} + }}, + {'$project': { + 'detector': '$_id', + '_id': 0, + 'state': {'$arrayToObject': {'$zip': {'inputs': ['$keys', '$values']}}}, + 'user': {'$arrayElemAt': ['$users', {'$indexOfArray': ['$times', {'$max', '$times'}]}]} + }} + ]): + doc.update(doc['state']) + del doc['state'] self.latest_settings[doc['detector']]=doc return self.latest_settings except: diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index cf263731..c8ce8a9f 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -53,6 +53,12 @@ def main(): while(sh.event.is_set() == False): sh.event.wait(sleep_period) + # Print an update + for detector in latest_status.keys(): + logger.debug("Detector %s should be %sACTIVE and is %s"%( + detector, '' if goal_state[detector]['active'] == 'true' 
else 'IN', + latest_status[detector]['status'].name)) + # Get most recent check-in from all connected hosts if MongoConnector.GetUpdate(): continue @@ -68,11 +74,6 @@ def main(): # Time to report back MongoConnector.UpdateAggregateStatus() - # Print an update - for detector in latest_status.keys(): - logger.debug("Detector %s should be %sACTIVE and is %s"%( - detector, '' if goal_state[detector]['active'] == 'true' else 'IN', - latest_status[detector]['status'].name)) MongoConnector.Quit() return From 4c647d1126bdad83dfc0864ce15a1b7644c2e44a Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Thu, 8 Oct 2020 11:25:31 +0200 Subject: [PATCH 33/57] Reordering prints --- dispatcher/dispatcher.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index c8ce8a9f..2d8a91a0 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -53,12 +53,6 @@ def main(): while(sh.event.is_set() == False): sh.event.wait(sleep_period) - # Print an update - for detector in latest_status.keys(): - logger.debug("Detector %s should be %sACTIVE and is %s"%( - detector, '' if goal_state[detector]['active'] == 'true' else 'IN', - latest_status[detector]['status'].name)) - # Get most recent check-in from all connected hosts if MongoConnector.GetUpdate(): continue @@ -68,6 +62,13 @@ def main(): goal_state = MongoConnector.GetWantedState() if goal_state is None: continue + + # Print an update + for detector in latest_status.keys(): + logger.debug("Detector %s should be %sACTIVE and is %s"%( + detector, '' if goal_state[detector]['active'] == 'true' else 'IN', + latest_status[detector]['status'].name)) + # Decision time. Are we actually in our goal state? If not what should we do? DAQControl.SolveProblem(latest_status, goal_state) From 56160d8932b5101364d57df1ef733a754724bf12 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Wed, 7 Oct 2020 12:25:07 +0200 Subject: [PATCH 34/57] Time to test --- dispatcher/dispatcher.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py index 2d8a91a0..e36ea7aa 100644 --- a/dispatcher/dispatcher.py +++ b/dispatcher/dispatcher.py @@ -74,7 +74,6 @@ def main(): # Time to report back MongoConnector.UpdateAggregateStatus() - MongoConnector.Quit() return From 1b69e49746a704e3d4fda3c996fe830573071318 Mon Sep 17 00:00:00 2001 From: Darryl Masson Date: Fri, 9 Oct 2020 11:05:10 +0200 Subject: [PATCH 35/57] Tweaks --- DAQController.cc | 2 +- StraxFormatter.cc | 41 ++++++++++++++++++++++------------------- StraxFormatter.hh | 3 ++- V1724.cc | 1 + V1724.hh | 2 ++ V1724_MV.cc | 1 + V1730.cc | 1 + 7 files changed, 30 insertions(+), 21 deletions(-) diff --git a/DAQController.cc b/DAQController.cc index 78692b93..ff578405 100644 --- a/DAQController.cc +++ b/DAQController.cc @@ -241,7 +241,7 @@ void DAQController::ReadData(int link){ if (local_buffer.size() > 0) { fDataRate += local_size; int selector = (fCounter++)%fNProcessingThreads; - fFormatters[selector]->ReceiveDatapackets(local_buffer); + fFormatters[selector]->ReceiveDatapackets(local_buffer, local_size); local_size = 0; } readcycler++; diff --git a/StraxFormatter.cc b/StraxFormatter.cc index 1ad8210f..3b1d849a 100644 --- a/StraxFormatter.cc +++ b/StraxFormatter.cc @@ -31,6 +31,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetDouble("strax_chunk_length", 5)*1e9); // default 5s fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s 
From 4c647d1126bdad83dfc0864ce15a1b7644c2e44a Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Thu, 8 Oct 2020 11:25:31 +0200
Subject: [PATCH 33/57] Reordering prints

---
 dispatcher/dispatcher.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py
index c8ce8a9f..2d8a91a0 100644
--- a/dispatcher/dispatcher.py
+++ b/dispatcher/dispatcher.py
@@ -53,12 +53,6 @@ def main():
     while(sh.event.is_set() == False):
         sh.event.wait(sleep_period)
 
-        # Print an update
-        for detector in latest_status.keys():
-            logger.debug("Detector %s should be %sACTIVE and is %s"%(
-                detector, '' if goal_state[detector]['active'] == 'true' else 'IN',
-                latest_status[detector]['status'].name))
-
         # Get most recent check-in from all connected hosts
         if MongoConnector.GetUpdate():
             continue
@@ -68,6 +62,13 @@ def main():
         goal_state = MongoConnector.GetWantedState()
         if goal_state is None:
             continue
+
+        # Print an update
+        for detector in latest_status.keys():
+            logger.debug("Detector %s should be %sACTIVE and is %s"%(
+                detector, '' if goal_state[detector]['active'] == 'true' else 'IN',
+                latest_status[detector]['status'].name))
+
         # Decision time. Are we actually in our goal state? If not what should we do?
         DAQControl.SolveProblem(latest_status, goal_state)

From 56160d8932b5101364d57df1ef733a754724bf12 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 7 Oct 2020 12:25:07 +0200
Subject: [PATCH 34/57] Time to test

---
 dispatcher/dispatcher.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dispatcher/dispatcher.py b/dispatcher/dispatcher.py
index 2d8a91a0..e36ea7aa 100644
--- a/dispatcher/dispatcher.py
+++ b/dispatcher/dispatcher.py
@@ -74,7 +74,6 @@ def main():
 
         # Time to report back
         MongoConnector.UpdateAggregateStatus()
-
     MongoConnector.Quit()
     return

From 1b69e49746a704e3d4fda3c996fe830573071318 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Fri, 9 Oct 2020 11:05:10 +0200
Subject: [PATCH 35/57] Tweaks

---
 DAQController.cc | 2 +-
 StraxFormatter.cc | 41 ++++++++++++++++++++++-------------------
 StraxFormatter.hh | 3 ++-
 V1724.cc | 1 +
 V1724.hh | 2 ++
 V1724_MV.cc | 1 +
 V1730.cc | 1 +
 7 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/DAQController.cc b/DAQController.cc
index 78692b93..ff578405 100644
--- a/DAQController.cc
+++ b/DAQController.cc
@@ -241,7 +241,7 @@ void DAQController::ReadData(int link){
     if (local_buffer.size() > 0) {
       fDataRate += local_size;
       int selector = (fCounter++)%fNProcessingThreads;
-      fFormatters[selector]->ReceiveDatapackets(local_buffer);
+      fFormatters[selector]->ReceiveDatapackets(local_buffer, local_size);
       local_size = 0;
     }
     readcycler++;
diff --git a/StraxFormatter.cc b/StraxFormatter.cc
index 1ad8210f..3b1d849a 100644
--- a/StraxFormatter.cc
+++ b/StraxFormatter.cc
@@ -31,6 +31,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetDouble("strax_chunk_length", 5)*1e9); // default 5s
   fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s
   fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2);
+  FullFragmentSize = fFragmentBytes + fStraxHeaderSize;
   fCompressor = fOptions->GetString("compressor", "lz4");
   fFullChunkLength = fChunkLength+fChunkOverlap;
   fHostname = fOptions->Hostname();
@@ -39,6 +40,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptr& ret) {
 
 void StraxFormatter::GenerateArtificialDeadtime(int64_t timestamp, const std::shared_ptr& digi) {
   std::string fragment;
-  fragment.reserve(fFragmentBytes + fStraxHeaderSize);
-  timestamp *= digi->GetClockWidth();
-  fragment.append((char*)&timestamp, sizeof(timestamp));
+  fragment.reserve(fFullFragmentSize);
+  timestamp *= digi->GetClockWidth(); // TODO nv
   int32_t length = fFragmentBytes>>1;
+  int16_t sw = digi->SampleWidth(), channel = digi->GetADChannel(), zero = 0;
+  fragment.append((char*)&timestamp, sizeof(timestamp));
   fragment.append((char*)&length, sizeof(length));
-  int16_t sw = digi->SampleWidth();
   fragment.append((char*)&sw, sizeof(sw));
-  int16_t channel = 790; // TODO add MV and NV support
   fragment.append((char*)&channel, sizeof(channel));
   fragment.append((char*)&length, sizeof(length));
-  int16_t fragment_i = 0;
-  fragment.append((char*)&fragment_i, sizeof(fragment_i));
-  int16_t baseline = 0;
-  fragment.append((char*)&baseline, sizeof(baseline));
-  int16_t zero = 0;
+  fragment.append((char*)&zero, sizeof(zero)); // fragment_i
+  fragment.append((char*)&zero, sizeof(zero)); // baseline
   for (; length > 0; length--)
-    fragment.append((char*)&zero, sizeof(zero));
+    fragment.append((char*)&zero, sizeof(zero)); // wf
   AddFragmentToBuffer(std::move(fragment), 0, 0);
+  return;
 }
 
 void StraxFormatter::ProcessDatapacket(std::unique_ptr dp){
@@ -176,8 +175,9 @@ int StraxFormatter::ProcessEvent(std::u32string_view buff,
   buff.remove_prefix(event_header_words);
   int ret;
   int frags(0);
+  unsigned n_chan = dp->digi->GetNumChannels();
 
-  for(unsigned ch=0; ch>& in) {
+void StraxFormatter::ReceiveDatapackets(std::list>& in, int bytes) {
   {
     const std::lock_guard lk(fBufferMutex);
     fBufferCounter[in.size()]++;
     fBuffer.splice(fBuffer.end(), in);
+    fInputBufferSize += bytes;
   }
   fCV.notify_one();
 }
@@ -332,7 +335,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){
 
   for (int i = 0; i < 2; i++) {
     if (buffers[i]->size() == 0) continue;
-    uncompressed_size[i] = buffers[i]->size()*(fFragmentBytes + fStraxHeaderSize);
+    uncompressed_size[i] = buffers[i]->size()*fFullFragmentSize;
     uncompressed.reserve(uncompressed_size[i]);
     for (auto it = buffers[i]->begin(); it != buffers[i]->end(); it++)
       uncompressed += *it; // std::accumulate would be nice but 3x slower without -O2
diff --git a/StraxFormatter.hh b/StraxFormatter.hh
index f3142acd..13bce697 100644
--- a/StraxFormatter.hh
+++ b/StraxFormatter.hh
@@ -58,7 +58,7 @@ public:
   void Process();
   std::pair GetBufferSize() {return {fInputBufferSize.load(), fOutputBufferSize.load()};}
   void GetDataPerChan(std::map& ret);
-  void ReceiveDatapackets(std::list>&);
+  void ReceiveDatapackets(std::list>&, int);
 
 private:
   void ProcessDatapacket(std::unique_ptr dp);
@@ -83,6 +83,7 @@ private:
   int64_t fChunkOverlap; // ns
   int fFragmentBytes;
   int fStraxHeaderSize; // bytes
+  int fFullFragmentSize;
   int fBufferNumChunks;
   int fWarnIfChunkOlderThan;
   unsigned fChunkNameLength;
diff --git a/V1724.cc b/V1724.cc
index 1eaea60a..d9a0ade7 100644
--- a/V1724.cc
+++ b/V1724.cc
@@ -39,6 +39,7 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int
   BLT_SIZE = opts->GetInt("blt_size", 512*1024); // there's a more elegant way to do this, but I'm not going to write it
   fClockPeriod = std::chrono::nanoseconds((1l<<31)*fClockCycle);
+  fArtificialDeadtimeChannel = 790;
 
   if (Init(link, crate, opts)) {
     throw std::runtime_error("Board init failed");
diff --git a/V1724.hh b/V1724.hh
index 28c60a58..2a9e3e5d 100644
--- a/V1724.hh
+++ b/V1724.hh
@@ -27,6 +27,7 @@ class V1724{
   int bid() {return fBID;}
   uint16_t SampleWidth() {return fSampleWidth;}
   int GetClockWidth() {return fClockCycle;}
+  int16_t GetADChannel() {return fArtificialDeadtimeChannel;}
   virtual int LoadDAC(std::vector&);
   void ClampDACValues(std::vector&, std::map>&);
@@ -90,6 +91,7 @@ protected:
   float fBLTSafety, fBufferSafety;
 
   int fSampleWidth, fClockCycle;
+  int16_t fArtificialDeadtimeChannel;
 };

diff --git a/V1724_MV.cc b/V1724_MV.cc
index 98d62d7f..99a611f2 100644
--- a/V1724_MV.cc
+++ b/V1724_MV.cc
@@ -6,6 +6,7 @@ V1724_MV::V1724_MV(std::shared_ptr& log, std::shared_ptr& opt
   V1724(log, opts, link, crate, bid, address) {
   // MV boards seem to have reg 0x1n80 for channel n threshold
   fChTrigRegister = 0x1080;
+  fArtificialDeadtimeChannel = 791;
 }
 
 V1724_MV::~V1724_MV(){}
diff --git a/V1730.cc b/V1730.cc
index dd36be67..e82a3a18 100644
--- a/V1730.cc
+++ b/V1730.cc
@@ -7,6 +7,7 @@ V1730::V1730(std::shared_ptr& log, std::shared_ptr& options,
   fNChannels = 16;
   fSampleWidth = 2;
   fClockCycle = 2;
+  fArtificialDeadtimeChannel = 792;
 }
 
 V1730::~V1730(){}

From c34fdd6580ce90d2eefef903093cdf64f01b1b39 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Fri, 9 Oct 2020 15:16:40 +0200
Subject: [PATCH 36/57] Forgot to dereference

---
 StraxFormatter.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/StraxFormatter.cc b/StraxFormatter.cc
index 3b1d849a..eb76881f 100644
--- a/StraxFormatter.cc
+++ b/StraxFormatter.cc
@@ -263,7 +263,7 @@ void StraxFormatter::AddFragmentToBuffer(std::string fragment, uint32_t ts, int
         fThreadId, *channel, chunk_id, min_chunk, max_chunk, timestamp, ts, rollovers);
   } else if (chunk_id - max_chunk > 1) {
     fLog->Entry(MongoLog::Message, "Thread %lx skipped %i chunk(s) (ch%i)",
-        fThreadId, chunk_id - max_chunk - 1, channel);
+        fThreadId, chunk_id - max_chunk - 1, *channel);
   }
 
   fOutputBufferSize += fFullFragmentSize;

From d108c1f2cfeaef07390e9a6bca4c4554fd811fcd Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Mon, 12 Oct 2020 15:25:05 +0200
Subject: [PATCH 37/57] Moved a mutex

---
 DAQController.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DAQController.cc b/DAQController.cc
index ff578405..951646a1 100644
--- a/DAQController.cc
+++ b/DAQController.cc
@@ -271,10 +271,10 @@ int DAQController::OpenThreads(){
 }
 
 void DAQController::CloseThreads(){
+  const std::lock_guard lg(fMutex);
   fLog->Entry(MongoLog::Local, "Ending RO threads");
   for (auto& t : fReadoutThreads) if (t.joinable()) t.join();
   std::map board_fails;
-  const std::lock_guard lg(fMutex);
   for (auto& sf : fFormatters) sf->Close(board_fails);
   if (fFormatters.size() > 0) // give threads time to finish
     std::this_thread::sleep_for(std::chrono::seconds(1));

From 73867b4989d835146d5b13122863dd7dcb269276 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Mon, 12 Oct 2020 15:26:46 +0200
Subject: [PATCH 38/57] Typos

---
 StraxFormatter.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/StraxFormatter.cc b/StraxFormatter.cc
index eb76881f..ee780f6e 100644
--- a/StraxFormatter.cc
+++ b/StraxFormatter.cc
@@ -31,7 +31,7 @@ StraxFormatter::StraxFormatter(std::shared_ptr& opts, std::shared_ptrGetDouble("strax_chunk_length", 5)*1e9); // default 5s
   fChunkOverlap = long(fOptions->GetDouble("strax_chunk_overlap", 0.5)*1e9); // default 0.5s
   fFragmentBytes = fOptions->GetInt("strax_fragment_payload_bytes", 110*2);
-  FullFragmentSize = fFragmentBytes + fStraxHeaderSize;
+  fFullFragmentSize = fFragmentBytes + fStraxHeaderSize;
   fCompressor = fOptions->GetString("compressor", "lz4");
   fFullChunkLength = fChunkLength+fChunkOverlap;
   fHostname = fOptions->Hostname();
@@ -212,7 +212,7 @@ int StraxFormatter::ProcessChannel(std::u32string_view buff, int words_in_event,
   frags += num_frags;
   int32_t samples_this_frag = 0;
   int64_t time_this_frag = 0;
-  const uint16_t filler = 0;
+  const uint16_t zero_filler = 0;
   for (uint16_t frag_i = 0; frag_i < num_frags; frag_i++) {
     std::string fragment;
     fragment.reserve(fFullFragmentSize);

From c06e3ac712be3d29a9b729f0a972c7497bc01eaa Mon Sep 17 00:00:00 2001
From: xedaq
Date: Mon, 12 Oct 2020 15:30:40 +0200
Subject: [PATCH 39/57] Typo:

---
 dispatcher/MongoConnect.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py
index 24729943..f45558d4 100644
--- a/dispatcher/MongoConnect.py
+++ b/dispatcher/MongoConnect.py
@@ -280,7 +280,7 @@ def GetWantedState(self):
                     'detector': '$_id',
                     '_id': 0,
                     'state': {'$arrayToObject': {'$zip': {'inputs': ['$keys', '$values']}}},
-                    'user': {'$arrayElemAt': ['$users', {'$indexOfArray': ['$times', {'$max', '$times'}]}]}
+                    'user': {'$arrayElemAt': ['$users', {'$indexOfArray': ['$times', {'$max': '$times'}]}]}
                 }}
             ]):
                 doc.update(doc['state'])
                 del doc['state']

From 445eaa8ee70596a3026db6a646be633539f4ddfd Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Tue, 13 Oct 2020 14:53:23 +0200
Subject: [PATCH 40/57] Tweaks

---
 Options.cc | 18 +++++++-----------
 StraxFormatter.cc | 9 +++------
 StraxFormatter.hh | 2 +-
 3 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/Options.cc b/Options.cc
index d6d2a0a2..3310d29d 100644
--- a/Options.cc
+++ b/Options.cc
@@ -396,25 +396,21 @@ void Options::UpdateDAC(std::map>
   return;
 }
 
-void Options::SaveBenchmarks(std::map>& counters, long bytes,
-    std::string sid, std::map& times) {
+void Options::SaveBenchmarks(std::map>& counters,
+    long bytes, std::string sid, std::map& times) {
   using namespace bsoncxx::builder::stream;
-  int level = GetInt("benchmark_level", 2);
+  int level = GetInt("benchmark_level", 1);
   if (level == 0) return;
-  int run_id = -1;
-  try{
-    run_id = std::stoi(GetString("run_identifier", "latest"));
-  } catch (...) {
-  }
+  int run_id = GetInt("number", -1);
   std::map> _counters;
-  if (level == 2) {
+  if (level == 1) {
     for (const auto& p : counters)
       for (const auto& pp : p.second)
         if (pp.first != 0)
-          _counters[p.first][int(std::floor(std::log2(pp.first)))] += pp.second;
+          _counters[p.first][int(std::log2(pp.first))] += pp.second;
         else
          _counters[p.first][-1] += pp.second;
-  } else if (level == 3) {
+  } else if (level == 2) {
     _counters = counters;
   }
 
diff --git a/StraxFormatter.cc b/StraxFormatter.cc
index ee780f6e..23f8d3c9 100644
--- a/StraxFormatter.cc
+++ b/StraxFormatter.cc
@@ -291,9 +291,9 @@ void StraxFormatter::Process() {
   std::stringstream ss;
   ss< dp;
-  while (fActive == true) {
+  while (fActive == true || fBuffer.size() > 0) {
     std::unique_lock lk(fBufferMutex);
     fCV.wait(lk, [&]{return fBuffer.size() > 0 || fActive == false;});
     if (fBuffer.size() > 0) {
@@ -308,9 +308,6 @@
   }
   if (fBytesProcessed > 0)
     End();
-  fRunning = false;
-  if (fBuffer.size() > 0)
-    fLog->Entry(MongoLog::Warning, "%i DPs unprocessed", fBuffer.size());
 }
 
 // Can tune here as needed, these are defaults from the LZ4 examples
@@ -326,7 +323,7 @@ void StraxFormatter::WriteOutChunk(int chunk_i){
   struct timespec comp_start, comp_end;
   clock_gettime(CLOCK_THREAD_CPUTIME_ID, &comp_start);
 
-  std::vector*> buffers = {&fChunks[chunk_i], &fOverlaps[chunk_i]};
+  std::vector*> buffers{{&fChunks[chunk_i], &fOverlaps[chunk_i]}};
   std::vector uncompressed_size(3, 0);
   std::string uncompressed;
   std::vector> out_buffer(3);
diff --git a/StraxFormatter.hh b/StraxFormatter.hh
index 13bce697..2d77bf97 100644
--- a/StraxFormatter.hh
+++ b/StraxFormatter.hh
@@ -91,7 +91,7 @@ private:
   std::string fOutputPath, fHostname, fFullHostname;
   std::shared_ptr fOptions;
   std::shared_ptr fLog;
-  std::atomic_bool fActive, fRunning;
+  std::atomic_bool fActive;
   std::string fCompressor;
   std::map> fChunks, fOverlaps;
   std::map fFailCounter;

From d17bf77204aada754062064f0591438c15c01ed0 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Tue, 13 Oct 2020 15:01:00 +0200
Subject: [PATCH 41/57] Typo

---
 Options.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Options.cc b/Options.cc
index 3310d29d..544afa6b 100644
--- a/Options.cc
+++ b/Options.cc
@@ -423,7 +423,7 @@ void Options::SaveBenchmarks(std::map>& counter
   update_doc << "bytes" << bytes;
   for (auto& p : times)
     update_doc << p.first << p.second;
-  if (level >= 2) {
+  if (level >= 1) {
     for (auto& p : _counters) {
       update_doc << p.first << open_document;
       for (auto& pp : p.second)

From 3c8d9ca8c42e5c8a806bc1fa340c6eab0c82b1b6 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Tue, 13 Oct 2020 15:56:55 +0200
Subject: [PATCH 42/57] Few minor things from Coverity

---
 CControl_Handler.cc | 2 +-
 Options.cc | 1 -
 f1724.cc | 3 +--
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/CControl_Handler.cc b/CControl_Handler.cc
index f9b7382f..d4b1898b 100644
--- a/CControl_Handler.cc
+++ b/CControl_Handler.cc
@@ -8,7 +8,7 @@
 #include 
 
 CControl_Handler::CControl_Handler(std::shared_ptr& log, std::string procname) : DAQController(log, procname){
-  fCurrentRun = fBID = fBoardHandle-1;
+  fCurrentRun = fBID = fBoardHandle = -1;
   fV2718 = nullptr;
   fV1495 = nullptr;
 #ifdef HASDDC10
diff --git a/Options.cc b/Options.cc
index 544afa6b..92ac5cb6 100644
--- a/Options.cc
+++ b/Options.cc
@@ -179,7 +179,6 @@ std::string Options::GetString(std::string path, std::string default_value){
     fLog->Entry(MongoLog::Local, "Using default value for %s", path.c_str());
     return default_value;
   }
-  return "";
 }
 
 std::vector Options::GetBoards(std::string type){
diff --git a/f1724.cc b/f1724.cc
index a2af56d6..11a89538 100644
--- a/f1724.cc
+++ b/f1724.cc
@@ -24,9 +24,8 @@ std::condition_variable f1724::sCV;
 std::shared_ptr f1724::sLog;
 
 f1724::pmt_pos_t f1724::PMTiToXY(int i) {
-  pmt_pos_t ret;
+  pmt_pos_t ret{0.,0.,0};
   if (i == 0) {
-    ret.x = ret.y = 0;
     return ret;
   }
   if (i < 7) {

From 0161ee456fcd292532b5b55d56de58bcc772cdfe Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Thu, 15 Oct 2020 11:16:37 +0200
Subject: [PATCH 43/57] Tweak for end-of-run logic

---
 DAQController.cc | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/DAQController.cc b/DAQController.cc
index 951646a1..18479ca8 100644
--- a/DAQController.cc
+++ b/DAQController.cc
@@ -274,11 +274,13 @@ void DAQController::CloseThreads(){
   const std::lock_guard lg(fMutex);
   fLog->Entry(MongoLog::Local, "Ending RO threads");
   for (auto& t : fReadoutThreads) if (t.joinable()) t.join();
-  std::map board_fails;
-  for (auto& sf : fFormatters) sf->Close(board_fails);
-  if (fFormatters.size() > 0) // give threads time to finish
-    std::this_thread::sleep_for(std::chrono::seconds(1));
   fLog->Entry(MongoLog::Local, "Joining processing threads");
+  std::map board_fails;
+  for (auto& sf : fFormatters) {
+    while (sf->GetBufferSize().first > 0)
+      std::this_thread::sleep_for(std::chrono::milliseconds(10));
+    sf->Close(board_fails);
+  }
   for (auto& t : fProcessingThreads) if (t.joinable()) t.join();
   fProcessingThreads.clear();
   fLog->Entry(MongoLog::Local, "Destroying formatters");

From 414003cfaaf779d850b93961030dc19456109d71 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Thu, 15 Oct 2020 12:00:20 +0200
Subject: [PATCH 44/57] Configurable sleep between board reads

---
 DAQController.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/DAQController.cc b/DAQController.cc
index 18479ca8..b9ef7ca5 100644
--- a/DAQController.cc
+++ b/DAQController.cc
@@ -205,6 +205,7 @@ void DAQController::ReadData(int link){
   int words = 0;
   int local_size(0);
   fRunning[link] = true;
+  std::chrono::microseconds sleep_time(fOptions->GetInt("us_between_reads", 10));
   while(fReadLoop){
     for(auto& digi : fDigitizers[link]) {
 
@@ -245,7 +246,7 @@ void DAQController::ReadData(int link){
       local_size = 0;
     }
     readcycler++;
-    usleep(1);
+    std::this_thread::sleep_for(sleep_time);
   } // while run
   fRunning[link] = false;
   fLog->Entry(MongoLog::Local, "RO thread %i returning", link);

From eb0977695c1ed3aa29edcbe8818c4ce6cbea5aab Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Thu, 15 Oct 2020 15:27:08 +0200
Subject: [PATCH 45/57] Don't forget to update the docs

---
 docs/daq_options.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/daq_options.md b/docs/daq_options.md
index 0c655ea6..23a1ffcb 100644
--- a/docs/daq_options.md
+++ b/docs/daq_options.md
@@ -173,10 +173,10 @@ The V2718 crate controller has a few options to configure. Note that they must b
 | Field | Description |
 | ------ | ----------- |
 | pulser_freq | Float. The frequency to pulse the trigger/LED pulser in Hz. Supports from <1 Hz up to some MHz. Keep in mind this may not be implemented exactly since the CAEN API doesn't support every possible frequency exactly, but the software will attempt to match the desired frequency as closely as possible. |
-| neutron_veto | Should the S-IN signal be propogated to the neutron veto? 1-yes, 0-no |
-| muon_veto | Should the S-IN signal be propogated to the muon veto? 1-yes, 0-no |
-| led_trigger | Should the LED pulse be propagated to the LED driver? 1-yes, 0-no |
-| s_in | Should the run be started with S-IN? 1-yes, 0-no |
+| neutron_veto | Should the S-IN signal be propagated to the neutron veto (connector 4)? 1-yes, 0-no |
+| muon_veto | Should the S-IN signal be propagated to the muon veto (connector 1)? 1-yes, 0-no |
+| led_trigger | Should the LED pulse be propagated to the LED driver (connector 2)? 1-yes, 0-no |
+| s_in | Should the run be started with S-IN (connector 0)? 1-yes, 0-no |
 
 The top-level field 'run_start' (next section) is also required to define run start via S-IN.
 
@@ -233,12 +233,12 @@ There are various configuration options for the strax output that must be set.
 | strax_chunk_length | Float. Length of each strax chunk in seconds. There's some balance required here. It should be short enough that strax can process reasonably online, as it waits for each chunk to finish then loads it at once (the size should be digestable). But it shouldn't be so short that it needlessly micro-segments the data. Order of 5-15 seconds seems reasonable at the time of writing. Default 5. |
 |strax_fragment_payload_bytes | Int. How long are the fragments? In general this should be long enough that it definitely covers the vast majority of your SPE pulses. Our SPE pulses are ~100 samples, so the default value of 220 bytes (2 bytes per sample) provides a small amount of overhead. Undefined behavior if the value is odd, possibly undefined if it isn't a multiple of 4. |
 |strax_output_path | String. Where should we write data? This must be a locally mounted data store. Redax will handle sub-directories so just provide the top-level directory where all the live data should go (e.g. `/data/live`). |
-|strax_buffer_num_chunks | Int. How many full chunks should get buffered? Setting this at 1 or lower may cause data loss, and greater than 2 just means you need more memory in your readout machine. For instance, if 5 and 6 are buffered, as soon as something in chunk 7 shows up, chunk 5 is dumped to disk. |
+|strax_buffer_num_chunks | Int. How many full chunks should get buffered? Setting this at 1 or lower may cause data loss, and greater than 2 usually means you need more memory in your readout machine. For instance, if 5 and 6 are buffered, as soon as something in chunk 7 shows up, chunk 5 is dumped to disk. |
 |strax_chunk_phase_limit | Int. Sometimes pulses will show up at the processing stage late (or somehow behind the rest of them). If a pulse is this many chunks behind (or out of phase with) the chunks currently being buffered, log a warning to the database. |
 
 ## Channel Map
 
-Redax needs to provide the channel values to strax. Therefore the channel map (mapping module/channel in the digitizers to PMT position) must be provided at the readout stage.
+Redax needs to provide the channel values to strax. Therefore the channel map (mapping module/channel in the digitizers to PMT position) must be provided at the readout stage.
 
 This is in a quite simple format:
 
@@ -295,4 +295,5 @@ Redax accepts a variety of options that control various low-level operations. Th
 | blt_size | Int. How many bytes to read from the digitizer during each BLT readout. Default 0x80000. |
 | blt_safety_factor | Float. Sometimes the digitizer returns more bytes during a BLT readout than you ask for (it depends on the number and size of events in the digitizer's memory). This value is how much extra memory to allocate so you don't overrun the readout buffer. Default 1.5. |
 | do_sn_check | 0/1. Whether or not to have each board check its serial number during initialization. Default 0. |
+| us_between_reads | Int. How many microseconds to sleep between polling digitizers for data. This has a major performance impact that will matter when under extremely high loads (ie, the bleeding edge of what your server(s) are capable of), but otherwise shouldn't matter much. Default 10. |
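To make the tables above concrete, a stripped-down run-mode document using these fields might look roughly like the sketch below. This is only an illustration: the nesting of the crate-controller options under a "V2718" key, the value given to 'run_start', and the omission of everything else a real mode needs (boards, registers, channel map, thresholds, ...) are assumptions; the values shown are just the documented defaults.

```python
# Hypothetical options document illustrating the fields documented above.
# Only field names from the tables are used; structure and completeness are assumed.
example_mode = {
    "run_start": 1,                      # start the run via S-IN (assumed value)
    "V2718": {                           # assumed nesting for the crate controller block
        "pulser_freq": 1.0,              # Hz
        "neutron_veto": 0,
        "muon_veto": 0,
        "led_trigger": 0,
        "s_in": 1,
    },
    "strax_chunk_length": 5.0,           # seconds
    "strax_chunk_overlap": 0.5,          # seconds
    "strax_fragment_payload_bytes": 220,
    "strax_output_path": "/data/live",
    "strax_buffer_num_chunks": 2,
    "strax_chunk_phase_limit": 2,
    "blt_size": 0x80000,
    "blt_safety_factor": 1.5,
    "do_sn_check": 0,
    "us_between_reads": 10,
}
```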
From 4318db8ab7a9c744a4903666dbe02cda02384099 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Mon, 19 Oct 2020 12:02:44 +0200
Subject: [PATCH 46/57] Fixes?

---
 DAQController.cc | 1 +
 MongoLog.cc | 5 ++---
 StraxFormatter.cc | 3 ++-
 V1724.cc | 10 +++++++++-
 V1724.hh | 2 ++
 5 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/DAQController.cc b/DAQController.cc
index b9ef7ca5..ba6f9807 100644
--- a/DAQController.cc
+++ b/DAQController.cc
@@ -382,6 +382,7 @@ void DAQController::InitLink(std::vector>& digis,
     success += digi->LoadDAC(dac_values[bid]);
     // Load all the other fancy stuff
     success += digi->SetThresholds(fOptions->GetThresholds(bid));
+    digi->ResetClocks();
 
     fLog->Entry(MongoLog::Local, "Board %i programmed", digi->bid());
     if(success!=0){
diff --git a/MongoLog.cc b/MongoLog.cc
index da88ad01..08d089ab 100644
--- a/MongoLog.cc
+++ b/MongoLog.cc
@@ -87,12 +87,11 @@ int MongoLog::RotateLogFile() {
     }
     last_week.tm_mday += days_per_month[last_week.tm_mon]; // off by one error???
   }
-  std::experimental::filesystem::path p = LogFileName(&last_week);
+  std::experimental::filesystem::path p = fOutputDir/LogFileName(&last_week);
   if (std::experimental::filesystem::exists(p)) {
     fOutfile << FormatTime(&today) << " [INIT]: Deleting " << p << '\n';
     std::experimental::filesystem::remove(p);
-  }
-  else {
+  } else {
     fOutfile << FormatTime(&today) << " [INIT]: No older logfile to delete :(\n";
   }
   return 0;
diff --git a/StraxFormatter.cc b/StraxFormatter.cc
index 23f8d3c9..48db6c0c 100644
--- a/StraxFormatter.cc
+++ b/StraxFormatter.cc
@@ -365,13 +365,14 @@ void StraxFormatter::WriteOutChunk(int chunk_i){
   uncompressed_size[2] = uncompressed_size[1];
   auto names = GetChunkNames(chunk_i);
   for (int i = 0; i < 3; i++) {
+    if (uncompressed_size[i] == 0) continue;
     // write to *_TEMP
     auto output_dir_temp = GetDirectoryPath(names[i], true);
     auto filename_temp = GetFilePath(names[i], true);
     if (!fs::exists(output_dir_temp))
       fs::create_directory(output_dir_temp);
     std::ofstream writefile(filename_temp, std::ios::binary);
-    if (uncompressed_size[i] > 0) writefile.write(out_buffer[i]->data(), wsize[i]);
+    writefile.write(out_buffer[i]->data(), wsize[i]);
     writefile.close();
     out_buffer[i].reset();
 
diff --git a/V1724.cc b/V1724.cc
index d9a0ade7..7d3903ed 100644
--- a/V1724.cc
+++ b/V1724.cc
@@ -18,6 +18,7 @@ V1724::V1724(std::shared_ptr& log, std::shared_ptr& opts, int
   fAqStatusRegister = 0x8104;
   fSwTrigRegister = 0x8108;
   fResetRegister = 0xEF24;
+  fClearRegister = 0xEF28;
   fChStatusRegister = 0x1088;
   fChDACRegister = 0x1098;
   fNChannels = 8;
@@ -101,6 +102,8 @@ int V1724::SINStart(){
 }
 int V1724::SoftwareStart(){
   fLastClockTime = std::chrono::high_resolution_clock::now();
+  fRolloverCounter = 0;
+  fLastClock = 0;
   return WriteRegister(fAqCtrlRegister, 0x104);
 }
 int V1724::AcquisitionStop(bool){
@@ -121,7 +124,11 @@ bool V1724::EnsureStopped(int ntries, int tsleep){
 uint32_t V1724::GetAcquisitionStatus(){
   return ReadRegister(fAqStatusRegister);
 }
-
+int V1724::ResetClocks() {
+  fClockCounter = 0;
+  fLastClock = 0;
+  return WriteRegister(fClearRegister, 0x1);
+}
 int V1724::CheckErrors(){
   auto pll = ReadRegister(fBoardFailStatRegister);
   auto ros = ReadRegister(fReadoutStatusRegister);
@@ -147,6 +154,7 @@ std::tuple V1724::GetClockInfo(std::u32string_view sv) {
       return {ht, GetClockCounter(ht)};
     }
   } while (++it < sv.end());
+  fLog->Entry(MongoLog::Info, "No clock info for %i?", fBID);
   return {0xFFFFFFFF, -1};
 }
 
diff --git a/V1724.hh b/V1724.hh
index 2a9e3e5d..a5a69333 100644
--- a/V1724.hh
+++ b/V1724.hh
@@ -51,6 +51,7 @@ class V1724{
   virtual bool EnsureStopped(int ntries, int sleep);
   virtual int CheckErrors();
   virtual uint32_t GetAcquisitionStatus();
+  virtual int ResetClocks();
 
 protected:
   // Some values for base classes to override
@@ -58,6 +59,7 @@ protected:
   unsigned int fAqStatusRegister;
   unsigned int fSwTrigRegister;
   unsigned int fResetRegister;
+  unsigned int fClearRegister;
   unsigned int fChStatusRegister;
   unsigned int fChDACRegister;
   unsigned int fChTrigRegister;

From 5c37440809ad8185dbe1b2b1042054db992306dd Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Mon, 19 Oct 2020 12:06:29 +0200
Subject: [PATCH 47/57] Typo

---
 V1724.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/V1724.cc b/V1724.cc
index 7d3903ed..8f0fd487 100644
--- a/V1724.cc
+++ b/V1724.cc
@@ -125,7 +125,7 @@ uint32_t V1724::GetAcquisitionStatus(){
   return ReadRegister(fAqStatusRegister);
 }
 int V1724::ResetClocks() {
-  fClockCounter = 0;
+  fRolloverCounter = 0;
   fLastClock = 0;
   return WriteRegister(fClearRegister, 0x1);
 }
@@ -154,7 +154,7 @@ std::tuple V1724::GetClockInfo(std::u32string_view sv) {
       return {ht, GetClockCounter(ht)};
     }
   } while (++it < sv.end());
-  fLog->Entry(MongoLog::Info, "No clock info for %i?", fBID);
+  fLog->Entry(MongoLog::Message, "No clock info for %i?", fBID);
   return {0xFFFFFFFF, -1};
 }

From d8896ebc068976587666a105abdc33bf18e3716e Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Tue, 20 Oct 2020 10:14:03 +0200
Subject: [PATCH 48/57] Back to 2 dbs

---
 dispatcher/config.ini | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dispatcher/config.ini b/dispatcher/config.ini
index d81919f8..4ea3c28a 100644
--- a/dispatcher/config.ini
+++ b/dispatcher/config.ini
@@ -14,7 +14,7 @@ ClientTimeout = 10
 
 # Database URIs. Please store DB password in the MONGO_PASSWORD
 # environment variable and runs DB password as RUNS_MONGO_PASSWORD
-ControlDatabaseURI = mongodb://daq:%%s@xenon1t-daq:27017/admin
+ControlDatabaseURI = mongodb://daq:%%s@xenon1t-daq:27020/admin
 ControlDatabaseName = daq
 RunsDatabaseURI = mongodb://daq:%%s@xenon1t-daq:27017/admin
 RunsDatabaseName = xenonnt

From 74003801d487c5994164f475fb6f21b56e162daa Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 21 Oct 2020 16:13:55 +0200
Subject: [PATCH 49/57] Problem persists, workaround?

---
 main.cc | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/main.cc b/main.cc
index 14b53cea..3775fd97 100644
--- a/main.cc
+++ b/main.cc
@@ -219,8 +219,15 @@ int main(int argc, char** argv){
         logger->Entry(MongoLog::Debug, "No override options provided, continue without.");
       }
       // Mongocxx types confusing so passing json strings around
-      fOptions = std::make_shared(logger, (doc)["mode"].get_utf8().value.to_string(),
+      std::string mode = doc["mode"].get_utf8().value.to_string();
+      fLog->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
+      fOptions = std::make_shared(logger, mode,
           hostname, suri, dbname, override_json);
+      if (duration_cast(system_clock::now()-ack_time).count() > 9000){
+        fLog->Entry(MongoLog::Warning,
+            "Took too long to pull the config docs, try again");
+        continue;
+      }
       if(controller->Arm(fOptions) != 0){
         logger->Entry(MongoLog::Error, "Failed to initialize electronics");
         controller->Stop();
@@ -238,7 +245,7 @@ int main(int argc, char** argv){
       std::cout<<"Can't connect to DB so will continue what I'm doing"<
Date: Wed, 21 Oct 2020 16:15:05 +0200
Subject: [PATCH 50/57] Typo

---
 main.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main.cc b/main.cc
index 3775fd97..aad1cbe0 100644
--- a/main.cc
+++ b/main.cc
@@ -220,11 +220,11 @@ int main(int argc, char** argv){
       }
       // Mongocxx types confusing so passing json strings around
       std::string mode = doc["mode"].get_utf8().value.to_string();
-      fLog->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
+      logger->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
       fOptions = std::make_shared(logger, mode,
           hostname, suri, dbname, override_json);
       if (duration_cast(system_clock::now()-ack_time).count() > 9000){
-        fLog->Entry(MongoLog::Warning,
+        logger->Entry(MongoLog::Warning,
            "Took too long to pull the config docs, try again");
        continue;
       }

From 2d4f5436564c8fb733e3d50eda3cc592e2e7a979 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 21 Oct 2020 17:32:19 +0200
Subject: [PATCH 51/57] Shorter timeout

---
 main.cc | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/main.cc b/main.cc
index aad1cbe0..f26b99ca 100644
--- a/main.cc
+++ b/main.cc
@@ -223,10 +223,13 @@ int main(int argc, char** argv){
       logger->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
       fOptions = std::make_shared(logger, mode,
           hostname, suri, dbname, override_json);
-      if (duration_cast(system_clock::now()-ack_time).count() > 9000){
+      int dt = duration_cast(system_clock::now()-ack_time).count();
+      if (dt > 2000){
         logger->Entry(MongoLog::Warning,
            "Took too long to pull the config docs, try again");
        continue;
+      } else {
+        fLog->Entry(MongoLog::Local, "Took %i ms to load config", dt);
       }
       if(controller->Arm(fOptions) != 0){
         logger->Entry(MongoLog::Error, "Failed to initialize electronics");
         controller->Stop();

From 737bc5614aa09a14a5d877a217aec191321b54f8 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 21 Oct 2020 17:33:04 +0200
Subject: [PATCH 52/57] Last time I make that typo

---
 main.cc | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/main.cc b/main.cc
index f26b99ca..c759ab30 100644
--- a/main.cc
+++ b/main.cc
@@ -122,7 +122,7 @@ int main(int argc, char** argv){
   mongocxx::collection dac_collection = db["dac_calibration"];
 
   // Logging
-  auto logger = std::make_shared(log_retention, log_dir, suri, dbname, "log", hostname);
+  auto fLog = std::make_shared(log_retention, log_dir, suri, dbname, "log", hostname);
 
   //Options
   std::shared_ptr fOptions;
@@ -131,9 +131,9 @@ int main(int argc, char** argv){
   // boards and tracking the status
   std::unique_ptr controller;
   if (cc)
-    controller = std::make_unique(logger, hostname);
+    controller = std::make_unique(fLog, hostname);
   else
-    controller = std::make_unique(logger, hostname);
+    controller = std::make_unique(fLog, hostname);
   std::thread status_update(&UpdateStatus, suri, dbname, std::ref(controller));
 
   // Sort oldest to newest
@@ -156,7 +156,7 @@ int main(int argc, char** argv){
           );
 
       for(auto doc : cursor) {
-        logger->Entry(MongoLog::Debug, "Found a doc with command %s",
+        fLog->Entry(MongoLog::Debug, "Found a doc with command %s",
             doc["command"].get_utf8().value.to_string().c_str());
         // Very first thing: acknowledge we've seen the command. If the command
         // fails then we still acknowledge it because we tried
@@ -180,7 +180,7 @@ int main(int argc, char** argv){
         }
         catch (const std::exception &e){
           //LOG
-          logger->Entry(MongoLog::Warning, "Received malformed command %s",
+          fLog->Entry(MongoLog::Warning, "Received malformed command %s",
               bsoncxx::to_json(doc).c_str());
         }
 
@@ -191,18 +191,18 @@ int main(int argc, char** argv){
             continue;
           }
           auto now = system_clock::now();
-          logger->Entry(MongoLog::Local, "Ack to start took %i us",
+          fLog->Entry(MongoLog::Local, "Ack to start took %i us",
               duration_cast(now-ack_time).count());
         } else
-          logger->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state (%i)", controller->status());
+          fLog->Entry(MongoLog::Debug, "Cannot start DAQ since not in ARMED state (%i)", controller->status());
         }else if(command == "stop"){
           // "stop" is also a general reset command and can be called any time
           if(controller->Stop()!=0)
-            logger->Entry(MongoLog::Error,
+            fLog->Entry(MongoLog::Error,
                 "DAQ failed to stop. Will continue clearing program memory.");
           auto now = system_clock::now();
-          logger->Entry(MongoLog::Local, "Ack to stop took %i us",
+          fLog->Entry(MongoLog::Local, "Ack to stop took %i us",
               duration_cast(now-ack_time).count());
         } else if(command == "arm"){
           // Can only arm if we're in the idle, arming, or armed state
@@ -216,30 +216,30 @@ int main(int argc, char** argv){
         override_json = bsoncxx::to_json(oopts);
       }
       catch(const std::exception &e){
-        logger->Entry(MongoLog::Debug, "No override options provided, continue without.");
+        fLog->Entry(MongoLog::Debug, "No override options provided, continue without.");
       }
       // Mongocxx types confusing so passing json strings around
       std::string mode = doc["mode"].get_utf8().value.to_string();
-      logger->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
-      fOptions = std::make_shared(logger, mode,
+      fLog->Entry(MongoLog::Local, "Getting options doc for mode %s", mode.c_str());
+      fOptions = std::make_shared(fLog, mode,
           hostname, suri, dbname, override_json);
       int dt = duration_cast(system_clock::now()-ack_time).count();
       if (dt > 2000){
-        logger->Entry(MongoLog::Warning,
+        fLog->Entry(MongoLog::Warning,
            "Took too long to pull the config docs, try again");
        continue;
       } else {
        fLog->Entry(MongoLog::Local, "Took %i ms to load config", dt);
       }
       if(controller->Arm(fOptions) != 0){
-        logger->Entry(MongoLog::Error, "Failed to initialize electronics");
+        fLog->Entry(MongoLog::Error, "Failed to initialize electronics");
         controller->Stop();
       }else{
-        logger->Entry(MongoLog::Debug, "Initialized electronics");
+        fLog->Entry(MongoLog::Debug, "Initialized electronics");
       }
     } // if status is ok
     else
-      logger->Entry(MongoLog::Warning, "Cannot arm DAQ while not 'Idle'");
+      fLog->Entry(MongoLog::Warning, "Cannot arm DAQ while not 'Idle'");
     } else if (command == "quit") b_run = false;
    } // for doc in cursor
   }
@@ -253,7 +253,7 @@ int main(int argc, char** argv){
   status_update.join();
   controller.reset();
   fOptions.reset();
-  logger.reset();
+  fLog.reset();
   exit(0);
 }

From ea66979cf5d63073901f8927333f61095bc84db1 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 21 Oct 2020 18:02:00 +0200
Subject: [PATCH 53/57] Looser timeout

---
 main.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.cc b/main.cc
index c759ab30..8cb4a5b5 100644
--- a/main.cc
+++ b/main.cc
@@ -224,7 +224,7 @@ int main(int argc, char** argv){
       fOptions = std::make_shared(fLog, mode,
           hostname, suri, dbname, override_json);
       int dt = duration_cast(system_clock::now()-ack_time).count();
-      if (dt > 2000){
+      if (dt > 6000){
         fLog->Entry(MongoLog::Warning,
            "Took too long to pull the config docs, try again");
        continue;
       } else {

From 71886194c243944398e7e63fa6632b81fc5540c8 Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Wed, 21 Oct 2020 19:10:16 +0200
Subject: [PATCH 54/57] Timeout too long

---
 main.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.cc b/main.cc
index 8cb4a5b5..551c8823 100644
--- a/main.cc
+++ b/main.cc
@@ -224,7 +224,7 @@ int main(int argc, char** argv){
       fOptions = std::make_shared(fLog, mode,
           hostname, suri, dbname, override_json);
       int dt = duration_cast(system_clock::now()-ack_time).count();
-      if (dt > 6000){
+      if (dt > 3000){
         fLog->Entry(MongoLog::Warning,
            "Took too long to pull the config docs, try again");
        continue;
       } else {

From 4879bbc9315fecdfd15bed165ded53c18c61c35a Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Thu, 22 Oct 2020 14:30:48 +0200
Subject: [PATCH 55/57] If you can't beat em, join em

---
 main.cc | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/main.cc b/main.cc
index 551c8823..a4cb3d83 100644
--- a/main.cc
+++ b/main.cc
@@ -224,13 +224,14 @@ int main(int argc, char** argv){
       fOptions = std::make_shared(fLog, mode,
           hostname, suri, dbname, override_json);
       int dt = duration_cast(system_clock::now()-ack_time).count();
-      if (dt > 3000){
-        fLog->Entry(MongoLog::Warning,
-           "Took too long to pull the config docs, try again");
-       continue;
-      } else {
+      if (dt < 15000) std::this_thread::sleep_for(milliseconds(15000-dt));
+      //if (dt > 3000){
+      //  fLog->Entry(MongoLog::Warning,
+      //     "Took too long to pull the config docs, try again");
+      // continue;
+      //} else {
        fLog->Entry(MongoLog::Local, "Took %i ms to load config", dt);
-      }
+      //}
       if(controller->Arm(fOptions) != 0){
         fLog->Entry(MongoLog::Error, "Failed to initialize electronics");
         controller->Stop();

From 60935cef8fa0bde11b964d43f41fc68b3d2e147c Mon Sep 17 00:00:00 2001
From: Darryl Masson
Date: Fri, 23 Oct 2020 11:30:04 +0200
Subject: [PATCH 56/57] Configurable arm delay

---
 main.cc | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/main.cc b/main.cc
index a4cb3d83..e203f255 100644
--- a/main.cc
+++ b/main.cc
@@ -43,13 +43,15 @@ void UpdateStatus(std::string suri, std::string dbname, std::unique_ptr: id number of this readout instance, required\n"
     << "--uri : full MongoDB URI, required\n"
     << "--db : name of the database to use, default \"daq\"\n"
     << "--logdir : where to write the logs, default pwd\n"
     << "--reader: this instance is a reader\n"
     << "--cc: this instance is a crate controller\n"
+    << "--arm-delay : ms to wait between the ARM command and the arming sequence, default 15000\n"
+    << "--log-retention : how many days to keep logfiles, default 7\n"
     << "--help: print this message\n"
     << "\n";
   return 1;
@@ -67,15 +69,17 @@ int main(int argc, char** argv){
   std::string dbname = "daq", suri = "", sid = "";
   bool reader = false, cc = false;
   int log_retention = 7; // days
-  int c, opt_index;
+  int c(0), opt_index, delay(15000);
   struct option longopts[] = {
-    {"id", required_argument, 0, 0},
-    {"uri", required_argument, 0, 1},
-    {"db", required_argument, 0, 2},
-    {"logdir", required_argument, 0, 3},
-    {"reader", no_argument, 0, 4},
-    {"cc", no_argument, 0, 5},
-    {"help", no_argument, 0, 6}
+    {"id", required_argument, 0, c++},
+    {"uri", required_argument, 0, c++},
+    {"db", required_argument, 0, c++},
+    {"logdir", required_argument, 0, c++},
+    {"reader", no_argument, 0, c++},
+    {"cc", no_argument, 0, c++},
+    {"arm-delay", required_argument, 0, c++},
+    {"log-retention", required_argument, 0, c++},
+    {"help", no_argument, 0, c++}
   };
   while ((c = getopt_long(argc, argv, "", longopts, &opt_index)) != -1) {
     switch(c) {
@@ -92,6 +96,10 @@ int main(int argc, char** argv){
       case 5:
         cc = true; break;
       case 6:
+        delay = std::stoi(optarg); break;
+      case 7:
+        log_retention = std::stoi(optarg); break;
+      case 8:
       default:
         std::cout<<"Received unknown arg\n";
         return PrintUsage();
     }
   }
   if (suri == "" || sid == "") return PrintUsage();
   if (reader == cc) {
-    std::cout<<"Specify --reader OR --cc\n";
+    std::cout<<"Specify --reader XOR --cc\n";
     return 1;
   }

From e9f0c11e90ede8b2991520b455c8fefdb5854234 Mon Sep 17 00:00:00 2001
From: xedaq
Date: Tue, 27 Oct 2020 08:42:56 +0100
Subject: [PATCH 57/57] Tweaks

---
 dispatcher/MongoConnect.py | 2 --
 dispatcher/config.ini | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/dispatcher/MongoConnect.py b/dispatcher/MongoConnect.py
index f45558d4..f84e45cb 100644
--- a/dispatcher/MongoConnect.py
+++ b/dispatcher/MongoConnect.py
@@ -328,8 +328,6 @@ def GetRunMode(self, mode):
                 del incdoc[field]
             newdoc.update(incdoc)
             return newdoc
-        except NotMasterError:
-            self.log.error('Database snafu')
         except Exception as E:
             # LOG ERROR
             self.log.error("Got a %s exception in doc pulling: %s" % (type(E), E))
diff --git a/dispatcher/config.ini b/dispatcher/config.ini
index 4ea3c28a..5e971596 100644
--- a/dispatcher/config.ini
+++ b/dispatcher/config.ini
@@ -10,7 +10,7 @@ ArmTimeout = 40
 
 # How long since a client's last check-in until we consider
 # it to be 'timing out'
-ClientTimeout = 10
+ClientTimeout = 15
 
 # Database URIs. Please store DB password in the MONGO_PASSWORD
 # environment variable and runs DB password as RUNS_MONGO_PASSWORD
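As a rough illustration of how the dispatcher timeouts above are meant to be used, here is a minimal sketch. The section name 'DEFAULT' and the seconds-based comparison are assumptions; the real logic lives in dispatcher.py and MongoConnect.py.

```python
import configparser
import time

config = configparser.ConfigParser()
config.read('config.ini')
# 'DEFAULT' is an assumed section name; use the actual section from config.ini.
client_timeout = int(config['DEFAULT'].get('ClientTimeout', 15))
arm_timeout = int(config['DEFAULT'].get('ArmTimeout', 40))

def client_timing_out(last_checkin_unix):
    """True if a host's last check-in is older than ClientTimeout seconds."""
    return (time.time() - last_checkin_unix) > client_timeout
```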