@comment{Cleaned auto-exported entry: removed empty "and and" name tokens, rejoined
  compound surnames (Chibante Barroso, Rodriguez Navarro, Vande Vyvre) that were
  split across name separators, double-braced the corporate author, fixed the
  OCR-garbled title suffix "Databasesac", and repaired OCR punctuation in the
  abstract. Citation key kept unchanged so existing \cite commands still resolve.
  NOTE(review): pages = {361+} (open-ended) and publisher abbreviation retained
  from the source record — confirm exact page range against IEEE Xplore.}
@inproceedings{3140740,
  author    = {Altini, V. and Carena, F. and Carena, W. and Chapeland, S. and
               Chibante Barroso, V. and Costa, F. and Divia, R. and Frauman, M. and
               Fuchs, U. and Makhlyueva, I. and Rademakers, O. and
               Rodriguez Navarro, D. and Roukoutakis, F. and Schossmaier, K. and
               Soos, C. and Telesca, A. and Vande Vyvre, P. and von Haller, B. and
               {ALICE Collaboration}},
  title     = {The {ALICE} {DAQ} Online Databases},
  booktitle = {2009 16th {IEEE-NPSS} Real Time Conference},
  year      = {2009},
  pages     = {361+},
  publisher = {IEEE Computer Society},
  doi       = {10.1109/RTC.2009.5322155},
  abstract  = {ALICE (A Large Ion Collider Experiment) is the heavy-ion detector
               designed to study the physics of strongly interacting matter and the
               quark-gluon plasma at the CERN Large Hadron Collider (LHC). The ALICE
               Data Acquisition system (DAQ) is made of a large number of distributed
               hardware and software components, which rely on several online
               databases: the configuration database, describing the counting room
               machines, some detector-specific electronics settings and the DAQ and
               Experiment Control System runtime parameters; the log database,
               centrally collecting reports from running processes; the experiment
               logbook, tracking the run statistics filled automatically and the
               operator entries; the online archive of constantly updated data
               quality monitoring reports; the file indexing services, including the
               status of transient files for permanent storage and the calibration
               results for offline export; the user guides (Wiki); test databases to
               check the interfaces with external components; reference data sets
               used to restore known configurations. With 35 GB of online data hosted
               on a MySQL server and organized in more than 500 relational tables for
               a total of 40 million rows, this information is populated and
               accessible through various frontends, including a C library for
               efficient repetitive access, Tcl/Tk GUIs for configuration editors and
               log browser, HTML/PHP pages for the logbook, and command line tools
               for scripting and expert debugging. Exhaustive hardware benchmarks
               have been conducted to select the appropriate database server
               architecture. Secure access from private and general networks was
               implemented. Ad-hoc monitoring and backup mechanisms have been
               designed and deployed. We discuss the implementation of these complex
               databases and how the inhomogeneous requirements have been addressed.
               We also review the performance analysis outcome after more than one
               year in production and show results of data mining from this central
               information source.},
}