OMR-Research.bib

@inproceedings{Achankunju2018,
  author = {Achankunju, Sanu Pulimootil},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Music Search Engine from Noisy {OMR} Data},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {23--24},
  file = {:pdfs/2018 - Music Search Engine from Noisy OMR Data.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Adamska2015,
  author = {Adamska, Julia and Piecuch, Mateusz and Podg{\'o}rski, Mateusz and Walkiewicz, Piotr and Lukasik, Ewa},
  booktitle = {Computer Information Systems and Industrial Management},
  title = {Mobile System for Optical Music Recognition and Music Sound Generation},
  year = {2015},
  address = {Cham},
  editor = {Saeed, Khalid and Homenda, Wladyslaw},
  pages = {571--582},
  publisher = {Springer International Publishing},
  abstract = {The paper presents a mobile system for generating a melody based on a photo of a musical score. The client-server architecture was applied. The client role is designated to a mobile application responsible for taking a photo of a score, sending it to the server for further processing and playing mp3 file received from the server. The server role is to recognize notes from the image, generate mp3 file and send it to the client application. The key element of the system is the program realizing the algorithm of notes recognition. It is based on the decision trees and characteristics of the individual symbols extracted from the image. The system is implemented in the Windows Phone 8 framework and uses a cloud operating system Microsoft Azure. It enables easy archivization of photos, recognized notes in the Music XML format and generated mp3 files. An easy transition to other mobile operating systems is possible as well as processing multiple music collections scans.},
  affiliation = {Institute of Computing Science, Poznan University of Technology, Poznań, Poland},
  author_keywords = {Mobile applications; Omr; Optical music recognition; Windows phone},
  doi = {10.1007/978-3-319-24369-6_48},
  file = {:pdfs/2015 - Mobile System for Optical Music Recognition.pdf:PDF},
  isbn = {978-3-319-24369-6}
}
@techreport{AlfaroContreras2020,
  author = {Alfaro-Contreras, Mar{\'{i}}a and Calvo-Zaragoza, Jorge and I{\~{n}}esta, Jos{\'{e}} M.},
  institution = {Departamento de Lenguajes y Sistemas Informáticos, Universidad de Alicante, Spain},
  title = {Reconocimiento hol{\'{i}}stico de partituras musicales},
  year = {2020},
  file = {:pdfs/2020 - Reconocimiento Holistico De Partituras Musicales.pdf:PDF},
  language = {Spanish},
  url = {https://rua.ua.es/dspace/bitstream/10045/108270/1/Reconocimiento_holistico_de_partituras_musicales.pdf}
}
@inproceedings{AlfaroContreras2021,
  author = {Alfaro-Contreras, Mar{\'{i}}a and Valero-Mas, Jose J. and I{\~{n}}esta, Jos{\'{e}} Manuel},
  booktitle = {3rd International Workshop on Reading Music Systems},
  title = {Neural architectures for exploiting the components of Agnostic Notation in Optical Music Recognition},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {33--37},
  file = {:pdfs/2021 - Neural Architectures for Exploiting the Components of Agnostic Notation in Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{AlfaroContreras2023,
  author = {Alfaro-Contreras, Mar{\'{i}}a},
  booktitle = {5th International Workshop on Reading Music Systems},
  title = {Few-Shot Music Symbol Classification via Self-Supervised Learning and Nearest Neighbor},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {39--43},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Few Shot Music Symbol Classification Via Self Supervised Learning and Nearest Neighbor.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@article{Alirezazadeh2014,
  author = {Alirezazadeh, Fatemeh and Ahmadzadeh, Mohammad Reza},
  journal = {Journal of Advanced Computer Science \& Technology},
  title = {Effective staff line detection, restoration and removal approach for different quality of scanned handwritten music sheets},
  year = {2014},
  number = {2},
  pages = {136--142},
  volume = {3},
  doi = {10.14419/jacst.v3i2.3196},
  file = {:pdfs/2014 - Effective staff line detection, restoration and removal approach for different quality of scanned handwritte music sheets.pdf:PDF},
  publisher = {Science Publishing Corporation}
}
@inproceedings{Andronico1982,
  author = {Alfio Andronico and Alberto Ciampa},
  booktitle = {International Computer Music Conference},
  title = {On Automatic Pattern Recognition and Acquisition of Printed Music},
  year = {1982},
  address = {Venice, Italy},
  publisher = {Michigan Publishing},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl = {https://dblp.org/rec/bib/conf/icmc/AndronicoC82},
  file = {:pdfs/1982 - On Automatic Pattern Recognition and Acquisition of Printed Music.pdf:PDF},
  url = {http://hdl.handle.net/2027/spo.bbp2372.1982.024}
}
@inproceedings{Anquetil2000,
  author = {Anquetil, {\'E}ric and Co{\"u}asnon, Bertrand and Dambreville, Fr{\'e}d{\'e}ric},
  booktitle = {Graphics Recognition Recent Advances},
  title = {A Symbol Classifier Able to Reject Wrong Shapes for Document Recognition Systems},
  year = {2000},
  address = {Berlin, Heidelberg},
  editor = {Chhabra, Atul K. and Dori, Dov},
  pages = {209--218},
  publisher = {Springer Berlin Heidelberg},
  abstract = {We propose in this paper a new framework to develop a transparent classifier able to deal with reject notions. The generated classifier can be characterized by a strong reliability without losing good properties in generalization. We show on a musical scores recognition system that this classifier is very well suited to develop a complete document recognition system. Indeed this classifier allows them firstly to extract known symbols in a document (text for example) and secondly to validate segmentation hypotheses. Tests had been successfully performed on musical and digit symbols databases.},
  file = {:pdfs/2000 - A Symbol Classifier Able to Reject Wrong Shapes for Document Recognition Systems.pdf:PDF},
  isbn = {978-3-540-40953-3},
  url = {https://link.springer.com/chapter/10.1007%2F3-540-40953-X_17}
}
@inproceedings{Anstice1996,
  author = {Anstice, Jamie and Bell, Tim and Cockburn, Andy and Setchell, Martin},
  booktitle = {6th Australian Conference on Computer-Human Interaction},
  title = {The design of a pen-based musical input system},
  year = {1996},
  pages = {260--267},
  abstract = {Computerising the task of music editing can avoid a considerable amount of tedious work for musicians, particularly for tasks such as key transposition, part extraction, and layout. However the task of getting the music onto the computer can still be time consuming and is usually done with the help of bulky equipment. This paper reports on the design of a pen-based input system that uses easily-learned gestures to facilitate fast input, particularly if the system must be portable. The design is based on observations of musicians writing music by hand, and an analysis of the symbols in samples of music. A preliminary evaluation of the system is presented, and the speed is compared with the alternatives of handwriting, synthesiser keyboard input, and optical music recognition. Evaluations suggest that the gesture-based system could be approximately three times as fast as other methods of music data entry reported in the literature.},
  doi = {10.1109/OZCHI.1996.560019},
  file = {:pdfs/1996 - The Design of a Pen Based Musical Input System.pdf:PDF},
  keywords = {light pens;pen-based musical input system;music editing;key transposition;part extraction;music layout;time consuming;gesture interface;symbols;handwriting;synthesiser keyboard input;optical music recognition;music data entry;Writing;Keyboards;Proposals;Mice;Liquid crystal displays;Computer science;Handwriting recognition;Music information retrieval;Content based retrieval;Portable computers}
}
@inproceedings{Armand1993,
  author = {Armand, Jean-Pierre},
  booktitle = {2nd International Conference on Document Analysis and Recognition},
  title = {Musical score recognition: A hierarchical and recursive approach},
  year = {1993},
  pages = {906--909},
  abstract = {Musical scores for live music show specific characteristics: large format, orchestral score, bad quality of (photo) copies. Moreover such music is generally handwritten. The author addresses the music recognition problem for such scores, and shows a dedicated filtering that has been developed, both for segmentation and correction of copy defects. The recognition process involves geometrical and topographical parameters evaluation. The whole process (filtering + recognition) is recursively applied on images and sub-images, in a knowledge-based way.},
  doi = {10.1109/ICDAR.1993.395590},
  file = {:pdfs/1993 - Musical Score Recognition_ a Hierarchical and Recursive Approach.pdf:PDF},
  keywords = {image recognition;music;geometrical evaluation;musical score recognition;hierarchical;recursive;live music show specific characteristics;large format;orchestral score;handwritten;music recognition;dedicated filtering;segmentation;correction;copy defects;topographical parameters evaluation;filtering;recognition;images;sub-images;knowledge-based;Filtering;Image segmentation;Music;Head;Image recognition;Digital filters;Morphology;EMP radiation effects;Electronic mail;Classification algorithms}
}
@misc{Audiveris,
  author = {Bitteur, Herv{\'{e}}},
  howpublished = {\url{https://github.com/audiveris}},
  title = {Audiveris},
  year = {2004},
  url = {https://github.com/audiveris}
}
@inproceedings{Baba2012,
  author = {Baba, Tetsuaki and Kikukawa, Yuya and Yoshiike, Toshiki and Suzuki, Tatsuhiko and Shoji, Rika and Kushiyama, Kumiko and Aoki, Makoto},
  booktitle = {ACM SIGGRAPH 2012 Emerging Technologies},
  title = {Gocen: A Handwritten Notational Interface for Musical Performance and Learning Music},
  year = {2012},
  address = {New York, USA},
  pages = {9--9},
  publisher = {ACM},
  acmid = {2343465},
  doi = {10.1145/2343456.2343465},
  file = {:pdfs/2012 - Gocen - A Handwritten Notational Interface for Musical Performance and Learning Music.pdf:PDF},
  isbn = {978-1-4503-1680-4}
}
@article{Bacon1988,
  author = {Bacon, Richard A. and Carter, Nicholas Paul},
  journal = {Physics Bulletin},
  title = {Recognising music automatically},
  year = {1988},
  number = {7},
  pages = {265},
  volume = {39},
  abstract = {Recognising characters typed in at a keyboard is a familiar task to most computers and one at which they excel, except that they (usually) insist on recognising what we have typed, rather than what we meant to type. A number of programs now on the market, however, go rather beyond merely recognising keystrokes on a keyboard, to actually recognising printed words on paper.},
  file = {:pdfs/1988 - Recognising Music Automatically.pdf:PDF},
  url = {http://stacks.iop.org/0031-9112/39/i=7/a=013}
}
@misc{Bainbridge1991,
  author = {Bainbridge, David},
  title = {Preliminary experiments in musical score recognition},
  year = {1991},
  address = {Edinburgh, Scotland},
  comment = {B.Sc. Thesis},
  school = {University of Edinburgh}
}
@techreport{Bainbridge1994,
  author = {Bainbridge, David},
  institution = {University of Canterbury},
  title = {A complete optical music recognition system: Looking to the future},
  year = {1994},
  file = {:pdfs/1994 - A Complete Optical Music Recognition System - Looking to the Future.pdf:PDF},
  url = {https://ir.canterbury.ac.nz/handle/10092/14874}
}
@techreport{Bainbridge1994a,
  author = {Bainbridge, David},
  institution = {Department of Computer Science, University of Canterbury},
  title = {Optical music recognition: Progress report 1},
  year = {1994},
  file = {:pdfs/1994 - Optical Music Recognition - Progress Report 1.pdf:PDF},
  url = {http://hdl.handle.net/10092/9670}
}
@article{Bainbridge1996,
  author = {Bainbridge, David and Bell, Tim},
  journal = {Australian Computer Science Communications},
  title = {An extensible optical music recognition system},
  year = {1996},
  pages = {308--317},
  volume = {18},
  booktitle = {The Nineteenth Australasian computer science conference},
  comment = {Last seen 07.04.2017},
  file = {:pdfs/1997 - An extensible Optical Music Recognition system.pdf:PDF},
  publisher = {University of Canterbury},
  url = {http://www.cs.waikato.ac.nz/~davidb/publications/acsc96/final.html}
}
@inproceedings{Bainbridge1997,
  author = {Bainbridge, David and Bell, Tim},
  booktitle = {6th International Conference on Image Processing and its Applications},
  title = {Dealing with Superimposed Objects in Optical Music Recognition},
  year = {1997},
  number = {443},
  pages = {756--760},
  abstract = {Optical music recognition ({OMR}) involves identifying musical symbols on a scanned sheet of music, and interpreting them so that the music can either be played by the computer, or put into a music editor. Applications include providing an automatic accompaniment, transposing or extracting parts for individual instruments, and performing an automated musicological analysis of the music. A key problem with music recognition, compared with character recognition, is that symbols very often overlap on the page. The most significant form of this problem is that the symbols are superimposed on a five-line staff. Although the staff provides valuable positional information, it creates ambiguity because it is difficult to determine whether a pixel would be black or white if the staff line was not there. The other main difference between music recognition and character recognition is the set of permissible symbols. In text, the alphabet size is fixed. Conversely, in music notation there is no standard "alphabet" of shapes, with composers inventing new notation where necessary, and music for particular instruments using specialised notation where appropriate. The focus of this paper is on techniques we have developed to deal with superimposed objects.},
  doi = {10.1049/cp:19970997},
  file = {:pdfs/1997 - Dealing with Superimposed Objects in Optical Music Recognition.pdf:PDF},
  isbn = {0 85296 692 X},
  issn = {0537-9989},
  keywords = {m, optical music recognition, superimposed objects, to classify}
}
@phdthesis{Bainbridge1997a,
  author = {Bainbridge, David},
  school = {University of Canterbury},
  title = {Extensible optical music recognition},
  year = {1997},
  file = {:pdfs/1997 - Extensible Optical Music Recognition.pdf:PDF},
  pages = {112},
  url = {http://hdl.handle.net/10092/9420}
}
@incollection{Bainbridge1997b,
  author = {Bainbridge, David and Carter, Nicholas Paul},
  booktitle = {Handbook of Character Recognition and Document Image Analysis},
  publisher = {World Scientific},
  title = {Automatic reading of music notation},
  year = {1997},
  address = {Singapore},
  editor = {Bunke, H. and Wang, P.},
  pages = {583--603},
  abstract = {The aim of Optical Music Recognition (OMR) is to convert optically scanned pages of music into a machine-readable format. In this tutorial level discussion of the topic, an historical background of work is presented, followed by a detailed explanation of the four key stages to an OMR system: stave line identification, musical object location, symbol identification, and musical understanding. The chapter also shows how recent work has addressed the issues of touching and fragmented objects—objectives that must be solved in a practical OMR system. The report concludes by discussing remaining problems, including measuring accuracy.},
  doi = {10.1142/9789812830968_0022},
  file = {:pdfs/1997 - Automatic Reading of Music Notation.pdf:PDF}
}
@inproceedings{Bainbridge1998,
  author = {Bainbridge, David and Inglis, Stuart},
  booktitle = {Data Compression Conference},
  title = {Musical image compression},
  year = {1998},
  pages = {209--218},
  abstract = {Optical music recognition aims to convert the vast repositories of sheet music in the world into an on-line digital format. In the near future it will be possible to assimilate music into digital libraries and users will be able to perform searches based on a sung melody in addition to typical text-based searching. An important requirement for such a system is the ability to reproduce the original score as accurately as possible. Due to the huge amount of sheet music available, the efficient storage of musical images is an important topic of study. This paper investigates whether the "knowledge" extracted from the optical music recognition (OMR) process can be exploited to gain higher compression than the JBIG international standard for bi-level image compression. We present a hybrid approach where the primitive shapes of music extracted by the optical music recognition process -- note heads, note stems, staff lines and so forth -- are fed into a graphical symbol based compression scheme originally designed for images containing mainly printed text. Using this hybrid approach the average compression rate for a single page is improved by 3.5\% over JBIG. When multiple pages with similar typography are processed in sequence, the file size is decreased by 4--8\%. The relevant background to both optical music recognition and textual image compression is presented. Experiments performed on 66 test images are described, outlining the combinations of parameters that were examined to give the best results.},
  doi = {10.1109/DCC.1998.672149},
  file = {:pdfs/1998 - Musical Image Compression.pdf:PDF},
  issn = {1068-0314},
  keywords = {music;optical character recognition;image coding;data compression;musical image compression;optical music recognition;sheet music;on-line digital format;digital libraries;text-based searching;music score;musical image storage;JBIG international standard;bi-level image compression;hybrid approach;primitive shapes;note heads;note stems;staff lines;graphical symbol based compression;printed text;average compression rate;file size;textual image compression;experiments;Image coding;Image recognition;Image storage;Image converters;Software libraries;Ordinary magnetoresistance;Shape;Text recognition;Head;Optical design}
}
@inproceedings{Bainbridge1999,
  author = {Bainbridge, David and Wijaya, K.},
  booktitle = {7th International Conference on Image Processing and its Applications},
  title = {Bulk processing of optically scanned music},
  year = {1999},
  pages = {474--478},
  publisher = {Institution of Engineering and Technology},
  abstract = {For many years now optical music recognition (OMR) has been advocated as the leading methodology for transferring the vast repositories of music notation from paper to digital database. Other techniques exist for acquiring music on-line; however, these methods require operators with musical and computer skills. The notion, therefore, of an entirely automated process through OMR is highly attractive. It has been an active area of research since its inception in 1966 (Pruslin), and even though there has been the development of many systems with impressively high accuracy rates it is surprising to note that there is little evidence of large collections being processed with the technology -- work by Carter (1994) and Bainbridge and Carter (1997) being the only known notable exception. This paper outlines some of the insights gained, and algorithms implemented, through the practical experience of converting collections in excess of 400 pages. In doing so, the work demonstrates that there are additional factors not currently considered by other research centres that are necessary for OMR to reach its full potential.},
  affiliation = {Waikato Univ., Hamilton},
  doi = {10.1049/cp:19990367},
  keywords = {OMR;optical music recognition;optically scanned music;bulk processing;music notation;digital database;},
  url = {http://digital-library.theiet.org/content/conferences/10.1049/cp_19990367}
}
@article{Bainbridge2001,
  author = {Bainbridge, David and Bell, Tim},
  journal = {Computers and the Humanities},
  title = {The Challenge of Optical Music Recognition},
  year = {2001},
  issn = {1572-8412},
  number = {2},
  pages = {95--121},
  volume = {35},
  abstract = {This article describes the challenges posed by optical music recognition -- a topic in computer science that aims to convert scanned pages of music into an on-line format. First, the problem is described; then a generalised framework for software is presented that emphasises key stages that must be solved: staff line identification, musical object location, musical feature classification, and musical semantics. Next, significant research projects in the area are reviewed, showing how each fits the generalised framework. The article concludes by discussing perhaps the most open question in the field: how to compare the accuracy and success of rival systems, highlighting certain steps that help ease the task.},
  doi = {10.1023/A:1002485918032},
  file = {:pdfs/2001 - The challenge of optical music recognition.pdf:PDF},
  isbn = {0010-4817},
  keywords = {document image analysis, musical data acquisition, optical music recognition, pattern, to classify}
}
@inproceedings{Bainbridge2001a,
  author = {Bainbridge, David and Bernbom, Gerry and Davidson, Mary Wallace and Dillon, Andrew P. and Dovey, Matthey and Dunn, Jon W. and Fingerhut, Michael and Fujinaga, Ichiro and Isaacson, Eric J.},
  booktitle = {1st ACM/IEEE-CS Joint Conference on Digital Libraries},
  title = {Digital Music Libraries --- Research and Development},
  year = {2001},
  address = {Roanoke, Virginia, USA},
  pages = {446--448},
  doi = {10.1145/379437.379765},
  file = {:pdfs/2001 - Digital Music Libraries Research and Development.pdf:PDF}
}
@article{Bainbridge2003,
  author = {Bainbridge, David and Bell, Tim},
  journal = {Software: Practice and Experience},
  title = {A music notation construction engine for optical music recognition},
  year = {2003},
  issn = {1097-024X},
  number = {2},
  pages = {173--200},
  volume = {33},
  abstract = {Optical music recognition (OMR) systems are used to convert music scanned from paper into a format suitable for playing or editing on a computer. These systems generally have two phases: recognizing the graphical symbols (such as note-heads and lines) and determining the musical meaning and relationships of the symbols (such as the pitch and rhythm of the notes). In this paper we explore the second phase and give a two-step approach that admits an economical representation of the parsing rules for the system. The approach is flexible and allows the system to be extended to new notations with little effort—the current system can parse common music notation, Sacred Harp notation and plainsong. It is based on a string grammar and a customizable graph that specifies relationships between musical objects. We observe that this graph can be related to printing as well as recognizing music notation, bringing the opportunity for cross-fertilization between the two areas of research.},
  doi = {10.1002/spe.502},
  file = {:pdfs/2003 - A music notation construction engine for optical music recognition - Bainbridge and Bell.pdf:PDF},
  keywords = {optical music recognition, music notation construction, definite clause grammars, graph traversal},
  publisher = {John Wiley \& Sons, Ltd.}
}
@inproceedings{Bainbridge2006,
  author = {Bainbridge, David and Bell, Tim},
  booktitle = {7th International Conference on Music Information Retrieval},
  title = {Identifying music documents in a collection of images},
  year = {2006},
  address = {Victoria, Canada},
  pages = {47--52},
  abstract = {Digital libraries and search engines are now well-equipped to find images of documents based on queries. Many images of music scores are now available, often mixed up with textual documents and images. For example, using the Google “images” search feature, a search for “Beethoven” will return a number of scores and manuscripts as well as pictures of the composer. In this paper we report on an investigation into methods to mechanically determine if a particular document is indeed a score, so that the user can specify that only musical scores should be returned. The goal is to find a minimal set of features that can be used as a quick test that will be applied to large numbers of documents. A variety of filters were considered, and two promising ones (run-length ratios and Hough transform) were evaluated. We found that a method based around run-lengths in vertical scans (RL) out-performs a comparable algorithm using the Hough transform (HT). On a test set of 1030 images, RL achieved recall and precision of 97.8\% and 88.4\% respectively while HT achieved 97.8\% and 73.5\%. In terms of processor time, RL was more than five times as fast as HT.},
  file = {:pdfs/2006 - Identifying Music Documents in a Collection of Images.pdf:PDF},
  url = {http://hdl.handle.net/10092/141}
}
@inproceedings{Bainbridge2014,
  author = {Bainbridge, David and Hu, Xiao and Downie, J. Stephen},
  booktitle = {1st International Workshop on Digital Libraries for Musicology},
  title = {A Musical Progression with Greenstone: How Music Content Analysis and Linked Data is Helping Redefine the Boundaries to a Music Digital Library},
  year = {2014},
  publisher = {Association for Computing Machinery},
  abstract = {Despite the recasting of the web's technical capabilities through Web 2.0, conventional digital library software architectures -- from which many of our leading Music Digital Libraries (MDLs) are formed -- result in digital resources that are, surprisingly, disconnected from other online sources of information, and embody a "read-only" mindset. Leveraging from Music Information Retrieval (MIR) techniques and Linked Open Data (LOD), in this paper we demonstrate a new form of music digital library that encompasses management, discovery, delivery, and analysis of the musical content it contains. Utilizing open source tools such as Greenstone, audioDB, Meandre, and Apache Jena we present a series of transformations to a musical digital library sourced from audio files that steadily increases the level of support provided to the user for musicological study. While the seed for this work was motivated by better supporting musicologists in a digital library, the developed software architecture alters the boundaries to what is conventionally thought of as a digital library -- and in doing so challenges core assumptions made in mainstream digital library software design.},
  affiliation = {University of Waikato, Hamilton, New Zealand; University of Hong Kong, Hong Kong; University of Illinois, Urbana-Champaign, IL, United States},
  doi = {10.1145/2660168.2660170},
  file = {:pdfs/2014 - A Musical Progression with Greenstone.pdf:PDF},
  isbn = {9781450330022},
  keywords = {Audio acoustics; Open source software; Software architecture; Software design, Digital music libraries; Embedded workflow; Linked open data (LOD); Music content analysis; Music digital libraries; Music information retrieval; Musicology analysis; Technical capabilities, Digital libraries}
}
@inproceedings{Balke2015,
  author = {Balke, Stefan and Achankunju, Sanu Pulimootil and M{\"{u}}ller, Meinard},
  booktitle = {International Conference on Acoustics, Speech and Signal Processing},
  title = {Matching Musical Themes Based on Noisy {OCR} and {OMR} Input},
  year = {2015},
  pages = {703--707},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {In the year 1948, Barlow and Morgenstern published the book 'A Dictionary of Musical Themes', which contains 9803 important musical themes from the Western classical music literature. In this paper, we deal with the problem of automatically matching these themes to other digitally available sources. To this end, we introduce a processing pipeline that automatically extracts from the scanned pages of the printed book textual metadata using Optical Character Recognition (OCR) as well as symbolic note information using Optical Music Recognition (OMR). Due to the poor printing quality of the book, the OCR and OMR results are quite noisy containing numerous extraction errors. As one main contribution, we adjust alignment techniques for matching musical themes based on the OCR and OMR input. In particular, we show how the matching quality can be substantially improved by fusing the OCR- and OMR-based matching results. Finally, we report on our experiments within the challenging Barlow and Morgenstern scenario, which also indicates the potential of our techniques when considering other sources of musical themes such as digital music archives and the world wide web.},
  affiliation = {International Audio Laboratories Erlangen, Friedrich-Alexander-Universität (FAU), Germany},
  doi = {10.1109/ICASSP.2015.7178060},
  file = {:pdfs/2015 - Matching Musical Themes Based on Noisy OCR and OMR Input.pdf:PDF},
  isbn = {9781467369978},
  issn = {1520-6149},
  keywords = {others}
}
@article{Balke2018,
  author = {Balke, Stefan and Dittmar, Christian and Abe{\ss}er, Jakob and Frieler, Klaus and Pfleiderer, Martin and M{\"{u}}ller, Meinard},
  journal = {Frontiers in Digital Humanities},
  title = {Bridging the Gap: Enriching YouTube Videos with Jazz Music Annotations},
  year = {2018},
  issn = {2297-2668},
  pages = {1--11},
  volume = {5},
  abstract = {Web services allow permanent access to music from all over the world. Especially in the case of web services with user-supplied content, e.g., YouTube(TM), the available metadata is often incomplete or erroneous. On the other hand, a vast amount of high-quality and musically relevant metadata has been annotated in research areas such as Music Information Retrieval (MIR). Although they have great potential, these musical annotations are often inaccessible to users outside the academic world. With our contribution, we want to bridge this gap by enriching publicly available multimedia content with musical annotations available in research corpora, while maintaining easy access to the underlying data. Our web-based tools offer researchers and music lovers novel possibilities to interact with and navigate through the content. In this paper, we consider a research corpus called the Weimar Jazz Database (WJD) as an illustrating example scenario. The WJD contains various annotations related to famous jazz solos. First, we establish a link between the WJD annotations and corresponding YouTube videos employing existing retrieval techniques. With these techniques, we were able to identify 988 corresponding YouTube videos for 329 solos out of 456 solos contained in the WJD. We then embed the retrieved videos in a recently developed web-based platform and enrich the videos with solo transcriptions that are part of the WJD. Furthermore, we integrate publicly available data resources from the Semantic Web in order to extend the presented information, for example, with a detailed discography or artists-related information. Our contribution illustrates the potential of modern web-based technologies for the digital humanities, and novel ways for improving access and interaction with digitized multimedia content.},
  doi = {10.3389/fdigh.2018.00001},
  file = {:pdfs/2018 - Bridging the Gap - Enrichting YouTube Videos with Jazz Music Annotations.pdf:PDF}
}
@inproceedings{Baro2016,
  author = {Bar{\'{o}}, Arnau and Riba, Pau and Forn{\'{e}}s, Alicia},
  booktitle = {15th International Conference on Frontiers in Handwriting Recognition},
  title = {Towards the recognition of compound music notes in handwritten music scores},
  year = {2016},
  pages = {465--470},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {The recognition of handwritten music scores still remains an open problem. The existing approaches can only deal with very simple handwritten scores mainly because of the variability in the handwriting style and the variability in the composition of groups of music notes (i.e. compound music notes). In this work we focus on this second problem and propose a method based on perceptual grouping for the recognition of compound music notes. Our method has been tested using several handwritten music scores of the CVC-MUSCIMA database and compared with a commercial Optical Music Recognition (OMR) software. Given that our method is learning-free, the obtained results are promising.},
  affiliation = {Computer Vision Center, Dept. of Computer Science, Universitat Autònoma de Barcelona, Bellaterra, Catalonia, Spain},
  author_keywords = {Hand-drawn symbol recognition; Handwritten music scores; Optical music recognition; Perceptual grouping},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/icfhr/BaroRF16},
  doi = {10.1109/ICFHR.2016.0092},
  file = {:pdfs/2016 - Towards the recognition of compound music notes in handwritten music scores.pdf:PDF},
  isbn = {9781509009817},
  issn = {2167-6445},
  keywords = {Pattern recognition; Software testing, Hand-drawn symbols; Handwriting Styles; Music notes; Music scores; Optical music recognition; Perceptual grouping, Character recognition}
}
@inproceedings{Baro2017,
  author = {Bar{\'o}, Arnau and Riba, Pau and Calvo-Zaragoza, Jorge and Forn{\'e}s, Alicia},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Optical Music Recognition by Recurrent Neural Networks},
  year = {2017},
  address = {Kyoto, Japan},
  organization = {IEEE},
  pages = {25--26},
  doi = {10.1109/ICDAR.2017.260},
  file = {:pdfs/2017 - Optical Music Recognition by Recurrent Neural Networks.pdf:PDF},
  issn = {2379-2140}
}
@inproceedings{Baro2018,
  author = {Bar{\'{o}}, Arnau and Riba, Pau and Forn{\'{e}}s, Alicia},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {A Starting Point for Handwritten Music Recognition},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {5--6},
  file = {:pdfs/2018 - A Starting Point for Handwritten Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@article{Baro2019,
  author = {Bar{\'{o}}, Arnau and Riba, Pau and Calvo-Zaragoza, Jorge and Forn{\'{e}}s, Alicia},
  journal = {Pattern Recognition Letters},
  title = {From Optical Music Recognition to Handwritten Music Recognition: A baseline},
  year = {2019},
  issn = {0167-8655},
  pages = {1--8},
  volume = {123},
  abstract = {Optical Music Recognition (OMR) is the branch of document image analysis that aims to convert images of musical scores into a computer-readable format. Despite decades of research, the recognition of handwritten music scores, concretely the Western notation, is still an open problem, and the few existing works only focus on a specific stage of OMR. In this work, we propose a full Handwritten Music Recognition (HMR) system based on Convolutional Recurrent Neural Networks, data augmentation and transfer learning, that can serve as a baseline for the research community.},
  doi = {10.1016/j.patrec.2019.02.029},
  file = {:pdfs/2019 - From Optical Music Recognition to Handwritten Music Recognition_ a Baseline.pdf:PDF},
  keywords = {Optical music recognition, Handwritten music recognition, Document image analysis and recognition, Deep neural networks, LSTM},
  url = {http://www.sciencedirect.com/science/article/pii/S0167865518303386}
}
@inproceedings{Baro2021,
  author = {Bar{\'{o}}, Arnau and Badal, Carles and Torras, Pau and Forn{\'{e}}s, Alicia},
  booktitle = {3rd International Workshop on Reading Music Systems},
  title = {Handwritten Historical Music Recognition through Sequence-to-Sequence with Attention Mechanism},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {55--59},
  file = {:pdfs/2021 - Handwritten Historical Music Recognition through Sequence to Sequence with Attention Mechanism.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@mastersthesis{Baro-Mas2017,
  author = {Bar{\'{o}}-Mas, Arnau},
  school = {Universitat Aut{\`{o}}noma de Barcelona},
  title = {Optical Music Recognition by Long Short-Term Memory Recurrent Neural Networks},
  year = {2017},
  file = {:pdfs/2017 - Optical Music Recognition by Long Short-Term memory Recurrent Neural Networks.pdf:PDF},
  pages = {1--20},
  url = {http://www.cvc.uab.es/people/afornes/students/Master_ABaro2017.pdf}
}
@inproceedings{Barton2002,
  author = {Barton, Louis W. G.},
  booktitle = {2nd International Conference on Web Delivering of Music},
  title = {The {NEUMES} Project: digital transcription of medieval chant manuscripts},
  year = {2002},
  pages = {211--218},
  abstract = {This paper introduces the NEUMES Project from a top-down perspective. The purpose of the project is to design a software infrastructure for digital transcription of medieval chant manuscripts, such that transcriptions can be interoperable across many types of applications programs. Existing software for modern music does not provide an effective solution. A distributed library of chant document resources for the Web is proposed, to encompass photographic images, transcriptions, and searchable databases of manuscript descriptions. The NEUMES encoding scheme for chant transcription is presented, with NeumesXML serving as a 'wrapper' for transmission, storage, and editorial markup of transcription data. A scenario of use is given and future directions for the project are briefly discussed.},
  doi = {10.1109/WDM.2002.1176213},
  file = {:pdfs/2002 - The NEUMES Project_ Digital Transcription of Medieval Chant Manuscripts.pdf:PDF},
  keywords = {music;hypermedia markup languages;data structures;history;open systems;Internet;NEUMES Project;software infrastructure;digital transcription;medieval chant manuscripts;interoperability;distributed library;chant document resources;Web;photographic images;searchable databases;encoding scheme;NeumesXML;editorial markup;storage;transmission;Writing;Books;Libraries;Inspection;Encoding;Uncertainty;Shape;Scholarships;Lighting}
}
@inproceedings{Barton2005,
  author = {Barton, Louis W. G. and Caldwell, John A. and Jeavons, Peter G.},
  booktitle = {5th ACM/IEEE-CS Joint Conference on Digital Libraries},
  title = {E-library of Medieval Chant Manuscript Transcriptions},
  year = {2005},
  address = {Denver, CO, USA},
  pages = {320--329},
  publisher = {ACM},
  acmid = {1065458},
  doi = {10.1145/1065385.1065458},
  file = {:pdfs/2005 - E Library of Medieval Chant Manuscript Transcriptions.pdf:PDF},
  isbn = {1-58113-876-8},
  keywords = {XML, chant, comparison, data representation, digital libraries, medieval manuscripts, musical notation, search, transcription}
}
@incollection{Baumann1992,
  author = {Baumann, Stephan and Dengel, Andreas},
  booktitle = {Advances in Structural and Syntactic Pattern Recognition},
  publisher = {World Scientific},
  title = {Transforming Printed Piano Music into {MIDI}},
  year = {1992},
  pages = {363--372},
  abstract = {This paper describes a recognition system for transforming printed piano music into the international standard MIDI for acoustic output generation. Because the system is adapted for processing musical scores, it follows a top-down strategy in order to take advantage of the hierarchical structuring. Applying a decision tree classifier and various musical rules, the system comes up with a recognition rate of 80 to 100\% depending on the musical complexity of the input. The resulting symbolic representation in terms of so-called MIDI-EVENTs can be easily understood by musical devices such as synthesizers, expanders, or keyboards.},
  doi = {10.1142/9789812797919_0030},
  file = {:pdfs/1992 - Transforming Printed Piano Music into MIDI.pdf:PDF}
}
@techreport{Baumann1993,
  author = {Baumann, Stephan},
  institution = {Deutsches Forschungszentrum für Künstliche Intelligenz GmbH},
  title = {Document recognition of printed scores and transformation into {MIDI}},
  year = {1993},
  doi = {10.22028/D291-24925},
  file = {:pdfs/1993 - Document Recognition of Printed Scores and Transformation into MIDI.pdf:PDF}
}
@inproceedings{Baumann1995,
  author = {Baumann, Stephan},
  booktitle = {3rd International Conference on Document Analysis and Recognition},
  title = {A Simplified Attributed Graph Grammar for High-Level Music Recognition},
  year = {1995},
  pages = {1080--1083},
  publisher = {IEEE},
  doi = {10.1109/ICDAR.1995.602096},
  file = {:pdfs/1995 - A Simplified Attributed Graph Grammar for High-Level Music Recognition.pdf:PDF},
  isbn = {0-8186-7128-9}
}
@inproceedings{Baumann1995a,
  author = {Baumann, Stephan and Tombre, Karl},
  booktitle = {Document Analysis Systems},
  title = {Report of the line drawing and music recognition working group},
  year = {1995},
  editor = {Spitz, A. Lawrence and Dengel, Andreas},
  pages = {1080--1083},
  doi = {10.1142/9789812797933}
}
@inproceedings{Bellini2001,
  author = {Bellini, Pierfrancesco and Bruno, Ivan and Nesi, Paolo},
  booktitle = {1st International Conference on WEB Delivering of Music},
  title = {Optical music sheet segmentation},
  year = {2001},
  pages = {183--190},
  publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},
  abstract = {The optical music recognition problem has been addressed in several ways, obtaining suitable results only when simple music constructs are processed. The most critical phase of the optical music recognition process is the first analysis of the image sheet. The first analysis consists of segmenting the acquired sheet into smaller parts which may be processed to recognize the basic symbols. The segmentation module of the O3{MR} (Object Oriented Optical Music Recognition) system is presented. The proposed approach is based on the adoption of projections for the extraction of basic symbols that constitute a graphic element of the music notation. A set of examples is also included.},
  doi = {10.1109/wdm.2001.990175},
  file = {:pdfs/2001 - Optical Music Sheet Segmentation.pdf:PDF},
  groups = {recognition},
  isbn = {0769512844}
}
@incollection{Bellini2004,
  author = {Bellini, Pierfrancesco and Bruno, Ivan and Nesi, Paolo},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IGI Global},
  title = {An Off-Line Optical Music Sheet Recognition},
  year = {2004},
  pages = {40--77},
  doi = {10.4018/978-1-59140-298-5.ch002},
  file = {:pdfs/2004 - An Off-line Optical Music Sheet Recognition.pdf:PDF},
  groups = {interpretation}
}
@article{Bellini2007,
  author = {Bellini, Pierfrancesco and Bruno, Ivan and Nesi, Paolo},
  journal = {Computer Music Journal},
  title = {Assessing Optical Music Recognition Tools},
  year = {2007},
  number = {1},
  pages = {68--93},
  volume = {31},
  doi = {10.1162/comj.2007.31.1.68},
  file = {:pdfs/2007 - Assessing Optical Music Recognition Tools.pdf:PDF},
  groups = {evaluation},
  publisher = {MIT Press}
}
@incollection{Bellini2008,
  author = {Bellini, Pierfrancesco and Bruno, Ivan and Nesi, Paolo},
  booktitle = {Interactive Multimedia Music Technologies},
  publisher = {IGI Global},
  title = {Optical Music Recognition: Architecture and Algorithms},
  year = {2008},
  address = {Hershey, PA, USA},
  editor = {Ng, Kia and Nesi, Paolo},
  pages = {80--110},
  abstract = {Optical music recognition is a key problem for coding western music sheets in the digital world. This problem has been addressed in several manners obtaining suitable results only when simple music constructs are processed. To this end, several different strategies have been followed, to pass from the simple music sheet image to a complete and consistent representation of music notation symbols (symbolic music notation or representation). Typically, image processing, pattern recognition and symbolic reconstruction are the technologies that have to be considered and applied in several manners in the architecture of the so-called OMR (Optical Music Recognition) systems. In this chapter, the O3MR (Object Oriented Optical Music Recognition) system is presented. It allows producing from the image of a music sheet the symbolic representation and saving it in XML format (WEDELMUSIC XML and MUSICXML). The algorithms used in this process are those of image processing, image segmentation, neural network pattern recognition, and symbolic reconstruction and reasoning. Most of the solutions can be applied in other fields of image understanding. The development of the O3MR solution with all its algorithms has been partially supported by the European Commission, in the IMUTUS Research and Development project, while the related music notation editor has been partially funded by the research and development WEDELMUSIC project of the European Commission. The paper also includes a methodology for the assessment of other OMR systems. The set of metrics proposed has been used to assess the quality of results produced by the O3MR with respect to the best OMR systems on the market.},
  file = {:pdfs/2008 - Optical Music Recognition - Architecture and Algorithms - Sample.pdf:PDF},
  isbn = {9781599041506},
  refid = {24555},
  url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-59904-150-6.ch005}
}
@inproceedings{Beran1999,
  author = {Beran, Tom{\'a}{\v{s}} and Macek, Tom{\'a}{\v{s}}},
  booktitle = {Machine Learning and Data Mining in Pattern Recognition},
  title = {Recognition of Printed Music Score},
  year = {1999},
  editor = {Perner, Petra and Petrou, Maria},
  pages = {174--179},
  publisher = {Springer Berlin Heidelberg},
  abstract = {This article describes our implementation of the Optical Music Recognition System (OMR). The system implemented in our project is based on the binary neural network ADAM. ADAM has been used for recognition of music symbols. Preprocessing was implemented by conventional techniques. We decomposed the OMR process into several phases. The results of these phases are summarized.},
  doi = {10.1007/3-540-48097-8_14},
  file = {:pdfs/1999 - Recognition of Printed Music Score.pdf:PDF},
  isbn = {978-3-540-48097-6}
}
@inproceedings{Blostein1990,
  author = {Blostein, Dorothea and Haken, Lippold},
  booktitle = {10th International Conference on Pattern Recognition},
  title = {Template matching for rhythmic analysis of music keyboard input},
  year = {1990},
  pages = {767--770},
  abstract = {A system that recognizes common rhythmic patterns through template matching is described. The use of template matching gives the user the unusual ability to modify the set of templates used for analysis. This modification effects a tradeoff between the temporal accuracy required of the input and the complexity of the recognizable rhythm patterns that happen to be common in a particular piece of music. The evolving implementation of this algorithm has received heavy use over a six-year period and has proven itself as a practical and reliable input method for fast music transcription. It is concluded that templates demonstrably provide the necessary temporal context for accurate rhythm recognition.},
  doi = {10.1109/ICPR.1990.118213},
  file = {:pdfs/1990 - Template Matching for Rhythmic Analysis of Music Keyboard Input.pdf:PDF},
  keywords = {acoustic signal processing;computerised pattern recognition;computerised signal processing;music;rhythmic pattern recognition;music keyboard input;template matching;music transcription;Multiple signal classification;Keyboards;Timing;Pattern recognition;Music;Rhythm;Computer errors;Councils;Laboratories;Information science}
}
@article{Blostein1991,
  author = {Blostein, Dorothea and Haken, Lippold},
  journal = {Communications of the ACM},
  title = {Justification of Printed Music},
  year = {1991},
  issn = {0001-0782},
  number = {3},
  pages = {88--99},
  volume = {34},
  acmid = {102874},
  address = {New York, NY, USA},
  doi = {10.1145/102868.102874},
  file = {:pdfs/1991 - Justification of Printed Music.pdf:PDF},
  issue_date = {March 1991},
  publisher = {ACM}
}
@incollection{Blostein1992,
  author = {Blostein, Dorothea and Baird, Henry S.},
  booktitle = {Structured Document Image Analysis},
  publisher = {Springer Berlin Heidelberg},
  title = {A Critical Survey of Music Image Analysis},
  year = {1992},
  isbn = {978-3-642-77281-8},
  pages = {405--434},
  abstract = {The research literature concerning the automatic analysis of images of printed and handwritten music notation, for the period 1966 through 1990, is surveyed and critically examined.},
  doi = {10.1007/978-3-642-77281-8_19},
  file = {:pdfs/1992 - A Critical Survey of Music Image Analysis.pdf:PDF}
}
@incollection{Blostein1992a,
  author = {Blostein, Dorothea and Carter, Nicholas Paul},
  booktitle = {Structured Document Image Analysis},
  publisher = {Springer Berlin Heidelberg},
  title = {Recognition of Music Notation: SSPR'90 Working Group Report},
  year = {1992},
  isbn = {978-3-642-77281-8},
  pages = {573--574},
  abstract = {This report summarizes the discussions of the Working Group on the Recognition of Music Notation, of the IAPR 1990 Workshop on Syntactic and Structural Pattern Recognition, Murray Hill, NJ, 13--15 June 1990. The participants were: D. Blostein, N. Carter, R. Haralick, T. Itagaki, H. Kato, H. Nishida, and R. Siromoney. The discussion was moderated by Nicholas Carter and recorded by Dorothea Blostein.},
  doi = {10.1007/978-3-642-77281-8_32},
  file = {:pdfs/1992 - Recognition of Music Notation_ SSPR'90 Working Group Report.pdf:PDF},
  url = {https://doi.org/10.1007/978-3-642-77281-8_32}
}
@article{Blostein1999,
  author = {Blostein, Dorothea and Haken, Lippold},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  title = {Using diagram generation software to improve diagram recognition: a case study of music notation},
  year = {1999},
  issn = {0162-8828},
  number = {11},
  pages = {1121--1136},
  volume = {21},
  abstract = {Diagrams are widely used in society to transmit information such as circuit designs, music, mathematical formulae, architectural plans, and molecular structure. Computers must process diagrams both as images (marks on paper) and as information. A diagram recognizer translates from image to information and a diagram generator translates from information to image. Current technology for diagram generation is ahead of the technology for diagram recognition. Diagram generators have extensive knowledge of notational conventions which relate to readability and aesthetics, whereas current diagram recognizers focus on the hard constraints of the notation. To create a recognizer capable of exploiting layout information, it is expedient to reuse the expertise in existing diagram generators. In particular, we discuss the use of Lime (our editor and generator for music notation) to proofread and correct the raw output of MIDIScan (a third-party commercial recognizer for music notation). Over the past several years, this combination of software has been distributed to thousands of users.},
  doi = {10.1109/34.809106},
  file = {:pdfs/1999 - Using Diagram Generation Software to Improve Diagram Recognition_ a Case Study of Music Notation.pdf:PDF},
  keywords = {music;diagrams;document image processing;image recognition;diagram generation software;diagram recognition;music notation;notational conventions;readability;aesthetics;layout information;Lime;proofreading;correction;raw output;MIDIScan;Computer aided software engineering;Multiple signal classification;Image recognition;Mathematics;Computer errors;Error correction;Character recognition;Software systems;Circuit synthesis;Image analysis}
}
@inproceedings{Bonnici2018,
  author = {Bonnici, Alexandra and Abela, Julian and Zammit, Nicholas and Azzopardi, George},
  booktitle = {ACM Symposium on Document Engineering},
  title = {Automatic Ornament Localisation, Recognition and Expression from Music Sheets},
  year = {2018},
  address = {Halifax, NS, Canada},
  pages = {25:1--25:11},
  publisher = {ACM},
  acmid = {3209536},
  doi = {10.1145/3209280.3209536},
  file = {:pdfs/2018 - Automatic Ornament Localisation, Recognition and Expression from Music Sheets.pdf:PDF},
  isbn = {978-1-4503-5769-2},
  url = {http://doi.acm.org/10.1145/3209280.3209536}
}
@inproceedings{Bountouridis2017,
  author = {Bountouridis, Dimitrios and Wiering, Frans and Brown, Dan and Veltkamp, Remco C.},
  booktitle = {Computational Intelligence in Music, Sound, Art and Design},
  title = {Towards Polyphony Reconstruction Using Multidimensional Multiple Sequence Alignment},
  year = {2017},
  address = {Cham},
  editor = {Correia, Jo{\~a}o and Ciesielski, Vic and Liapis, Antonios},
  pages = {33--48},
  publisher = {Springer International Publishing},
  abstract = {The digitization of printed music scores through the process of optical music recognition is imperfect. In polyphonic scores, with two or more simultaneous voices, errors of duration or position can lead to badly aligned and inharmonious digital transcriptions. We adapt biological sequence analysis tools as a post-processing step to correct the alignment of voices. Our multiple sequence alignment approach works on multiple musical dimensions and we investigate the contribution of each dimension to the correct alignment. Structural information, such as musical phrase boundaries, is of major importance; therefore, we propose the use of the popular bioinformatics aligner Mafft which can incorporate such information while being robust to temporal noise. Our experiments show that a harmony-aware Mafft outperforms sophisticated, multidimensional alignment approaches and can achieve near-perfect polyphony reconstruction.},
  doi = {10.1007/978-3-319-55750-2_3},
  file = {:pdfs/2017 - Towards polyhony reconstruction using multidimensional multiple sequence alignment.pdf:PDF},
  isbn = {978-3-319-55750-2}
}
@inproceedings{Bruder2003,
  author = {Bruder, Ilvio and Finger, Andreas and Heuer, Andreas and Ignatova, Temenushka},
  booktitle = {Digital Libraries: Technology and Management of Indigenous Knowledge for Global Access},
  title = {Towards a Digital Document Archive for Historical Handwritten Music Scores},
  year = {2003},
  address = {Berlin, Heidelberg},
  editor = {Sembok, Tengku Mohd Tengku and Zaman, Halimah Badioze and Chen, Hsinchun and Urs, Shalini R. and Myaeng, Sung-Hyon},
  pages = {411--414},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Contemporary digital libraries and archives of music scores focus mainly on providing efficient storage and access methods for their data. However, digital archives of historical music scores can enable musicologists not only to easily store and access research material, but also to derive new knowledge from existing data. In this paper we present the first steps in building a digital archive of historical music scores from the 17th and 18th century. Along with the architectural and accessibility aspects of the system, we describe an integrated approach for classification and identification of the scribes of music scores.},
  doi = {10.1007/978-3-540-24594-0_41},
  file = {:pdfs/2003 - Towards a Digital Document Archive for Historical Handwritten Music Scores.pdf:PDF},
  isbn = {978-3-540-24594-0}
}
@inproceedings{Bugge2011,
  author = {Bugge, Esben Paul and Juncher, Kim Lundsteen and Mathiasen, Brian Soborg and Simonsen, Jakob Grue},
  booktitle = {12th International Society for Music Information Retrieval Conference},
  title = {Using Sequence Alignment and Voting To Improve Optical Music Recognition From Multiple Recognizers},
  year = {2011},
  pages = {405--410},
  abstract = {Digitalizing sheet music using Optical Music Recognition ({OMR}) is error-prone, especially when using noisy images created from scanned prints. Inspired by {DNA}-sequence alignment, we devise a method to use multiple sequence alignment to automatically compare output from multiple third-party {OMR} tools and perform automatic error-correction of pitch and duration of notes. We perform tests on a corpus of 49 one-page scores of varying quality. Our method on average reduces the amount of errors from an ensemble of 4 commercial {OMR} tools. The method achieves, on average, fewer errors than each recognizer by itself, but statistical tests show that it is significantly better than only 2 of the 4 commercial recognizers. The results suggest that recognizers may be improved somewhat by sequence alignment and voting, but that more elaborate methods may be needed to obtain substantial improvements. All software, scanned music data used for testing, and experiment protocols are open source and available at: http://code.google.com/p/omr-errorcorrection/},
  file = {:pdfs/2011 - Using Sequence Alignment and Voting to Improve Optical Music Recognition from Multiple Recognizers.pdf:PDF},
  isbn = {9780615548654},
  url = {http://www.ismir2011.ismir.net/papers/PS3-9.pdf}
}
@inproceedings{Bui2014,
  author = {Bui, Hoang-Nam and Na, In-Seop and Kim, Soo-Hyung},
  booktitle = {22nd International Conference on Pattern Recognition},
  title = {Staff Line Removal Using Line Adjacency Graph and Staff Line Skeleton for Camera-Based Printed Music Scores},
  year = {2014},
  pages = {2787--2789},
  abstract = {On camera-based music scores, curved and uneven staff-lines tend to
	occur more frequently, and with the loss in performance of binarization
	methods, line thickness variation and space variation between lines
	are inevitable. We propose a novel and effective staff-line removal
	method based on following 3 main ideas. First, the state-of-the-art
	staff-line detection method, Stable Path, is used to extract staff-line
	skeletons of the music score. Second, a line adjacency graph (LAG)
	model is exploited in a different manner of over segmentation to
	cluster pixel runs generated from the run-length encoding (RLE) of
	the image. Third, a two-pass staff-line removal pipeline called filament
	filtering is applied to remove clusters lying on the staff-line.
	Our method shows impressive results on music score images captured
	from cameras, and gives high performance when applied to the ICDAR/GREC
	2013 database.},
  doi = {10.1109/ICPR.2014.480},
  file = {:pdfs/2014 - Staff Line Removal Using Line Adjacency Graph and Staff Line Skeleton for Camera-Based Printed Music Scores.pdf:PDF},
  issn = {1051-4651},
  keywords = {filtering theory;graph theory;image coding;image denoising;image segmentation;music;runlength codes;visual databases;ICDAR-GREC 2013 database;LAG model;binarization methods;camera-based printed music scores;cluster pixel;filament filtering;image RLE;line adjacency graph;line thickness variation;music score images;over segmentation;run-length encoding;space variation;stable path;staff line skeleton;staff-line detection method;two-pass staff-line removal pipeline;Cameras;Databases;Educational institutions;Music;Skeleton;Text analysis;line adjacency graph;music score recognition;optical music recognition;staff-line}
}
@inproceedings{Bulis1992,
  author = {Bulis, Alex and Almog, Roy and Gerner, Moti and Shimony, Uri},
  booktitle = {International Computer Music Conference},
  title = {Computerized recognition of hand-written musical notes},
  year = {1992},
  pages = {110--112},
  file = {:pdfs/1992 - Computerized recognition of hand-written musical notes.pdf:PDF},
  url = {http://hdl.handle.net/2027/spo.bbp2372.1992.029}
}
@article{Bullen2008,
  author = {Bullen, Andrew H.},
  journal = {Code4Lib Journal},
  title = {Bringing Sheet Music to Life: My Experiences with {OMR}},
  year = {2008},
  issn = {1940-5758},
  number = {84},
  volume = {3},
  file = {:pdfs/2008 - Bringing Sheet Music to Life - My Experiences with OMR.pdf:PDF},
  url = {http://journal.code4lib.org/articles/84}
}
@inproceedings{Burgoyne2007,
  author = {Burgoyne, John Ashley and Pugin, Laurent and Eustace, Greg and Fujinaga, Ichiro},
  booktitle = {8th International Conference on Music Information Retrieval},
  title = {A Comparative Survey of Image Binarisation Algorithms for Optical Recognition on Degraded Musical Sources},
  year = {2007},
  file = {:pdfs/2007 - A Comparative Survey of Image Binarisation Algorithms for Optical Recognition on Degraded Musical Sources.pdf:PDF},
  url = {http://ismir2007.ismir.net/proceedings/ISMIR2007_p509_burgoyne.pdf}
}
@inproceedings{Burgoyne2008,
  author = {Burgoyne, John Ashley and Devaney, Johanna and Pugin, Laurent and Fujinaga, Ichiro},
  booktitle = {9th International Conference on Music Information Retrieval},
  title = {Enhanced Bleedthrough Correction for Early Music Documents with Recto-Verso Registration},
  year = {2008},
  address = {Philadelphia, PA},
  pages = {407--412},
  file = {:pdfs/2008 - Enhanced Bleedthrough Correction for Early Music Documents with Recto Verso Registration.pdf:PDF},
  url = {http://www.ismir2008.ismir.net/papers/ISMIR2008_221.pdf}
}
@inproceedings{Burgoyne2009,
  author = {Burgoyne, John Ashley and Ouyang, Yue and Himmelman, Tristan and Devaney, Johanna and Pugin, Laurent and Fujinaga, Ichiro},
  booktitle = {10th International Society for Music Information Retrieval Conference},
  title = {Lyric Extraction and Recognition on Digital Images of Early Music Sources},
  year = {2009},
  address = {Kobe, Japan},
  pages = {723--727},
  file = {:pdfs/2009 - Lyric Extraction and Recognition on Digital Images of Early Music Sources.pdf:PDF},
  url = {http://ismir2009.ismir.net/proceedings/OS8-3.pdf}
}
@incollection{Burgoyne2015,
  author = {Burgoyne, John Ashley and Fujinaga, Ichiro and Downie, J. Stephen},
  booktitle = {A New Companion to Digital Humanities},
  publisher = {Wiley Blackwell},
  title = {Music Information Retrieval},
  year = {2015},
  editor = {Schreibman, Susan and Siemens, Ray and Unsworth, John},
  isbn = {9781118680605},
  pages = {213--228},
  abstract = {Music information retrieval (MIR) is "a multidisciplinary research
	endeavor that strives to develop innovative content-based searching
	schemes, novel interfaces, and evolving networked delivery mechanisms
	in an effort to make the world's vast store of music accessible to
	all." MIR was born from computational musicology in the 1960s and
	has since grown to have links with music cognition and audio engineering,
	a dedicated annual conference (ISMIR) and an annual evaluation campaign
	(MIREX). MIR combines machine learning with expert human knowledge
	to use digital music data - images of music scores, "symbolic" data
	such as MIDI files, audio, and metadata about musical items - for
	information retrieval, classification and estimation, or sequence
	labeling. This chapter gives a brief history of MIR, introduces classical
	MIR tasks from optical music recognition to music recommendation
	systems, and outlines some of the key questions and directions for
	future developments in MIR.},
  affiliation = {Music Cognition Group, University of Amsterdam, Netherlands; Schulich School of Music, McGill University, Canada; Graduate School of Library and Information Science, University of Illinois, United States},
  author_keywords = {Audio engineering; Classification; Computational musicology; Evaluation; ISMIR; Machine learning; MIREX; Music cognition; Music information retrieval (MIR); Sequence labeling},
  correspondence_address1 = {Burgoyne, J.A.; Music Cognition Group, University of Amsterdam, Netherlands},
  doi = {10.1002/9781118680605.ch15},
  file = {:pdfs/2015 - Music Information Retrieval.pdf:PDF},
}
@inproceedings{Burlet2012,
  author = {Burlet, Gregory and Porter, Alastair and Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {13th International Society for Music Information Retrieval Conference},
  title = {Neon.js: Neume Editor Online},
  year = {2012},
  address = {Porto, Portugal},
  pages = {121--126},
  file = {:pdfs/2012 - Neon.js - Neume Editor Online.pdf:PDF},
  url = {http://ismir2012.ismir.net/event/papers/121_ISMIR_2012.pdf}
}
@phdthesis{Byrd1984,
  author = {Byrd, Donald},
  school = {Indiana University},
  title = {Music Notation by Computer},
  year = {1984},
  file = {:pdfs/1984 - Music Notation by Computer.pdf:PDF},
  keywords = {music notation},
  url = {https://dl.acm.org/citation.cfm?id=911809}
}
@article{Byrd2003,
  author = {Byrd, Donald and Isaacson, Eric},
  journal = {Computer Music Journal},
  title = {A Music Representation Requirement Specification for Academia},
  year = {2003},
  issn = {01489267, 15315169},
  number = {4},
  pages = {43--57},
  volume = {27},
  file = {:pdfs/2003 - A Music Representation Requirement Specification for Academia.pdf:PDF},
  publisher = {The MIT Press},
  url = {http://www.jstor.org/stable/3681900}
}
@inproceedings{Byrd2006,
  author = {Byrd, Donald and Schindele, Megan},
  booktitle = {7th International Conference on Music Information Retrieval},
  title = {Prospects for Improving {OMR} with Multiple Recognizers},
  year = {2006},
  pages = {41--46},
  file = {:pdfs/2006 - Prospects for Improving OMR with Multiple Recognizers.pdf:PDF},
  isbn = {1-55058-349-2},
  keywords = {classifier, omr, optical music recognition, to classify},
  url = {http://ismir2006.ismir.net/PAPERS/ISMIR06155_Paper.pdf}
}
@inproceedings{Byrd2009,
  author = {Byrd, Donald},
  booktitle = {Knowledge representation for intelligent music processing},
  title = {Studying Music is Difficult and Important: Challenges of Music Knowledge Representation},
  year = {2009},
  address = {Wadern, Germany},
  editor = {Selfridge-Field, Eleanor and Wiering, Frans and Wiggins, Geraint A.},
  number = {09051},
  organization = {Leibniz-Center for Informatics},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik, Germany},
  series = {Dagstuhl Seminar Proceedings},
  file = {:pdfs/2009 - Studying Music Is Difficult and Important - Challenges of Music Knowledge Representation.pdf:PDF},
  issn = {1862-4405},
  url = {http://drops.dagstuhl.de/opus/volltexte/2009/1987}
}
@techreport{Byrd2010,
  author = {Byrd, Donald and Guerin, William and Schindele, Megan and Knopke, Ian},
  institution = {Indiana University},
  title = {{OMR} Evaluation and Prospects for Improved {OMR} via Multiple Recognizers},
  year = {2010},
  address = {Bloomington, IN, USA},
  file = {:pdfs/2010 - OMR evaluation and prospects for improved OMR via multiple recognizers.pdf:PDF},
  publisher = {Indiana University},
  url = {http://homes.soic.indiana.edu/donbyrd/MROMR2010Pap/OMREvaluation+Prospects4MROMR.doc}
}
@article{Byrd2015,
  author = {Byrd, Donald and Simonsen, Jakob Grue},
  journal = {Journal of New Music Research},
  title = {Towards a Standard Testbed for Optical Music Recognition: Definitions, Metrics, and Page Images},
  year = {2015},
  issn = {0929-8215},
  number = {3},
  pages = {169--195},
  volume = {44},
  abstract = {We posit that progress in Optical Music Recognition (OMR) has been held up for years by the absence of anything resembling the standard testbeds in use in other fields that face difficult evaluation problems. One example of such a field is text information retrieval (IR), where the Text Retrieval Conference (TREC) has annually-renewed IR tasks with accompanying data sets. In music informatics, the Music Information Retrieval Exchange (MIREX), with its annual tests and meetings held during the ISMIR conference, is a close analog to TREC; but MIREX has never had an OMR track or a collection of music such a track could employ. We describe why the absence of an OMR testbed is a problem and how this problem may be mitigated. To aid in the establishment of a standard testbed, we provide (1) a set of definitions for the complexity of music notation; (2) a set of performance metrics for OMR tools that gauge score complexity and graphical quality; and (3) a small corpus of music for use as a baseline for a proper OMR testbed.},
  affiliation = {Indiana University, United States; Department of Computer Science, University of Copenhagen (DIKU), Denmark},
  author_keywords = {empirical evaluation; notation; notation complexity; optical music recognition},
  doi = {10.1080/09298215.2015.1045424},
  file = {:pdfs/2015 - Towards a Standard Testbed for Optical Music Recognition_ Definitions, Metrics, and Page Images.pdf:PDF},
  publisher = {Taylor and Francis Ltd.}
}
@techreport{Byrd2016,
  author = {Byrd, Donald and Isaacson, Eric},
  institution = {Indiana University, Bloomington},
  title = {A Music Representation Requirement Specification for Academia},
  year = {2016},
  file = {:pdfs/2016 - A Music Representation Requirement Specification for Academia.pdf:PDF},
  url = {http://homes.sice.indiana.edu/donbyrd/Papers/MusicRepReqForAcad.doc}
}
@inproceedings{Calvo-Zaragoza2014,
  author = {Calvo-Zaragoza, Jorge and Oncina, Jose},
  booktitle = {22nd International Conference on Pattern Recognition},
  title = {Recognition of Pen-Based Music Notation: The {HOMUS} Dataset},
  year = {2014},
  pages = {3038--3043},
  publisher = {Institute of Electrical \& Electronics Engineers (IEEE)},
  abstract = {A profitable way of digitizing a new musical composition is by using a pen-based (online) system, in which the score is created with the sole effort of the composition itself. However, the development of such systems is still largely unexplored. Some studies have been carried out, but the use of small, particular datasets has prevented objective comparisons between different approaches. To solve this situation, this work presents the Handwritten Online Musical Symbols (HOMUS) dataset, which consists of 15200 samples of 32 types of musical symbols from 100 different musicians. Several alternatives of recognition for the two modalities (online, using the strokes drawn by the pen, and offline, using the image generated after drawing the symbol) are also presented. Some experiments are included, aimed at drawing main conclusions about the recognition of these data. It is expected that this work can establish a binding point in the field of recognition of online handwritten music notation and serve as a baseline for future developments.},
  doi = {10.1109/ICPR.2014.524},
  file = {:pdfs/2014 - Recognition of Pen-Based Music Notation - The HOMUS dataset.pdf:PDF},
  groups = {datasets},
  issn = {1051-4651},
  keywords = {handwritten character recognition;image recognition;information retrieval;light pens;music;HOMUS dataset;data recognition;handwritten online musical symbols dataset;image generation;musical composition digitization;online handwritten music notation recognition;online modality recognition;pen-based music notation recognition;symbol drawing;Error analysis;FCC;Handwriting recognition;Hidden Markov models;Kernel;Music;Support vector machines}
}
@article{Calvo-Zaragoza2015,
  author = {Calvo-Zaragoza, Jorge and Barbancho, Isabel and Tard{\'o}n, Lorenzo J. and Barbancho, Ana M.},
  journal = {Pattern Analysis and Applications},
  title = {Avoiding staff removal stage in optical music recognition: application to scores written in white mensural notation},
  year = {2015},
  issn = {1433-755X},
  number = {4},
  pages = {933--943},
  volume = {18},
  abstract = {Staff detection and removal is one of the most important issues in
	optical music recognition (OMR) tasks since common approaches for
	symbol detection and classification are based on this process. Due
	to its complexity, staff detection and removal is often inaccurate,
	leading to a great number of errors in posterior stages. For this
	reason, a new approach that avoids this stage is proposed in this
	paper, which is expected to overcome these drawbacks. Our approach
	is put into practice in a case study focused on scores written
	in white mensural notation. Symbol detection is performed by using
	the vertical projection of the staves. The cross-correlation operator
	for template matching is used at the classification stage. The goodness
	of our proposal is shown in an experiment in which it attains
	an extraction rate of 96 {\%} and a classification rate of 92 {\%},
	on average. The results found have reinforced the idea of pursuing
	a new research line in OMR systems without the need of the removal
	of staff lines.},
  doi = {10.1007/s10044-014-0415-5},
  file = {:pdfs/2015 - Avoiding staff removal stage in optical music recognition - application to scores written in white mensural notation.pdf:PDF}
}
@article{Calvo-Zaragoza2015a,
  author = {Calvo-Zaragoza, Jorge and Oncina, Jose},
  journal = {Lecture Notes in Computer Science},
  title = {Clustering of strokes from pen-based music notation: An experimental study},
  year = {2015},
  issn = {0302-9743},
  pages = {633--640},
  volume = {9117},
  abstract = {A comfortable way of digitizing a new music composition is by using
	a pen-based recognition system, in which the digital score is created
	with the sole effort of the composition itself. In this kind of system,
	the input consists of a set of pen strokes. However, it is hitherto
	unclear which types of strokes must be considered for
	this task. This paper presents an experimental study on automatic
	labeling of these strokes using the well-known k-medoids algorithm.
	Since recognition of pen-based music scores is highly related to
	stroke recognition, it may be profitable to repeat the process when
	new data is received through user interaction. Therefore, our intention
	is not to propose some stroke labeling but to show which stroke dissimilarities
	perform better within the clustering process. Results show that there
	can be found good methods in the trade-off between cluster complexity
	and classification accuracy, whereas others offer a very poor performance.},
  affiliation = {Departamento de Lenguajes y Sistemas Informáticos, Universidad de Alicante, Alicante, Spain},
  correspondence_address1 = {Calvo-Zaragoza, J.; Departamento de Lenguajes y Sistemas Informáticos, Universidad de Alicante, Spain},
  doi = {10.1007/978-3-319-19390-8_71},
  editor = {Cardoso, Jaime S. and Paredes, Roberto and Pardo, Xos{\'e} M.},
  file = {:pdfs/2015 - Clustering of Strokes from Pen-Based Music Notation - An Experimental Study.pdf:PDF},
  isbn = {9783319193892},
  keywords = {Clustering algorithms; Economic and social effects; Graphical user interfaces; Image analysis, Automatic labeling; Classification accuracy; Clustering process; K-medoids algorithms; Music composition; Poor performance; Recognition systems; Stroke recognition, Pattern recognition},
  publisher = {Springer Verlag}
}
@inproceedings{Calvo-Zaragoza2016c,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {3rd International Workshop on Digital Libraries for Musicology},
  title = {Document Analysis for Music Scores via Machine Learning},
  year = {2016},
  address = {New York, USA},
  organization = {ACM},
  pages = {37--40},
  publisher = {ACM},
  acmid = {2970047},
  doi = {10.1145/2970044.2970047},
  file = {:pdfs/2016 - Document Analysis for Music Scores via Machine Learning.pdf:PDF},
  isbn = {978-1-4503-4751-8},
  keywords = {Document Analysis, Machine Learning, Optical Music Recognition}
}
@inproceedings{Calvo-Zaragoza2016d,
  author = {Calvo-Zaragoza, Jorge and Rizo, David and I{\~{n}}esta, Jos{\'{e}} Manuel},
  booktitle = {17th International Society for Music Information Retrieval Conference},
  title = {Two (note) heads are better than one: pen-based multimodal interaction with music scores},
  year = {2016},
  address = {New York City},
  editor = {Devaney, J. and others},
  pages = {509--514},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/Calvo-ZaragozaR16},
  file = {:pdfs/2016 - Two (Note) Heads are Better than one - Pen-based Multimodal Interaction with Music Scores.pdf:PDF},
  isbn = {978-0-692-75506-8},
  url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/006_Paper.pdf}
}
@article{Calvo-Zaragoza2016e,
  author = {Calvo-Zaragoza, Jorge and Mic{\'o}, Luisa and Oncina, Jose},
  journal = {International Journal on Document Analysis and Recognition},
  title = {Music staff removal with supervised pixel classification},
  year = {2016},
  number = {3},
  pages = {211--219},
  volume = {19},
  abstract = {This work presents a novel approach to tackle the music staff removal task.
	This task is devoted to removing the staff lines from an image of
	a music score while maintaining the symbol information. It represents
	a key step in the performance of most optical music recognition systems.
	In the literature, staff removal is usually solved by means of image
	processing procedures based on the intrinsics of music scores. However,
	we propose to model the problem as a supervised learning classification
	task. Surprisingly, although there is a strong background and a vast
	amount of research concerning machine learning, the classification
	approach has remained unexplored for this purpose. In this context,
	each foreground pixel is labelled as either staff or symbol. We use
	pairs of scores with and without staff lines to train classification
	algorithms. We test our proposal with several well-known classification
	techniques. Moreover, in our experiments no attempt at tuning the
	classification algorithms has been made, but the parameters were
	set to the default setting provided by the classification software
	libraries. The aim of this choice is to show that, even with this
	straightforward procedure, results are competitive with state-of-the-art
	algorithms. In addition, we also discuss several advantages of this
	approach for which conventional methods are not applicable such as
	its high adaptability to any type of music score.},
  doi = {10.1007/s10032-016-0266-2},
  file = {:pdfs/2016 - Music Staff Removal with Supervised Pixel Classification.pdf:PDF},
  keywords = {Artificial intelligence; Image processing; Learning systems; Optical data processing; Supervised learning, Classification algorithm; Classification approach; Classification tasks; Classification technique; Conventional methods; Optical music recognition; Pixel classification; State-of-the-art algorithms, Pixels},
  publisher = {Springer}
}
@article{Calvo-Zaragoza2017,
  author = {Calvo-Zaragoza, Jorge and Pertusa, Antonio and Oncina, Jose},
  journal = {Machine Vision and Applications},
  title = {Staff-line detection and removal using a convolutional neural network},
  year = {2017},
  issn = {1432-1769},
  pages = {1--10},
  abstract = {Staff-line removal is an important preprocessing stage for most optical music recognition systems. Common procedures to solve this task involve image processing techniques. In contrast to these traditional methods based on hand-engineered transformations, the problem can also be approached as a classification task in which each pixel is labeled as either staff or symbol, so that only those that belong to symbols are kept in the image. In order to perform this classification, we propose the use of convolutional neural networks, which have demonstrated an outstanding performance in image retrieval tasks. The initial features of each pixel consist of a square patch from the input image centered at that pixel. The proposed network is trained by using a dataset which contains pairs of scores with and without the staff lines. Our results in both binary and grayscale images show that the proposed technique is very accurate, outperforming both other classifiers and the state-of-the-art strategies considered. In addition, several advantages of the presented methodology with respect to traditional procedures proposed so far are discussed.},
  doi = {10.1007/s00138-017-0844-4},
  file = {:pdfs/2017 - Staff-line detection and removal using a convolutional neural network.pdf:PDF}
}
@inproceedings{Calvo-Zaragoza2017a,
  author = {Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal, Enrique},
  booktitle = {15th International Conference on Frontiers in Handwriting Recognition},
  title = {Early handwritten music recognition with Hidden Markov Models},
  year = {2016},
  pages = {319--324},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {This work presents a statistical method to tackle the Handwritten
	Music Recognition task for Early notation, which comprises more than
	200 different symbols. Unlike previous approaches to deal with music
	notation, our strategy is to perform a holistic recognition without
	any previous segmentation or staff removal process. The input consists
	of a page of a music book, which is processed to extract and normalize
	the staves contained. Then, a feature extraction process is applied
	to define such sections as a sequence of numerical vectors. The recognition
	is based on the use of Hidden Markov Models for the optical processing
	and smoothed N-grams as language model. Experimentation results over
	a historical archive of Hispanic music reported an error around 40
	%, which confirms our proposal as a good starting point taking into
	account the difficulty of the task.},
  author_keywords = {Handwritten music recognition; Hidden markov models; N-grams; Statistical recognition},
  doi = {10.1109/ICFHR.2016.0067},
  file = {:pdfs/2016 - Early Handwritten Music Recognition with Hidden Markov Models.pdf:PDF},
  isbn = {9781509009817},
  issn = {2167-6445},
  keywords = {Character recognition; Computational linguistics; Feature extraction; Markov processes, Historical archive; Language model; Music notation; Music recognition; N-grams; Optical processing; Removal process; Statistical recognition, Hidden Markov models}
}
@inproceedings{Calvo-Zaragoza2017b,
  author = {Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal, Enrique},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Handwritten Music Recognition for Mensural Notation: Formulation, Data and Baseline Results},
  year = {2017},
  address = {Kyoto, Japan},
  pages = {1081--1086},
  abstract = {Music is a key element for cultural transmission, and so large collections of music manuscripts have been preserved over the centuries. In order to develop computational tools for analysis, indexing and retrieval from these sources, it is necessary to transcribe the content to some machine-readable format. In this paper we discuss the Handwritten Music Recognition problem, which refers to the development of automatic transcription systems for musical manuscripts. We focus on mensural notation, one of the most widespread varieties of Western classical music. For that, we present a labeled corpus containing 576 staves, along with a baseline recognition system based on a combination of hidden Markov models and N-gram language models. The baseline error obtained at symbol level is about 40 % which, given the difficulty of the task, can be considered a good starting point for future developments. Our aim is that these data and preliminary results help to promote this research field, serving as a reference in future developments.},
  doi = {10.1109/ICDAR.2017.179},
  file = {:pdfs/2017 - Handwritten Music Recognition for Mensural Notation - Formulation, Data and Baseline Results.pdf:PDF},
  issn = {2379-2140},
  keywords = {hidden Markov models;music;speech recognition;indexing;retrieval;machine-readable format;automatic transcription systems;musical manuscripts;mensural notation;Western classical music;baseline recognition system;hidden Markov models;baseline error;cultural transmission;computational tools;handwritten music recognition problem;labeled corpus;n-gram language models;Music;Handwriting recognition;Hidden Markov models;Task analysis;Image segmentation;Shape;Natural languages;Handwritten Music Recognition;Optical Music Recognition;Mensural notation;Hidden Markov Models}
}
@inproceedings{Calvo-Zaragoza2017c,
  author = {Calvo-Zaragoza, Jorge and Valero-Mas, Jose J. and Pertusa, Antonio},
  booktitle = {18th International Society for Music Information Retrieval Conference},
  title = {End-to-end Optical Music Recognition using Neural Networks},
  year = {2017},
  address = {Suzhou, China},
  file = {:pdfs/2017 - End-To-End Optical Music Recognition using Neural Networks.pdf:PDF},
  isbn = {978-981-11-5179-8},
  url = {https://ismir2017.smcnus.org/wp-content/uploads/2017/10/34_Paper.pdf}
}
@inproceedings{Calvo-Zaragoza2017d,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {18th International Society for Music Information Retrieval Conference},
  title = {One-step detection of background, staff lines, and symbols in medieval music manuscripts with convolutional neural networks},
  year = {2017},
  address = {Suzhou, China},
  file = {:pdfs/2017 - One-Step Detection of Background, Staff Lines, And Symbols in Medieval Music Manuscripts with Convolutional Neural Networks.pdf:PDF},
  isbn = {978-981-11-5179-8},
  url = {https://ismir2017.smcnus.org/wp-content/uploads/2017/10/162_Paper.pdf}
}
@inproceedings{Calvo-Zaragoza2017e,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {3rd International Conference on Technologies for Music Notation and Representation},
  title = {A machine learning framework for the categorization of elements in images of musical documents},
  year = {2017},
  address = {A Coru{\~{n}}a, Spain},
  publisher = {University of A Coru{\~{n}}a},
  file = {:pdfs/2017 - A Machine Learning Framework for the Categorization of Elements in Images of Musical Documents.pdf:PDF},
  url = {http://www.udc.es/grupos/ln/tenor2017/sections/node/5-unified_categorization.pdf}
}
@inproceedings{Calvo-Zaragoza2017f,
  author = {Calvo-Zaragoza, Jorge and Gallego, Antonio-Javier and Pertusa, Antonio},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Recognition of Handwritten Music Symbols with Convolutional Neural Codes},
  year = {2017},
  address = {Kyoto, Japan},
  pages = {691--696},
  abstract = {There are large collections of music manuscripts preserved over the centuries. In order to analyze these documents it is necessary to transcribe them into a machine-readable format. This process can be done automatically using Optical Music Recognition (OMR) systems, which typically consider segmentation plus classification workflows. This work is focused on the latter stage, presenting a comprehensive study for classification of handwritten musical symbols using Convolutional Neural Networks (CNN). The power of these models lies in their ability to transform the input into a meaningful representation for the task at hand, and that is why we study the use of these models to extract features (Neural Codes) for other classifiers. For the evaluation we consider four datasets containing different configurations and notation styles, along with a number of network models, different image preprocessing techniques and several supervised learning classifiers. Our results show that a remarkable accuracy can be achieved using the proposed framework, which significantly outperforms the state of the art in all datasets considered.},
  doi = {10.1109/ICDAR.2017.118},
  file = {:pdfs/2017 - Recognition of Handwritten Music Symbols with Convolutional Neural Codes.pdf:PDF},
  issn = {2379-2140},
  keywords = {convolution;feature extraction;handwritten character recognition;image classification;image representation;image segmentation;learning (artificial intelligence);music;neural nets;optical character recognition;Convolutional Neural Networks;handwritten musical symbols;Optical Music Recognition systems;machine-readable format;music manuscripts;Music;Feature extraction;Task analysis;Convolutional codes;Training;Support vector machines;Convolutional neural networks;Handwritten Music Symbols;Optical Music Recognition;Convolutional Neural Networks;Neural Codes}
}
@inproceedings{Calvo-Zaragoza2017g,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {7th International Conference on Image Processing Theory, Tools and Applications},
  title = {Pixelwise classification for music document analysis},
  year = {2017},
  pages = {1--6},
  abstract = {Content within musical documents not only contains music symbols but also includes different elements such as staff lines, text, or frontispieces. Before attempting to automatically recognize components in these layers, it is necessary to perform an analysis of the musical documents in order to detect and classify each of these constituent parts. The obstacle for this analysis is the high heterogeneity amongst music collections, especially with ancient documents, which makes it difficult to devise methods that can be generalized to a broader range of sources. In this paper we propose a data-driven document analysis framework based on machine learning that focuses on classifying regions of interest at pixel level. For that, we make use of Convolutional Neural Networks trained to infer the category of each pixel. The main advantage of this approach is that it can be applied regardless of the type of document provided, as long as training data is available. Since this work represents first efforts in that direction, our experimentation focuses on reporting a baseline classification using our framework. The experiments show promising performance, achieving an accuracy around 90% in two corpora of old music documents.},
  doi = {10.1109/IPTA.2017.8310134},
  file = {:pdfs/2017 - Pixelwise Classification for Music Document Analysis.pdf:PDF},
  keywords = {Convolutional neural networks;Layout;Microsoft Windows;Music;Task analysis;Text analysis;Training;Convolutional Neural Network;Document Image Analysis;Music Archives;Optical Music Recognition;Pixel classification}
}
@inproceedings{Calvo-Zaragoza2017h,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {15th International Conference on Machine Vision Applications},
  title = {Pixel-wise binarization of musical documents with convolutional neural networks},
  year = {2017},
  pages = {362--365},
  doi = {10.23919/MVA.2017.7986876},
  file = {:pdfs/2017 - Pixel Wise Binarization of Musical Documents with Convolutional Neural Networks.pdf:PDF}
}
@article{Calvo-Zaragoza2017i,
  author = {Calvo-Zaragoza, Jorge and Oncina, Jose},
  journal = {Expert Systems with Applications},
  title = {Recognition of pen-based music notation with finite-state machines},
  year = {2017},
  issn = {0957-4174},
  pages = {395--406},
  volume = {72},
  abstract = {This work presents a statistical model to recognize pen-based music compositions using stroke recognition algorithms and finite-state machines. The series of strokes received as input is mapped onto a stochastic representation, which is combined with a formal language that describes musical symbols in terms of stroke primitives. Then, a Probabilistic Finite-State Automaton is obtained, which defines probabilities over the set of musical sequences. This model is eventually crossed with a semantic language to avoid sequences that do not make musical sense. Finally, a decoding strategy is applied in order to output a hypothesis about the musical sequence actually written. Several decoding algorithms, stroke similarity measures and probability density estimators are tested and evaluated following different metrics of interest. The results show the goodness of the proposed model, obtaining competitive performance in all metrics and scenarios considered.},
  doi = {10.1016/j.eswa.2016.10.041},
  file = {:pdfs/2017 - Recognition of Pen Based Music Notation with Finite State Machines.pdf:PDF},
  keywords = {Pen-based recognition, Optical music recognition, Finite-State machines}
}
@inproceedings{Calvo-Zaragoza2017j,
  author = {Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {Pattern Recognition and Image Analysis},
  title = {Staff-Line Detection on Grayscale Images with Pixel Classification},
  year = {2017},
  address = {Cham},
  editor = {Alexandre, Lu{\'i}s A. and Salvador S{\'a}nchez, Jos{\'e} and Rodrigues, Jo{\~a}o M. F.},
  pages = {279--286},
  publisher = {Springer International Publishing},
  abstract = {Staff-line detection and removal are important processing steps in most Optical Music Recognition systems. Traditional methods make use of heuristic strategies based on image processing techniques with binary images. However, binarization is a complex process for which it is difficult to achieve perfect results. In this paper we describe a novel staff-line detection and removal method that deals with grayscale images directly. Our approach uses supervised learning to classify each pixel of the image as symbol, staff, or background. This classification is achieved by means of Convolutional Neural Networks. The features of each pixel consist of a square window from the input image centered at the pixel to be classified. As a case study, we performed experiments with the CVC-Muscima dataset. Our approach showed promising performance, outperforming state-of-the-art algorithms for staff-line removal.},
  file = {:pdfs/2017 - Staff Line Detection on Grayscale Images with Pixel Classification.pdf:PDF},
  isbn = {978-3-319-58838-4},
  url = {https://link.springer.com/chapter/10.1007%2F978-3-319-58838-4_31}
}
@inproceedings{Calvo-Zaragoza2017k,
  author = {Calvo-Zaragoza, Jorge and Zhang, K{\'{e}} and Saleh, Zeyad and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Music Document Layout Analysis through Machine Learning and Human Feedback},
  year = {2017},
  month = {Nov},
  pages = {23--24},
  volume = {02},
  abstract = {Music documents often include musical symbols as well as other relevant elements such as staff lines, text, and decorations. To detect and separate these constituent elements, we propose a layout analysis framework based on machine learning that focuses on pixel-level classification of the image. For that, we make use of supervised learning classifiers trained to infer the category of each pixel. In addition, our scenario considers a human-aided computing approach in which the user is part of the recognition loop, providing feedback where relevant errors are made.},
  doi = {10.1109/ICDAR.2017.259},
  issn = {2379-2140},
  keywords = {document image processing;feedback;image classification;learning (artificial intelligence);music;music document layout analysis;machine learning;human feedback;music documents;musical symbols;staff lines;layout analysis framework;supervised learning classifiers;human-aided computing approach;pixel-level image classification;image recognition;Layout;Text analysis;Music;Task analysis;Machine learning algorithms;Algorithm design and analysis;Multiple signal classification;Music Document Layout Analysis;Optical Music Recognition;Machine Learning;Human-aided computing}
}
@article{Calvo-Zaragoza2018,
  author = {Calvo-Zaragoza, Jorge and Rizo, David},
  journal = {Applied Sciences},
  title = {End-to-End Neural Optical Music Recognition of Monophonic Scores},
  year = {2018},
  issn = {2076-3417},
  number = {4},
  volume = {8},
  abstract = {Optical Music Recognition is a field of research that investigates how to computationally decode music notation from images. Despite the efforts made so far, there are hardly any complete solutions to the problem. In this work, we study the use of neural networks that work in an end-to-end manner. This is achieved by using a neural model that combines the capabilities of convolutional neural networks, which work on the input image, and recurrent neural networks, which deal with the sequential nature of the problem. Thanks to the use of the so-called Connectionist Temporal Classification loss function, these models can be directly trained from input images accompanied by their corresponding transcripts into music symbol sequences. We also present the Printed Music Scores dataset, containing more than 80,000 monodic single-staff real scores in common western notation, which is used to train and evaluate the neural approach. In our experiments, it is demonstrated that this formulation can be carried out successfully. Additionally, we study several considerations about the codification of the output musical sequences, the convergence and scalability of the neural models, as well as the ability of this approach to locate symbols in the input score.},
  doi = {10.3390/app8040606},
  file = {:pdfs/2018 - End-to-End Neural Optical Music Recognition of Monophonic Scores.pdf:PDF},
  url = {http://www.mdpi.com/2076-3417/8/4/606}
}
@article{Calvo-Zaragoza2018a,
  author = {Calvo-Zaragoza, Jorge and Castellanos, Francisco J. and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  journal = {Applied Sciences},
  title = {Deep Neural Networks for Document Processing of Music Score Images},
  year = {2018},
  issn = {2076-3417},
  number = {5},
  volume = {8},
  abstract = {There is an increasing interest in the automatic digitization of medieval music documents. Despite efforts in this field, the detection of the different layers of information on these documents still poses difficulties. The use of Deep Neural Networks techniques has reported outstanding results in many areas related to computer vision. Consequently, in this paper, we study the so-called Convolutional Neural Networks (CNN) for performing the automatic document processing of music score images. This process is focused on layering the image into its constituent parts (namely, background, staff lines, music notes, and text) by training a classifier with examples of these parts. A comprehensive experimentation in terms of the configuration of the networks was carried out, which illustrates interesting results as regards both the efficiency and effectiveness of these models. In addition, a cross-manuscript adaptation experiment is presented in which the networks are evaluated on a different manuscript from the one on which they were trained. The results suggest that the CNN is capable of adapting its knowledge, and so starting from a pre-trained CNN reduces (or eliminates) the need for new labeled data.},
  doi = {10.3390/app8050654},
  file = {:pdfs/2018 - Deep Neural Networks for Document Processing of Music Score Images.pdf:PDF},
  keywords = {optical music recognition; music document processing; music score images; medieval manuscripts; convolutional neural networks},
  url = {http://www.mdpi.com/2076-3417/8/5/654}
}
@inproceedings{Calvo-Zaragoza2018b,
  author = {Calvo-Zaragoza, Jorge and Rizo, David},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Camera-PrIMuS: Neural End-to-End Optical Music Recognition on Realistic Monophonic Scores},
  year = {2018},
  address = {Paris, France},
  pages = {248--255},
  file = {:pdfs/2018 - Camera PrIMuS_ Neural End to End Optical Music Recognition on Realistic Monophonic Scores.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/33_Paper.pdf}
}
@inproceedings{Calvo-Zaragoza2018c,
  author = {Calvo-Zaragoza, Jorge},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Why WoRMS?},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {7--8},
  file = {:pdfs/2018 - Why WoRMS.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Calvo-Zaragoza2018d,
  author = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  booktitle = {Graphics Recognition, Current Trends and Evolutions},
  title = {Discussion Group Summary: Optical Music Recognition},
  year = {2018},
  editor = {Forn{\'{e}}s, Alicia and Lamiroy, Bart},
  pages = {152--157},
  publisher = {Springer International Publishing},
  series = {Lecture Notes in Computer Science},
  abstract = {This document summarizes the discussion of the interest group on Optical Music Recognition (OMR) that took place in the 12th IAPR International Workshop on Graphics Recognition, and presents the main conclusions drawn during the session: OMR should revisit how it describes itself, and the OMR community should intensify its collaboration both internally and with other stakeholders.},
  doi = {10.1007/978-3-030-02284-6_12},
  file = {:pdfs/2018 - Discussion Group Summary - Optical Music Recognition.pdf:PDF},
  isbn = {978-3-030-02283-9}
}
@inproceedings{Calvo-Zaragoza2018e,
  author = {Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal, Enrique},
  booktitle = {16th International Conference on Frontiers in Handwriting Recognition},
  title = {Probabilistic Music-Symbol Spotting in Handwritten Scores},
  year = {2018},
  address = {Niagara Falls, USA},
  pages = {558--563},
  abstract = {Content-based search on musical manuscripts is usually performed assuming that there are accurate transcripts of the sources in a symbolic, structured format. Given that current systems for Handwritten Music Recognition are far from offering guarantees about their accuracy, this traditional approach does not represent a scalable scenario. In this work we propose a probabilistic framework for Music-Symbol Spotting (MSS), which allows for content-based music search directly over the images of the manuscripts. By means of statistical recognition systems, a probabilistic index is built upon which the search can be carried out efficiently. Our experiments over a dataset of an Early handwritten music manuscript in Mensural notation demonstrate that this MSS framework can be presented as a promising alternative to the traditional approach for content-based music search.},
  doi = {10.1109/ICFHR-2018.2018.00103},
  file = {:pdfs/2018 - Probabilistic Music Symbol Spotting in Handwritten Scores.pdf:PDF}
}
@article{Calvo-Zaragoza2019,
  author = {Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal, Enrique},
  journal = {Pattern Analysis and Applications},
  title = {Hybrid hidden Markov models and artificial neural networks for handwritten music recognition in mensural notation},
  year = {2019},
  issn = {1433-755X},
  month = {Mar},
  abstract = {In this paper, we present a hybrid approach using hidden Markov models (HMM) and artificial neural networks to deal with the task of handwritten Music Recognition in mensural notation. Previous works have shown that the task can be addressed with Gaussian density HMMs that can be trained and used in an end-to-end manner, that is, without prior segmentation of the symbols. However, the results achieved using that approach are not sufficiently accurate to be useful in practice. In this work, we hybridize HMMs with deep multilayer perceptrons (MLPs), which lead to remarkable improvements in optical symbol modeling. Moreover, this hybrid architecture maintains important advantages of HMMs such as the ability to properly model variable-length symbol sequences through segmentation-free training, and the simplicity and robustness of combining optical models with N-gram language models, which provide statistical a priori information about regularities in musical symbol concatenation observed in the training data. The results obtained with the proposed hybrid MLP-HMM approach outperform previous works by a wide margin, achieving symbol-level error rates around 26{\%}, as compared with about 40{\%} reported in previous works.},
  day = {30},
  doi = {10.1007/s10044-019-00807-1},
  file = {:pdfs/2019 - Hybrid Hidden Markov Models and Artificial Neural Networks for Handwritten Music Recognition in Mensural Notation.pdf:PDF}
}
@article{Calvo-Zaragoza2019a,
  author = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  journal = {Computing Research Repository},
  title = {Understanding Optical Music Recognition},
  year = {2019},
  file = {:pdfs/2019 - Understanding Optical Music Recognition.pdf:PDF},
  url = {https://arxiv.org/abs/1908.03608}
}
@article{Calvo-Zaragoza2019b,
  author = {Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal, Enrique},
  journal = {Pattern Recognition Letters},
  title = {Handwritten Music Recognition for Mensural notation with convolutional recurrent neural networks},
  year = {2019},
  issn = {0167-8655},
  pages = {115--121},
  volume = {128},
  abstract = {Optical Music Recognition is the technology that allows computers to read music notation, which is also referred to as Handwritten Music Recognition when it is applied over handwritten notation. This technology aims at efficiently transcribing written music into a representation that can be further processed by a computer. This is of special interest to transcribe the large amount of music written in early notations, such as the Mensural notation, since they represent largely unexplored heritage for the musicological community. Traditional approaches to this problem are based on complex strategies with many explicit rules that only work for one particular type of manuscript. Machine learning approaches offer the promise of generalizable solutions, based on learning from just labelled examples. However, previous research has not achieved sufficiently acceptable results for handwritten Mensural notation. In this work we propose the use of deep neural networks, namely convolutional recurrent neural networks, which have proved effective in other similar domains such as handwritten text recognition. Our experimental results achieve, for the first time, recognition results that can be considered effective for transcribing handwritten Mensural notation, decreasing the symbol-level error rate of previous approaches from 25.7% to 7.0%.},
  doi = {10.1016/j.patrec.2019.08.021},
  file = {:pdfs/2019 - Handwritten Music Recognition for Mensural Notation with Convolutional Recurrent Neural Networks.pdf:PDF},
  keywords = {Handwritten Music Recognition, Optical Music Recognition, Mensural notation, Convolutional recurrent neural networks},
  url = {http://www.sciencedirect.com/science/article/pii/S0167865519302338}
}
@article{Calvo-Zaragoza2020,
  author = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  journal = {ACM Computing Surveys},
  title = {Understanding Optical Music Recognition},
  year = {2020},
  issn = {0360-0300},
  number = {4},
  volume = {53},
  abstract = {For over 50 years, researchers have been trying to teach computers to read music notation, referred to as Optical Music Recognition (OMR). However, this field is still difficult to access for new researchers, especially those without a significant musical background: Few introductory materials are available, and, furthermore, the field has struggled with defining itself and building a shared terminology. In this work, we address these shortcomings by (1) providing a robust definition of OMR and its relationship to related fields, (2) analyzing how OMR inverts the music encoding process to recover the musical notation and the musical semantics from documents, and (3) proposing a taxonomy of OMR, with most notably a novel taxonomy of applications. Additionally, we discuss how deep learning affects modern OMR research, as opposed to the traditional pipeline. Based on this work, the reader should be able to attain a basic understanding of OMR: its objectives, its inherent structure, its relationship to other fields, the state of the art, and the research opportunities it affords.},
  address = {New York, NY, USA},
  articleno = {77},
  doi = {10.1145/3397499},
  issue_date = {September 2020},
  keywords = {music scores, music notation, Optical music recognition},
  numpages = {35},
  publisher = {Association for Computing Machinery},
  url = {https://doi.org/10.1145/3397499}
}
@inproceedings{Campos2016,
  author = {Bosch Campos, Vicente and Calvo-Zaragoza, Jorge and Toselli, Alejandro H. and Vidal Ruiz, Enrique},
  booktitle = {15th International Conference on Frontiers in Handwriting Recognition},
  title = {Sheet Music Statistical Layout Analysis},
  year = {2016},
  pages = {313--318},
  abstract = {In order to provide access to the contents of ancient music scores
	to researchers, the transcripts of both the lyrics and the musical
	notation are required. Before attempting any type of automatic or
	semi-automatic transcription of sheet music, an adequate layout analysis
	(LA) is needed. This LA must provide not only the locations of the
	different image regions, but also adequate region labels to distinguish
	between different region types such as staff, lyric, etc. To this
	end, we adapt a stochastic framework for LA based on Hidden Markov
	Models that we had previously introduced for detection and classification
	of text lines in typical handwritten text images. The proposed approach
	takes a scanned music score image as input and, after basic preprocessing,
	simultaneously performs region detection and region classification
	in an integrated way. To assess this statistical LA approach several
	experiments were carried out on a representative sample of a historical
	music archive, under different difficulty settings. The results show
	that our approach is able to tackle these structured documents providing
	good results not only for region detection but also for classification
	of the different regions.},
  doi = {10.1109/ICFHR.2016.0066},
  file = {:pdfs/2016 - Sheet Music Statistical Layout Analysis.pdf:PDF},
  issn = {2167-6445},
  keywords = {handwriting recognition;handwritten character recognition;hidden Markov models;image classification;music;statistical analysis;text detection;handwritten text image;hidden Markov model;sheet music;statistical LA;statistical layout analysis;text line classification;text line detection;Feature extraction;Handwriting recognition;Hidden Markov models;Layout;Music;Text recognition;Training;Document Layout Analysis;Hidden Markov Models;text region detection}
}
@inproceedings{Capela2008,
  author = {Capela, Artur and Cardoso, Jaime dos Santos and Rebelo, Ana and Guedes, Carlos},
  booktitle = {International Computer Music Conference},
  title = {Integrated recognition system for music scores},
  year = {2008},
  pages = {3--6},
  file = {:pdfs/2008 - Integrated Recognition System for Music Scores.pdf:PDF},
  url = {http://hdl.handle.net/2027/spo.bbp2372.2008.114}
}
@inproceedings{Capela2008a,
  author = {Capela, Artur and Rebelo, Ana and Cardoso, Jaime dos Santos and Guedes, Carlos},
  booktitle = {International Conference on Signal Processing and Multimedia Applications},
  title = {Staff Line Detection and Removal with Stable Paths},
  year = {2008},
  file = {:pdfs/2008 - Staff Line Detection and Removal with Stable Paths.pdf:PDF},
  url = {http://www.inescporto.pt/~arebelo/publications/2008ACapelaSIGMAP.pdf}
}
@misc{CapellaScan,
  author = {{capella-software AG}},
  howpublished = {\url{https://www.capella-software.com}},
  title = {Capella Scan},
  year = {1996},
  comment = {Copyright inside Capella Scan states 1996 - 2018},
  url = {https://www.capella-software.com}
}
@inproceedings{Cardoso2008,
  author = {Cardoso, Jaime dos Santos and Capela, Artur and Rebelo, Ana and Guedes, Carlos},
  booktitle = {15th International Conference on Image Processing},
  title = {A connected path approach for staff detection on a music score},
  year = {2008},
  pages = {1005--1008},
  abstract = {The preservation of many music works produced in the past entails their digitalization and consequent accessibility in an easy-to-manage digital format. Carrying out this task manually is very time consuming and error prone. While optical music recognition systems usually perform well on printed scores, the processing of handwritten musical scores by computers remains far from ideal. One of the fundamental stages to carry out this task is the staff line detection. In this paper a new method for the automatic detection of music staff lines based on a connected path approach is presented. Lines affected by curvature, discontinuities, and inclination are robustly detected. Experimental results show that the proposed technique consistently outperforms well-established algorithms.},
  doi = {10.1109/ICIP.2008.4711927},
  file = {:pdfs/2008 - A Connected Path Approach for Staff Detection on a Music Score.pdf:PDF},
  issn = {1522-4880},
  keywords = {handwriting recognition;image recognition;music;object detection;connected path approach;optical music recognition;handwritten musical score;staff line detection;curvature detection;Cultural differences;Biomedical optical imaging;Optical character recognition software;Handwriting recognition;Image analysis;Ordinary magnetoresistance;Detectors;Multiple signal classification;Computer errors;Optical computing;Music;optical character recognition;document image processing;image analysis}
}
@article{Cardoso2009,
  author = {Cardoso, Jaime dos Santos and Capela, Artur and Rebelo, Ana and Guedes, Carlos and Pinto da Costa, Joaquim},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  title = {Staff Detection with Stable Paths},
  year = {2009},
  issn = {0162-8828},
  number = {6},
  pages = {1134--1139},
  volume = {31},
  abstract = {The preservation of musical works produced in the past requires their digitalization and transformation into a machine-readable format. The processing of handwritten musical scores by computers remains far from ideal. One of the fundamental stages to carry out this task is the staff line detection. We investigate a general-purpose, knowledge-free method for the automatic detection of music staff lines based on a stable path approach. Lines affected by curvature, discontinuities, and inclination are robustly detected. Experimental results show that the proposed technique consistently outperforms well-established algorithms.},
  doi = {10.1109/TPAMI.2009.34},
  file = {:pdfs/2009 - Staff Detection with Stable Paths.pdf:PDF},
  groups = {staff-removal},
  keywords = {document image processing;handwritten character recognition;music;automatic detection;digitalization;handwritten musical scores processing;machine-readable format;music staff lines;stable path approach;staff line detection;Character recognition;Degradation;Design methodology;Document image processing;Image analysis;Music information retrieval;Optical character recognition software;Ordinary magnetoresistance;Robustness;Writing;document image processing;image analysis;optical character recognition;optical music recognition},
  publisher = {Institute of Electrical \& Electronics Engineers (IEEE)}
}
@article{Carter1988,
  author = {Carter, Nicholas Paul and Bacon, Richard A. and Messenger, T.},
  journal = {Computers and the Humanities},
  title = {The acquisition, representation and reconstruction of printed music by computer: A review},
  year = {1988},
  issn = {1572-8412},
  number = {2},
  pages = {117--136},
  volume = {22},
  abstract = {Material published on the subject of Acquisition, Representation and Reconstruction of printed music by computer is reviewed.},
  doi = {10.1007/BF00057651},
  url = {https://doi.org/10.1007/BF00057651}
}
@incollection{Carter1992,
  author = {Carter, Nicholas Paul and Bacon, Richard A.},
  booktitle = {Structured Document Image Analysis},
  publisher = {Springer Berlin Heidelberg},
  title = {Automatic Recognition of Printed Music},
  year = {1992},
  address = {Berlin, Heidelberg},
  isbn = {978-3-642-77281-8},
  pages = {456--465},
  abstract = {There is a need for an automatic recognition system for printed music scores. The work presented here forms the basis of an omnifont, size-independent system with significant tolerance of noise and rotation of the original image. A structural decomposition technique is used based on an original transformation of the line adjacency graph. An example of output is given in the form of a data file and its score reconstruction.},
  doi = {10.1007/978-3-642-77281-8_21},
  file = {:pdfs/1992 - Automatic Recognition of Printed Music.pdf:PDF},
  url = {https://doi.org/10.1007/978-3-642-77281-8_21}
}
@incollection{Carter1992a,
  author = {Carter, Nicholas Paul},
  booktitle = {Advances in Structural and Syntactic Pattern Recognition},
  publisher = {World Scientific},
  title = {A New Edition of Walton's Fa{\c{c}}ade Using Automatic Score Recognition},
  year = {1992},
  pages = {352--362},
  abstract = {The availability of an automatic recognition system for printed music will facilitate applications such as musicological analysis, point-of-sale printing, creation of large format or braille scores and computer-based production of new editions. An example of the last of these possibilities is described here. A score-reading system is under development which makes use of a structural decomposition technique that is intended to be tolerant of significant variation in font, size, notation and noise in the source images. A description is given of the first "real-world" task to be undertaken using the system, i.e. the production of a new edition of Façade by William Walton. Sample output files and their corresponding reconstructions are given together with a discussion of the problems involved and the implications for future work.},
  doi = {10.1142/9789812797919_0029},
  file = {:pdfs/1992 - A New Edition of Walton's Facade Using Automatic Score Recognition.pdf:PDF}
}
@article{Carter1992b,
  author = {Carter, Nicholas Paul},
  journal = {Machine Vision and Applications},
  title = {Segmentation and preliminary recognition of madrigals notated in white mensural notation},
  year = {1992},
  issn = {1432-1769},
  number = {3},
  pages = {223--229},
  volume = {5},
  abstract = {An automatic music score-reading system will facilitate applications including computer-based editing of new editions, production of databases for musicological research, and creation of braille or large-format scores for the blind or partially-sighted. The work described here deals specifically with initial processing of images containing early seventeenth century madrigals notated in white mensural notation. The problems of segmentation involved in isolating the musical symbols from the word-underlay and decorative graphics are compounded by the poor quality of the originals which present a significant challenge to any recognition system. The solution described takes advantage of structural decomposition techniques based on a novel transformation of the line adjacency graph which have been developed during work on a score-reading system for conventional music notation.},
  doi = {10.1007/BF02627000},
  url = {https://doi.org/10.1007/BF02627000}
}
@inproceedings{Carter1994,
  author = {Carter, Nicholas Paul},
  booktitle = {International Symposium on Electronic Imaging: Science and Technology},
  title = {Conversion of the Haydn symphonies into electronic form using automatic score recognition: a pilot study},
  year = {1994},
  volume = {2181},
  abstract = {As part of the development of an automatic recognition system for printed music scores, a series of `real-world' tasks are being undertaken. The first of these involves the production of a new edition of an existing 104-page, engraved, chamber-music score for Oxford University Press. The next substantial project, which is described here, has begun with a pilot study with a view to conversion of the 104 Haydn symphonies from a printed edition into machine- readable form. The score recognition system is based on a structural decomposition approach which provides advantages in terms of speed and tolerance of significant variations in font, scale, rotation and noise. Inevitably, some editing of the output data files is required, partially due to the limited vocabulary of symbols supported by the system and their permitted superimpositions. However, the possibility of automatically processing the bulk of the contents of over 600 pages of orchestral score in less than a day of compute time makes the conversion task manageable. The influence that this undertaking is having on the future direction of system development also is discussed.},
  doi = {10.1117/12.171115},
  url = {https://doi.org/10.1117/12.171115}
}
@inproceedings{Castellanos2018,
  author = {Castellanos, Francisco J. and Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Document Analysis of Music Score Images with Selectional Auto-Encoders},
  year = {2018},
  address = {Paris, France},
  pages = {256--263},
  file = {:pdfs/2018 - Document Analysis of Music Score Images with Selectional Auto Encoders.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/93_Paper.pdf}
}
@article{Castellanos2020,
  author = {Castellanos, Francisco J. and Gallego, Antonio-Javier and Calvo-Zaragoza, Jorge},
  journal = {Expert Systems with Applications},
  title = {Automatic scale estimation for music score images},
  year = {2020},
  issn = {0957-4174},
  pages = {113590},
  abstract = {Optical Music Recognition (OMR) is the research field focused on the automatic reading of music from scanned images. Its main goal is to encode the content into a digital and structured format with the advantages that this entails. This discipline is traditionally aligned to a workflow whose first step is the document analysis. This step is responsible of recognizing and detecting different sources of information—e.g. music notes, staff lines and text—to extract them and then processing automatically the content in the following steps of the workflow. One of the most difficult challenges it faces is to provide a generic solution to analyze documents with diverse resolutions. The endless number of existing music sources does not meet a standard that normalizes the data collections, giving complete freedom for a wide variety of image sizes and scales, thereby making this operation unsustainable. In the literature, this question is commonly overlooked and a uniform scale is assumed. In this paper, a machine learning-based approach to estimate the scale of music documents with respect to a reference scale is presented. Our goal is to propose a robust and generalizable method to adapt the input image to the requirements of an OMR system. For this, two goal-directed case studies are included to evaluate the proposed approach over common task within the OMR workflow, comparing the behavior with other state-of-the-art methods. Results suggest that it is necessary to perform this additional step in the first stage of the workflow to correct the scale of the input images. In addition, it is empirically demonstrated that our specialized approach is more promising than image augmentation strategies for the multi-scale challenge.},
  doi = {10.1016/j.eswa.2020.113590},
  keywords = {Optical Music Recognition, Scale Estimation, Scale Correction, Music Score Images Analysis},
  url = {http://www.sciencedirect.com/science/article/pii/S0957417420304140}
}
@inproceedings{Castellanos2021,
  author = {Castellanos, Francisco J. and Gallego, Antonio-Javier},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Unsupervised Neural Document Analysis for Music Score Images},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {50--54},
  file = {:pdfs/2021 - Unsupervised Neural Document Analysis for Music Score Images.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{Castellanos2023,
  author = {Castellanos, Francisco J. and Gallego, Antonio-Javier and Fujinaga, Ichiro},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {A Preliminary Study of Few-shot Learning for Layout Analysis of Music Scores},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {44--48},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - A Preliminary Study of Few Shot Learning for Layout Analysis of Music Scores.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@inproceedings{Castro2007,
  author = {Castro, Pedro and Caldas Pinto, J. R.},
  booktitle = {Image Analysis and Recognition},
  title = {Methods for Written Ancient Music Restoration},
  year = {2007},
  address = {Berlin, Heidelberg},
  editor = {Kamel, Mohamed and Campilho, Aur{\'e}lio},
  pages = {1194--1205},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Degradation in old documents has been a matter of concern for a long time. With the easy access to information provided by technologies such as the Internet, new ways have arisen for consulting those documents without exposing them to yet more dangers of degradation. While restoration methods are present in the literature in relation to text documents and artworks, little attention has been given to the restoration of ancient music. This paper describes and compares different methods to restore images of ancient music documents degraded over time. Six different methods were tested, including global and adaptive thresholding, color clustering and edge detection. In this paper we conclude that those based on the Sauvola's thresholding algorithm are the better suited for our proposed goal of ancient music restoration.},
  doi = {10.1007/978-3-540-74260-9_106},
  file = {:pdfs/2007 - Methods for Written Ancient Music Restoration.pdf:PDF},
  isbn = {978-3-540-74260-9}
}
@inproceedings{Castro2007a,
  author = {Castro, Pedro and Almeida, R. J. and Caldas Pinto, J. R.},
  booktitle = {Progress in Pattern Recognition, Image Analysis and Applications},
  title = {Restoration of Double-Sided Ancient Music Documents with Bleed-Through},
  year = {2007},
  address = {Berlin, Heidelberg},
  editor = {Rueda, Luis and Mery, Domingo and Kittler, Josef},
  pages = {940--949},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Access to collections of cultural heritage is increasingly becoming a topic of interest for institutions like libraries. With the easy access to information provided by technologies such as the Internet, new ways exist for consulting ancient documents without exposing them to more dangers of degradation. One of those types of documents is written ancient music. These documents suffer from multiple kinds of degradation, where bleed-through outstands as the most damaging. This paper proposes a new method based on the Takagi Sugeno fuzzy classification algorithm to classify the pixels as bleed-through, after performing a general background restoration. This method is applied to a set of double-sided ancient music documents, and the obtained results compared with methods present in the literature.},
  doi = {10.1007/978-3-540-76725-1_97},
  file = {:pdfs/2007 - Restoration of Double Sided Ancient Music Documents with Bleed through.pdf:PDF},
  isbn = {978-3-540-76725-1}
}
@inproceedings{Chanda2014,
  author = {Chanda, Sukalpa and Das, Debleena and Pal, Umapada and Kimura, Fumitaka},
  booktitle = {14th International Conference on Frontiers in Handwriting Recognition},
  title = {Offline Hand-Written Musical Symbol Recognition},
  year = {2014},
  pages = {405--410},
  doi = {10.1109/ICFHR.2014.74},
  file = {:pdfs/2014 - Offline Hand-Written Musical Symbol Recognition.pdf:PDF},
  groups = {recognition},
  isbn = {978-1-4799-4334-0},
  keywords = {musical score, character recognition, MQDF, offline musical symbol recognition, SVM},
  publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6981053}
}
@article{Chen2013,
  author = {Chen, Yung-Sheng and Chen, Feng-Sheng and Teng, Chin-Hung},
  journal = {International Journal of Pattern Recognition and Artificial Intelligence},
  title = {An Optical Music Recognition System for Skew or Inverted Musical Scores},
  year = {2013},
  number = {07},
  volume = {27},
  doi = {10.1142/S0218001413530054},
  file = {:pdfs/2013 - An optical Music Recognition System for Skew or inverted musical scores.pdf:PDF}
}
@article{Chen2014,
  author = {Chen, Gen-Fang and Sheu, Jia-Shing},
  journal = {EURASIP Journal on Audio, Speech, and Music Processing},
  title = {An optical music recognition system for traditional Chinese Kunqu Opera scores written in Gong-Che Notation},
  year = {2014},
  issn = {1687-4722},
  number = {1},
  pages = {7},
  volume = {2014},
  abstract = {This paper presents an optical music recognition (OMR) system to process
	the handwritten musical scores of Kunqu Opera written in Gong-Che
	Notation (GCN). First, it introduces the background of Kunqu Opera
	and GCN. Kunqu Opera is one of the oldest forms of musical activity,
	spanning the sixteenth to eighteenth centuries, and GCN has been
	the most popular notation for recording musical works in China since
	the seventh century. Many Kunqu Operas that use GCN are available
	as original manuscripts or photocopies, and transforming these versions
	into a machine-readable format is a pressing need. The OMR system
	comprises six stages: image pre-processing, segmentation, feature
	extraction, symbol recognition, musical semantics, and musical instrument
	digital interface (MIDI) representation. This paper focuses on the
	symbol recognition stage and obtains the musical information with
	Bayesian, genetic algorithm, and K-nearest neighbor classifiers.
	The experimental results indicate that symbol recognition for Kunqu
	Opera's handwritten musical scores is effective. This work will help
	to preserve and popularize Chinese cultural heritage and to store
	Kunqu Opera scores in a machine-readable format, thereby ensuring
	the possibility of spreading and performing original Kunqu Opera
	musical scores.},
  doi = {10.1186/1687-4722-2014-7},
  file = {:pdfs/2014 - An optical music recognition system for traditional chinese kunqu Opera scores written in Gong-Che Notation.pdf:PDF}
}
@inproceedings{Chen2014a,
  author = {Chen, Liang and Jin, Rong and Raphael, Christopher},
  booktitle = {CHI'14 Workshop on Human-Centred Machine Learning},
  title = {Optical Music Recognition with Human Labeled Constraints},
  year = {2014},
  address = {Toronto, Canada},
  file = {:pdfs/2014 - Optical Music Recognition with Human Labeled Constraints.pdf:PDF},
  url = {http://www.doc.gold.ac.uk/~mas02mg/HCML2016/HCML2016_paper_3.pdf}
}
@inproceedings{Chen2015,
  author = {Chen, Liang and Jin, Rong and Raphael, Christopher},
  booktitle = {Mathematics and Computation in Music},
  title = {Renotation from Optical Music Recognition},
  year = {2015},
  address = {Cham},
  pages = {16--26},
  publisher = {Springer International Publishing},
  doi = {10.1007/978-3-319-20603-5_2},
  file = {:pdfs/2015 - Renotation from Optical Music Recognition.pdf:PDF},
  school = {School of Informatics and Computing, Indiana University}
}
@inproceedings{Chen2015a,
  author = {Chen, Liang and Raphael, Christopher},
  booktitle = {Extended abstracts for the Late-Breaking Demo Session of the 16th International Society for Music Information Retrieval Conference},
  title = {Ceres: An Interactive Optical Music Recognition System},
  year = {2015},
  address = {M{\'{a}}laga, Spain},
  file = {:pdfs/2015 - Ceres - an Interactive Optical Music Recognition System.pdf:PDF},
  url = {http://ismir2015.uma.es/LBD/LBD10.pdf}
}
@inproceedings{Chen2016,
  author = {Chen, Liang and Duan, Kun},
  booktitle = {Winter Conference on Applications of Computer Vision},
  title = {MIDI-assisted egocentric optical music recognition},
  year = {2016},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Egocentric vision has received increasing attention in recent years
	due to the vast development of wearable devices and their applications.
	Although there are numerous existing work on egocentric vision, none
	of them solve Optical Music Recognition (OMR) problem. In this paper,
	we propose a novel optical music recognition approach for egocentric
	device (e.g. Google Glass) with the assistance of MIDI data. We formulate
	the problem as a structured sequence alignment problem as opposed
	to the blind recognition in traditional OMR systems. We propose a
	linear-chain Conditional Random Field (CRF) to model the note event
	sequence, which translates the relative temporal relations contained
	by MIDI to spatial constraints over the egocentric observation. We
	performed evaluations to compare the proposed approach with several
	different baselines and proved that our approach achieved the highest
	recognition accuracy. We view our work as the first step towards
	egocentric optical music recognition, and believe it will bring insights
	for next-generation music pedagogy and music entertainment.},
  affiliation = {Indiana University, Bloomington, IN, United States; GE Global Research, Niskayuna, NY, United States},
  doi = {10.1109/WACV.2016.7477714},
  file = {:pdfs/2016 - MIDI-Assisted Egocentric Optical Music Recognition.pdf:PDF},
  isbn = {9781509006410}
}
@article{Chen2016a,
  author = {Chen, Liang and Raphael, Christopher},
  journal = {Electronic Imaging},
  title = {Human-Directed Optical Music Recognition},
  year = {2016},
  number = {17},
  pages = {1--9},
  volume = {2016},
  doi = {10.2352/ISSN.2470-1173.2016.17.DRR-053},
  file = {:pdfs/2016 - Human-Directed Optical Music Recognition.pdf:PDF},
  publisher = {Society for Imaging Science and Technology}
}
@inproceedings{Chen2016b,
  author = {Chen, Liang and Stolterman, Erik and Raphael, Christopher},
  booktitle = {17th International Society for Music Information Retrieval Conference},
  title = {Human-Interactive Optical Music Recognition},
  year = {2016},
  editor = {Michael I. Mandel and Johanna Devaney and Douglas Turnbull and George Tzanetakis},
  pages = {647--653},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/ChenSR16},
  file = {:pdfs/2016 - Human Interactive Optical Music Recognition.pdf:PDF},
  isbn = {978-0-692-75506-8},
  url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/106_Paper.pdf}
}
@inproceedings{Chen2016e,
  author = {Chen, Liang and Jin, Rong and Zhang, Simo and Lee, Stefan and Chen, Zhenhua and Crandall, David},
  booktitle = {Extended abstracts for the Late-Breaking Demo Session of the 17th International Society for Music Information Retrieval Conference},
  title = {A Hybrid {HMM-RNN} Model for Optical Music Recognition},
  year = {2016},
  file = {:pdfs/2016 - A Hybrid HMM RNN Model for Optical Music Recognition.pdf:PDF},
  url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/chen-hybrid.pdf}
}
@inproceedings{Chen2017,
  author = {Chen, Liang and Jin, Rong and Raphael, Christopher},
  booktitle = {4th International Workshop on Digital Libraries for Musicology},
  title = {Human-Guided Recognition of Music Score Images},
  year = {2017},
  publisher = {ACM Press},
  doi = {10.1145/3144749.3144752},
  file = {:pdfs/2017 - Human-Guided Recognition of Music Score Images.pdf:PDF}
}
@inproceedings{Chen2017b,
  author = {Chen, Liang and Raphael, Christopher},
  booktitle = {14th Sound and Music Computing Conference},
  title = {Renotation of Optical Music Recognition Data},
  year = {2017},
  address = {Espoo, Finland},
  file = {:pdfs/2017 - Renotation of Optical Music Recognition Data.pdf:PDF},
  url = {http://smc2017.aalto.fi/media/materials/proceedings/SMC17_p287.pdf}
}
@inproceedings{Chen2018,
  author = {Chen, Liang and Raphael, Christopher},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Optical Music Recognition and Human-in-the-loop Computation},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {11--12},
  file = {:pdfs/2018 - Optical Music Recognition and Human in the Loop Computation.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Chhabra1998,
  author = {Chhabra, Atul K.},
  booktitle = {Graphics Recognition Algorithms and Systems},
  title = {Graphic symbol recognition: An overview},
  year = {1998},
  address = {Berlin, Heidelberg},
  editor = {Tombre, Karl and Chhabra, Atul K.},
  pages = {68--79},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Symbol recognition is one of the primary stages of any graphics recognition system. This paper reviews the current state of the art in graphic symbol recognition and raises some open issues that need further investigation. Work on symbol recognition tends to be highly application specific. Therefore, this review presents the symbol recognition methods in the context of specific applications.},
  doi = {10.1007/3-540-64381-8_40},
  file = {:pdfs/1998 - Graphic Symbol Recognition - an Overview.pdf:PDF},
  isbn = {978-3-540-69766-4}
}
@inproceedings{Choi2017,
  author = {Choi, Kwon-Young and Co{\"u}asnon, Bertrand and Ricquebourg, Yann and Zanibbi, Richard},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Bootstrapping Samples of Accidentals in Dense Piano Scores for {CNN}-Based Detection},
  year = {2017},
  address = {Kyoto, Japan},
  organization = {IAPR TC10 (Technical Committee on Graphics Recognition)},
  publisher = {IEEE Computer Society},
  abstract = {State-of-the-art Optical Music Recognition system often fails to process
	dense and damaged music scores, where many symbols can present complex
	segmentation problems. We propose to resolve these segmentation problems
	by using a CNN-based detector trained with few manually annotated
	data. A data augmentation bootstrapping method is used to accurately
	train a deep learning model to do the localization and classification
	of an accidental symbol associated with a note head, or the note
	head if there is no accidental. Using 5-fold cross-validation, we
	obtain an average of 98.5% localization with an IoU score over 0.5
	and a classification accuracy of 99.2%.},
  doi = {10.1109/ICDAR.2017.257},
  file = {:pdfs/2017 - Bootstrapping Samples of Accidentals in Dense Piano Scores for CNN-Based Detection.pdf:PDF},
  isbn = {978-1-5386-3586-5},
  issn = {2379-2140}
}
@inproceedings{Choi2018,
  author = {Choi, Kwon-Young and Co{\"{u}}asnon, Bertrand and Ricquebourg, Yann and Zanibbi, Richard},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Music Symbol Detection with Faster {R-CNN} Using Synthetic Annotations},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {9--10},
  file = {:pdfs/2018 - Music Symbol Detection with Faster R CNN Using Synthetic Annotations.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Choudhury2000,
  author = {Choudhury, G. Sayeed and Droettboom, Michael and DiLauro, Tim and Fujinaga, Ichiro and Harrington, Brian},
  booktitle = {1st International Symposium on Music Information Retrieval},
  title = {Optical Music Recognition System within a Large-Scale Digitization Project},
  year = {2000},
  file = {:pdfs/2000 - Optical Music Recognition System within a Large-Scale Digitization Project.pdf:PDF},
  url = {http://jhir.library.jhu.edu/handle/1774.2/32794}
}
@article{Choudhury2000a,
  author = {Choudhury, G. Sayeed and Requardt, Cynthia and Fujinaga, Ichiro and DiLauro, Tim and Brown, Elisabeth W. and Warner, James W. and Harrington, Brian},
  journal = {First Monday},
  title = {Digital workflow management: The Lester S. Levy digitized collection of sheet music},
  year = {2000},
  number = {6},
  volume = {5},
  abstract = {The paper describes the development of a set of workflow management tools (WMS) that will reduce the manual input necessary to manage the workflow of large-scale digitization projects. The WMS will also support the path from physical object and/or digitized material into a digital library repository by providing effective tools for perusing multimedia elements. The Lester S. Levy Collection of Sheet Music Project at the Milton S. Eisenhower Library at The Johns Hopkins University provides an ideal testbed for the development and evaluation of the WMS. Building upon previous effort to digitize the entire collection of over 29000 pieces of sheet music, optical music recognition (OMR) software will create sound files and full-text lyrics. The combination of image, text and sound files provide a comprehensive multimedia environment. The functionality of the collection will be enhanced by the incorporation of metadata, the implementation of a disk based search engine for lyrics, and the development of toolkits for searching sound files.},
  doi = {10.5210/fm.v5i6.756},
  file = {:pdfs/2000 - Digital Workflow Management_ the Lester S. Levy Digitized Collection of Sheet Music.pdf:PDF},
  keywords = {digital workflow management; Lester S Levy digitized collection; sheet music; workflow management tools; large-scale digitization projects; digital library repository; multimedia elements; WMS; optical music recognition; OMR software; sound files; full-text lyrics; multimedia environment; metadata; disk based search engine (Office automation); C7820 (Humanities computing); C6130M (Multimedia); C6160M; C7250N (Front end systems for online searching)}
}
@article{Choudhury2001,
  author = {Choudhury, G. Sayeed and DiLauro, Tim and Droettboom, Michael and Fujinaga, Ichiro and MacMillan, Karl},
  journal = {D-Lib Magazine},
  title = {Strike Up the Score: Deriving searchable and playable digital formats from sheet music},
  year = {2001},
  issn = {1082-9873},
  number = {2},
  volume = {7},
  doi = {10.1045/february2001-choudhury},
  file = {:pdfs/2001 - Strike up the Score - Deriving Searchable and Playable Digital Formats from Sheet Music.pdf:PDF},
  url = {http://www.dlib.org/dlib/february01/choudhury/02choudhury.html}
}
@inproceedings{Church2014,
  author = {Church, Maura and Cuthbert, Michael Scott},
  booktitle = {15th International Society for Music Information Retrieval Conference},
  title = {Improving Rhythmic Transcriptions via Probability Models Applied Post-{OMR}},
  year = {2014},
  editor = {Hsin-Min Wang and Yi-Hsuan Yang and Jin Ha Lee},
  pages = {643--648},
  file = {:pdfs/2014 - Improving Rhythmic Transcriptions Via Probability Models Applied Post OMR.pdf:PDF},
  url = {http://www.terasoft.com.tw/conf/ismir2014/proceedings/T116_357_Paper.pdf}
}
@article{Clarke1988,
  author = {Clarke, Alastair T. and Brown, B. Malcolm and Thorne, M. P.},
  journal = {Microprocessing and Microprogramming},
  title = {Using a micro to automate data acquisition in music publishing},
  year = {1988},
  issn = {0165-6074},
  note = {Supercomputers: Technology and Applications},
  number = {1},
  pages = {549--553},
  volume = {24},
  abstract = {With the number of computer applications involving music information growing, and the transition from traditional music printing methods to computer typesetting that is being faced by music publishers, there is an increasing need for an efficient and accurate method of getting musical information into computers. This paper describes some of the technical problems encountered in developing a system, based upon the IBM PC and a low-cost scanning device, to automatically recognise the printed music notation on a sheet of music that is fed through the scanner.},
  doi = {10.1016/0165-6074(88)90109-3},
  url = {http://www.sciencedirect.com/science/article/pii/0165607488901093}
}
@article{Clarke1989,
  author = {Clarke, Alastair T. and Brown, B. Malcolm and Thorne, M. P.},
  journal = {Microprocessing and Microprogramming},
  title = {Coping with some really rotten problems in automatic music recognition},
  year = {1989},
  issn = {0165-6074},
  note = {Fifteenth EUROMICRO Symposium on Microprocessing and Microprogramming},
  number = {1},
  pages = {547--550},
  volume = {27},
  abstract = {This paper describes some of the problems encountered, and some of the techniques that have been used and implemented, during the development of an Optical Character Recognition system for printed music. It focuses on the recognition of chords and clusters, subdivision into single “lines” of music, and translation into musical code. Whereas other, mainframe based, music recognition systems have rarely attacked these problems, our methods have given some considerable success with an IBM PC.},
  doi = {10.1016/0165-6074(89)90108-7},
  url = {http://www.sciencedirect.com/science/article/pii/0165607489901087}
}
@inproceedings{Clarke1993,
  author = {Clarke, Alastair T. and Brown, B. Malcolm and Thorne, M. P.},
  booktitle = {Machine Vision Applications, Architectures, and Systems Integration},
  title = {Recognizing musical text},
  year = {1993},
  abstract = {This paper reports on some recent developments in a software product that recognizes printed music notation. There are a number of computer systems available which assist in the task of printing music; however the full potential of these systems cannot be realized until the musical text has been entered into the computer. It is this problem that we address in this paper. The software we describe, which uses computationally inexpensive methods, is designed to analyze a music score, previously read by a flat bed scanner, and to extract the musical information that it contains. The paper discusses the methods used to recognize the musical text: these involve sampling the image at strategic points and using this information to estimate the musical symbol. It then discusses some hard problems that have been encountered during the course of the research; for example the recognition of chords and note clusters. It also reports on the progress that has been made in solving these problems and concludes with a discussion of work that needs to be undertaken over the next five years in order to transform this research prototype into a commercial product.},
  doi = {10.1117/12.150288}
}
@inproceedings{Clausen2002,
  author = {Clausen, Michael and Kurth, Frank},
  booktitle = {2nd International Conference on Web Delivering of Music},
  title = {A unified approach to content-based and fault tolerant music identification},
  year = {2002},
  pages = {56--65},
  abstract = {In this paper we propose a unified approach to content-based search in different kinds of music data. Our approach is based on a general algorithmic framework for searching patterns of complex objects in large databases. In particular we describe how this approach may be used to allow for polyphonic search in polyphonic scores as well as for the identification of PCM audio material. We give an overview on the various aspects of our technology including fault tolerant search methods. Several areas of application are suggested. We give an overview on several prototypic systems we developed for those applications including the notify! and the audentify! systems.},
  doi = {10.1109/WDM.2002.1176194},
  file = {:pdfs/2002 - A Unified Approach to Content Based and Fault Tolerant Music Identification.pdf:PDF},
  keywords = {music;information retrieval;content-based retrieval;computational complexity;content based retrieval;music information retrieval;music identification;PCM audio material;score-based music;algorithmic complexities;large databases;polyphonic search;fault tolerant search;Fault tolerance;Multiple signal classification;Fault diagnosis;Databases;Prototypes;Computer science;Phase change materials;Search methods;Music information retrieval;Content based retrieval}
}
@article{Clausen2004,
  author = {Clausen, Michael and Kurth, Frank},
  journal = {IEEE Transactions on Multimedia},
  title = {A unified approach to content-based and fault-tolerant music recognition},
  year = {2004},
  issn = {1520-9210},
  number = {5},
  pages = {717--731},
  volume = {6},
  abstract = {In this paper, we propose a unified approach to fast index-based music recognition. As an important area within the field of music information retrieval (MIR), the goal of music recognition is, given a database of musical pieces and a query document, to locate all occurrences of that document within the database, up to certain possible errors. In particular, the identification of the query with regard to the database becomes possible. The approach presented in this paper is based on a general algorithmic framework for searching complex patterns of objects in large databases. We describe how this approach may be applied to two important music recognition tasks: The polyphonic (musical score-based) search in polyphonic score data and the identification of pulse-code modulation audio material from a given acoustic waveform. We give an overview on the various aspects of our technology including fault-tolerant search methods. Several areas of application are suggested. We describe several prototypic systems we have developed for those applications including the notify! and the audentify! systems for score- and waveform-based music recognition, respectively.},
  doi = {10.1109/TMM.2004.834859},
  file = {:pdfs/2004 - A Unified Approach to Content Based and Fault Tolerant Music Recognition.pdf:PDF},
  keywords = {content-based retrieval;database indexing;fault tolerant computing;query formulation;pattern recognition;audio databases;audio signal processing;music;pulse code modulation;content-based retrieval;fault-tolerant search method;index-based music recognition;music information retrieval;pattern searching;polyphonic search method;pulse-code modulation;audio material;acoustic waveform;audio identification;transposition-invariant search;Fault tolerance;Multiple signal classification;Databases;Music information retrieval;Pulse modulation;Acoustic pulses;Acoustic waves;Content based retrieval;Acoustic materials;Search methods}
}
@inproceedings{Colesnicov2019,
  author = {Colesnicov, Alexandru and Cojocaru, Svetlana and Luca, Mihaela and Malahov, Ludmila},
  booktitle = {Proceedings of the Fifth Conference of Mathematical Society of Moldova},
  title = {On Digitization of Documents with Script Presentable Content},
  year = {2019},
  abstract = {The paper is dedicated to details of the digitization of printed documents that include formalized script presentable content, in connection with the revitalization of the cultural heritage. We discuss the process and the necessary software by an example of music, as the recognition of scores is a solved task.},
  file = {:pdfs/2019 - On Digitization of Documents with Script Presentable Content.pdf:PDF},
  url = {https://ibn.idsi.md/sites/default/files/imag_file/321-324_7.pdf}
}
@inproceedings{Coueasnon1994,
  author = {Co{\"u}asnon, Bertrand and Camillerapp, Jean},
  booktitle = {International Association for Pattern Recognition Workshop on Document Analysis Systems},
  title = {Using Grammars to Segment and Recognize Music Scores},
  year = {1994},
  address = {Kaiserslautern, Germany},
  pages = {15--27},
  file = {:pdfs/1994 - Using Grammars To Segment and Recognize Music Scores.pdf:PDF},
  url = {ftp://ftp.idsa.prd.fr/local/IMADOC/couasnon/Articles/das94.ps}
}
@inproceedings{Coueasnon1995,
  author = {Co{\"u}asnon, Bertrand and Brisset, Pascal and St{\'{e}}phan,Igor},
  booktitle = {3rd International Conference on the Practical Application of Prolog},
  title = {Using Logic Programming Languages For Optical Music Recognition},
  year = {1995},
  abstract = {Optical Music Recognition is a particular form of document analysis in which there is much knowledge about document structure. Indeed there exists an important set of rules for musical notation, but current systems do not fully use them. We propose a new solution using a grammar to guide the segmentation of the graphical objects and their recognition. The grammar is essentially a description of the relations (relative position and size, adjacency, etc.) between the graphical objects. Inspired by Definite Clause Grammar techniques, the grammar can be directly implemented in {$\lambda$}Prolog, a higher-order dialect of Prolog. Moreover, the translation from the grammar into {$\lambda$}Prolog code can be done automatically. Our approach is justified by the first encouraging results obtained with a prototype for music score recognition.},
  file = {:pdfs/1995 - Using Logic Programming Languages For Optical Music Recognition.pdf:PDF},
  keywords = {Document analysis, Optical Music Recognition, DCG, Grammar Trans},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.8439}
}
@inproceedings{Coueasnon1995a,
  author = {Co{\"u}asnon, Bertrand and Camillerapp, Jean},
  booktitle = {3rd International Conference on Document Analysis and Recognition},
  title = {A Way to Separate Knowledge From Program in Structured Document Analysis: Application to Optical Music Recognition},
  year = {1995},
  pages = {1092--1097},
  abstract = {Optical Music Recognition is a form of document analysis for which a priori knowledge is particularly important. Musical notation is governed by a substantial set of rules, but current systems fail to use them adequately. In complex scores, existing systems cannot overcome the well-known segmentation problems of document analysis, due mainly to the high density of music information. This paper proposes a new method of recognition which uses a grammar in order to formalize the syntactic rules and represent the context. However, where objects touch, there is a discrepancy between the way the existing knowledge (grammar) will describe an object and the way it is recognized, since touching objects have to be segmented first. Following a description of the grammar, this paper shall go on to propose the use of an operator to modify the way the grammar parses the image so that the system can deal with certain touching objects (e.g. where an accidental touches a notehead).},
  doi = {10.1109/ICDAR.1995.602099},
  file = {:pdfs/1995 - A Way to Separate Knowledge from Program in Structured Document Analysis_ Application to Optical Music Recognition.pdf:PDF},
  keywords = {music;document image processing;image segmentation;character recognition;optical music recognition;document analysis;a priori knowledge;musical notation;segmentation problems;syntactic rules;grammar;touching objects;Text analysis;Multiple signal classification;Particle beam optics;Labeling;Information analysis;Image segmentation;Image analysis;Image recognition;Joining processes;Ultraviolet sources}
}
@inproceedings{Coueasnon1995b,
  author = {Co{\"{u}}asnon, Bertrand and R{\'{e}}tif, Bernard},
  booktitle = {International Computer Music Conference},
  title = {Using a grammar for a reliable full score recognition system},
  year = {1995},
  pages = {187--194},
  abstract = {Optical Music Recognition needs to be reliable, so that users do not have to detect and correct errors by checking the entire recognized score. Reliability can be reached by improving the recognition quality (on segmentation problems) and by making the system able to detect its own recognition errors. This is possible only by using the musical knowledge as much as possible. Therefore, we propose a grammar to formalize the musical knowledge on full scores with polyphonic staves. We then show how this grammar can help detect most errors on note duration. The presented system is in an implementation phase but is already able to deal with full scores and to point out errors.},
  file = {:pdfs/1995 - Using a Grammar for a Reliable Full Score Recognition System.pdf:PDF},
  url = {https://pdfs.semanticscholar.org/3b97/949f436f929ed11ee76358e07fa1a61d2e01.pdf}
}
@inproceedings{Coueasnon2001,
  author = {Co{\"u}asnon, Bertrand},
  booktitle = {6th International Conference on Document Analysis and Recognition},
  title = {DMOS: a generic document recognition method, application to an automatic generator of musical scores, mathematical formulae and table structures recognition systems},
  year = {2001},
  pages = {215--220},
  abstract = {Genericity in structured document recognition is a difficult challenge.
	We therefore propose a new generic document recognition method, called
	DMOS (Description and MOdification of Segmentation), that is made
	up of a new grammatical formalism, called EPF (Enhanced Position
	Formalism) and an associated parser which is able to introduce context
	in segmentation. We implement this method to obtain a generator of
	document recognition systems. This generator can automatically produce
	new recognition systems. It is only necessary to describe the document
	with an EPF grammar, which is then simply compiled. In this way,
	we have developed various recognition systems: one on musical scores,
	one on mathematical formulae and one on recursive table structures.
	We have also defined a specific application to damaged military forms
	of the 19th Century. We have been able to test the generated system
	on 5,000 of these military forms. This has permitted us to validate
	the DMOS method on a real-world application},
  doi = {10.1109/ICDAR.2001.953786},
  file = {:pdfs/2001 - DMOS - A Generic Document Recognition Method, Application to an Automatic Generator of Musical Scores, Mathematical Formulae and Table Structures Recognition Systems.pdf:PDF},
  keywords = {data structures;document image processing;grammars;history;image recognition;image segmentation;mathematics computing;military computing;music;program compilers;DMOS;EPF grammatical formalism;Enhanced Position Formalism;automatic musical score generator;compilation;damaged military forms;generic document recognition method;mathematical formulae;parser;recursive table structure recognition;segmentation context;System testing}
}
@article{Craig-McFeely2008,
  author = {Craig-McFeely, Julia},
  journal = {Digital Medievalist},
  title = {Digital Image Archive of Medieval Music: The evolution of a digital resource},
  year = {2008},
  volume = {3},
  doi = {10.16995/dm.16},
  file = {:pdfs/2008 - Digital Image Archive of Medieval Music_ the Evolution of a Digital Resource.pdf:PDF}
}
@inproceedings{Crawford2018,
  author = {Crawford, Tim and Badkobeh, Golnaz and Lewis, David},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Searching Page-Images of Early Music Scanned with {OMR}: A Scalable Solution Using Minimal Absent Words},
  year = {2018},
  address = {Paris, France},
  pages = {233--239},
  file = {:pdfs/2018 - Searching Page Images of Early Music Scanned with OMR - a Scalable Solution Using Minimal Absent Words.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/210_Paper.pdf}
}
@inproceedings{Dalitz2005,
  author = {Dalitz, Christoph and Karsten, Thomas},
  booktitle = {6th International Conference on Music Information Retrieval},
  title = {Using the Gamera framework for building a lute tablature recognition system},
  year = {2005},
  address = {London, UK},
  pages = {478--481},
  file = {:pdfs/2005 - Using the Gamera Framework for Building a Lute Tablature Recognition System.pdf:PDF},
  url = {http://ismir2005.ismir.net/proceedings/2012.pdf}
}
@article{Dalitz2008,
  author = {Dalitz, Christoph and Droettboom, Michael and Pranzas, Bastian and Fujinaga, Ichiro},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  title = {A Comparative Study of Staff Removal Algorithms},
  year = {2008},
  issn = {0162-8828},
  number = {5},
  pages = {753--766},
  volume = {30},
  abstract = {This paper presents a quantitative comparison of different algorithms for the removal of stafflines from music images. It contains a survey of previously proposed algorithms and suggests a new skeletonization-based approach. We define three different error metrics, compare the algorithms with respect to these metrics, and measure their robustness with respect to certain image defects. Our test images are computer-generated scores on which we apply various image deformations typically found in real-world data. In addition to modern western music notation, our test set also includes historic music notation such as mensural notation and lute tablature. Our general approach and evaluation methodology is not specific to staff removal but applicable to other segmentation problems as well.},
  doi = {10.1109/tpami.2007.70749},
  file = {:pdfs/2008 - A Comparative Study of Staff Removal Algorithms.pdf:PDF},
  groups = {staff-removal},
  keywords = {image recognition;image segmentation;computer-generated scores;error metrics;historic music notation;image defects;image deformations;lute tablature;mensural notation;modern western music notation;music images;real-world data;skeletonization-based approach;staff removal algorithms;staffline removal;Music (Optical Recognition);Performance evaluation;Pixel classification;Segmentation;Algorithms;Artificial Intelligence;Automatic Data Processing;Documentation;Image Enhancement;Image Interpretation, Computer-Assisted;Information Storage and Retrieval;Music;Pattern Recognition, Automated;Reproducibility of Results;Sensitivity and Specificity;Subtraction Technique},
  publisher = {Institute of Electrical \& Electronics Engineers (IEEE)}
}
@article{Dalitz2008a,
  author = {Dalitz, Christoph and Michalakis, Georgios K. and Pranzas, Christine},
  journal = {International Journal of Document Analysis and Recognition},
  title = {Optical recognition of psaltic Byzantine chant notation},
  year = {2008},
  issn = {1433-2825},
  number = {3},
  pages = {143--158},
  volume = {11},
  abstract = {This paper describes a document recognition system for the modern neume based notation of Byzantine music. We propose algorithms for page segmentation, lyrics removal, syntactical symbol grouping and the determination of characteristic page dimensions. All algorithms are experimentally evaluated on a variety of printed books for which we also give an optimal feature set for a nearest neighbour classifier. The system is based on the Gamera framework for document image analysis. Given that we cover all aspects of the recognition process, the paper can also serve as an illustration how a recognition system for a non standard document type can be designed from scratch.},
  doi = {10.1007/s10032-008-0074-4},
  file = {:pdfs/2008 - Optical Recognition of Psaltic Byzantine Chant Notation.pdf:PDF},
  url = {https://doi.org/10.1007/s10032-008-0074-4}
}
@inproceedings{Dalitz2009,
  author = {Dalitz, Christoph and Pranzas, Christine},
  booktitle = {10th International Conference on Document Analysis and Recognition},
  title = {German Lute Tablature Recognition},
  year = {2009},
  pages = {371--375},
  abstract = {This paper describes a document recognition system for 16th century German staffless lute tablature notation. We present methods for page layout analysis, symbol recognition and symbol layout analysis and report error rates for these methods on a variety of historic prints. Page layout analysis is based on horizontal separator lines, which may interfere with other symbols. The proposed algorithm for their detection and removal is also applicable to other single staff line detection problems (like percussion notation), for which common staff line removal algorithms fail.},
  doi = {10.1109/ICDAR.2009.52},
  file = {:pdfs/2009 - German Lute Tablature Recognition.pdf:PDF},
  issn = {1520-5363},
  keywords = {document image processing;error statistics;German lute tablature recognition;document recognition system;German staffless lute tablature notation;page layout analysis;symbol recognition;symbol layout analysis;error rates;percussion notation;staff line removal algorithms;Instruments;Text analysis;Error analysis;Particle separators;Rhythm;Image recognition;Image analysis;Libraries;Natural languages;Music;optical music recognition;staff line removal}
}
@inproceedings{Damm2008,
  author = {Damm, David and Fremerey, Christian and Kurth, Frank and M\"{u}ller, Meinard and Clausen, Michael},
  booktitle = {10th International Conference on Multimodal Interfaces},
  title = {Multimodal Presentation and Browsing of Music},
  year = {2008},
  address = {Chania, Greece},
  pages = {205--208},
  publisher = {ACM},
  acmid = {1452436},
  doi = {10.1145/1452392.1452436},
  file = {:pdfs/2008 - Multimodal Presentation and Browsing of Music.pdf:PDF},
  isbn = {978-1-60558-198-9},
  keywords = {music alignment, music browsing, music information retrieval, music navigation, music synchronization},
  url = {http://doi.acm.org/10.1145/1452392.1452436}
}
@techreport{Dan1996,
  author = {Lee, Sau Dan},
  institution = {The University of Waikato, New Zealand},
  title = {Automatic Optical Music Recognition},
  year = {1996},
  abstract = {In this project, the topic of automatic optical music recognition was studied. It is the conversion of an optically sampled image of a musical score into a representation that can be conveniently stored in computer storage and retrieved for various purposes. It is analogous to optical character recognition. Optical character recognition recognizes text characters in the input images and outputs the text in a machine-readable format. Similarly, an optical music recognition system recognizes the symbols on a musical score and outputs the results in a binary format. Subsequent processing on this output can provide a wide variety of applications, such as reprinting and archiving.},
  file = {:pdfs/1996 - Automatic Optical Music Recognition.pdf:PDF},
  url = {https://www.cs.waikato.ac.nz/~davidb/omr/ftp/lee_report.ps.gz}
}
@mastersthesis{Desaedeleer2006,
  author = {Desaedeleer, Arnaud F.},
  school = {University of London},
  title = {Reading Sheet Music},
  year = {2006},
  abstract = {Optical Music Recognition is the process of recognising a printed
	music score and converting it to a format that is understood by computers.
	This process involves detecting all musical elements present in the
	music score in such a way that the score can be represented digitally.
	For example, the score could be recognised and played back through
	the computer speakers. Much research has been carried out in this
	area and several approaches to performing OMR have been suggested.
	A more recent approach involves segmenting the image using a neural
	network to recognise the segmented symbols from which the score can
	be reconstructed. This project will survey the different techniques
	that have been used to perform OMR on printed music scores and an
	application by the name of OpenOMR will be developed. One of the
	aims is to create an open source project in which developers in the
	open source community will be able to contribute their ideas in order
	to enhance this application and progress the research in the OMR
	field.},
  file = {:pdfs/2006 - Reading Sheet Music - Master Thesis.pdf:PDF},
  url = {https://sourceforge.net/projects/openomr/}
}
@phdthesis{Diener1990,
  author = {Diener, Glendon Ross},
  school = {Stanford University},
  title = {Modeling music notation: A three-dimensional approach},
  year = {1990},
  address = {Palo Alto, CA},
  file = {:pdfs/1990 - Modeling Music Notation - a Three Dimensional Approach.pdf:PDF},
  url = {ftp://ftp-ccrma.stanford.edu/pub/Publications/Theses/GRDThesis.ps.Z}
}
@inproceedings{Diet2007,
  author = {Diet, J{\"u}rgen and Kurth, Frank},
  booktitle = {8th International Conference on Music Information Retrieval},
  title = {The Probado Music Repository at the Bavarian State Library},
  year = {2007},
  address = {Vienna, Austria},
  pages = {501--504},
  file = {:pdfs/2007 - The PROBADO Music Repository at the Bavarian State Library.pdf:PDF},
  url = {http://ismir2007.ismir.net/proceedings/ISMIR2007_p501_diet.pdf}
}
@article{Diet2018,
  author = {Diet, J{\"{u}}rgen},
  journal = {BIBLIOTHEK -- Forschung und Praxis},
  title = {Optical Music Recognition in der Bayerischen Staatsbibliothek},
  year = {2018},
  doi = {10.18452/18953},
  file = {:pdfs/2018 - Optical Music Recognition in der Bayerischen Staatsbibliothek.pdf:PDF},
  language = {German}
}
@inproceedings{Diet2018a,
  author = {Diet, J{\"{u}}rgen},
  booktitle = {5th International Conference on Digital Libraries for Musicology},
  title = {Innovative MIR Applications at the Bayerische Staatsbibliothek},
  year = {2018},
  address = {Paris, France},
  abstract = {This short position paper gives an insight into the digitization of music prints in the Bayerische Staatsbibliothek and describes two music information retrieval applications in the Bayerische Staatsbibliothek. One of them is a melody search application based on OMR data that has been generated with 40.000 pages of digitized music prints containing all compositions of L. van Beethoven, G. F. Händel, F. Liszt, and F. Schubert. The other one is the incipit search in the International Inventory of Musical Sources (Répertoire International des Sources Musicales, RISM).},
  file = {:pdfs/2018 - Innovative MIR Applications at the Bayerische Staatsbibliothek.pdf:PDF},
  keywords = {Optical Music Recognition; Digital Library; Digitalization},
  url = {https://dlfm.web.ox.ac.uk/sites/default/files/dlfm/documents/media/diet-innovative-mir-bsb.pdf}
}
@article{Ding2014,
  author = {Ding, Ing-Jr and Yen, Chih-Ta and Chang, Che-Wei and Lin, He-Zhong},
  journal = {Journal of Vibroengineering},
  title = {Optical music recognition of the singer using formant frequency estimation of vocal fold vibration and lip motion with interpolated GMM classifiers},
  year = {2014},
  issn = {1392-8716},
  number = {5},
  pages = {2572--2581},
  volume = {16},
  abstract = {The main work of this paper is to identify the musical genres of the singer by performing the optical detection of lip motion. Recently, optical music recognition has attracted much attention. Optical music recognition in this study is a type of automatic techniques in information engineering, which can be used to determine the musical style of the singer. This paper proposes a method for optical music recognition where acoustic formant analysis of both vocal fold vibration and lip motion are employed with interpolated Gaussian mixture model (GMM) estimation to perform musical genre classification of the singer. The developed approach for such classification application is called GMM-Formant. Since humming and voiced speech sounds cause periodic vibrations of the vocal folds and then the corresponding motion of the lip, the proposed GMM-Formant firstly operates to acquire the required formant information. Formant information is important acoustic feature data for recognition classification. The proposed GMM-Formant method then uses linear interpolation for combining GMM likelihood estimates and formant evaluation results appropriately. GMM-Formant will effectively adjust the estimated formant feature evaluation outcomes by referring to certain degree of the likelihood score derived from GMM calculations. The superiority and effectiveness of presented GMM-Formant are demonstrated by a series of experiments on musical genre classification of the singer.},
  affiliation = {Department of Electrical Engineering, National Formosa University, Yunlin, Taiwan},
  author_keywords = {Acoustic formant; Gaussian mixture model; Lip motion; Musical genre classification; Vocal fold vibration},
  correspondence_address1 = {Yen, C.-T.; Department of Electrical Engineering, National Formosa University, Taiwan},
  file = {:pdfs/2014 - Optical Music Recognition of the Singer Using Formant Frequency Estimation of Vocal Fold Vibration and Lip Motion with Interpolated GMM Classifiers.pdf:PDF},
  keywords = {Frequency estimation; Gaussian distribution; Speech; Vibration analysis, Formant frequency estimation; Gaussian Mixture Model; Information engineerings; Linear Interpolation; Lip motions; Musical genre classification; Optical music recognition; Vocal fold vibration, Classification (of information)},
  publisher = {Vibromechanika},
  url = {https://www.jvejournals.com/article/14921}
}
@article{Dinh2016,
  author = {Dinh, Cong Minh and Yang, Hyung-Jeong and Lee, Guee-Sang and Kim, Soo-Hyung},
  journal = {IEICE Transactions on Information and Systems},
  title = {Fast lyric area extraction from images of printed Korean music scores},
  year = {2016},
  issn = {0916-8532},
  number = {6},
  pages = {1576--1584},
  volume = {E99-D},
  abstract = {In recent years, optical music recognition (OMR) has been extensively developed, particularly for use with mobile devices that require fast processing to recognize and play live the notes in images captured from sheet music. However, most techniques that have been developed thus far have focused on playing back instrumental music and have ignored the importance of lyric extraction, which is time consuming and affects the accuracy of the OMR tools. The text of the lyrics adds complexity to the page layout, particularly when lyrics touch or overlap musical symbols, in which case it is very difficult to separate them from each other. In addition, the distortion that appears in captured musical images makes the lyric lines curved or skewed, making the lyric extraction problem more complicated. This paper proposes a new approach in which lyrics are detected and extracted quickly and effectively. First, in order to resolve the distortion problem, the image is undistorted by a method using information of stave lines and bar lines. Then, through the use of a frequency count method and heuristic rules based on projection, the lyric areas are extracted, the cases where symbols touch the lyrics are resolved, and most of the information from the musical notation is kept even when the lyrics and music notes are overlapping. Our algorithm demonstrated a short processing time and remarkable accuracy on two test datasets of images of printed Korean musical scores: The first set included three hundred scanned musical images; the second set had two hundred musical images that were captured by a digital camera. © 2016 The Institute of Electronics, Information and Communication Engineers.},
  affiliation = {School of Electronic and Computer Science, Chonnam National University, Gwangju, South Korea},
  author_keywords = {Lyric area; Lyric detection; Lyric extraction; Optical music recognition},
  doi = {10.1587/transinf.2015EDP7296},
  file = {:pdfs/2016 - Fast Lyric Area Extraction from Images of Printed Korean Music Scores.pdf:PDF},
  keywords = {Extraction; Heuristic methods, Fast Processing; Frequency counts; Heuristic rules; Lyric area; Musical notation; Musical symbols; Optical music recognition; Short processing time, Image processing},
  publisher = {Maruzen Co., Ltd.}
}
@article{Dorfer2016,
  author = {Dorfer, Matthias and Arzt, Andreas and Widmer, Gerhard},
  journal = {Computing Research Repository},
  title = {Towards End-to-End Audio-Sheet-Music Retrieval},
  year = {2016},
  volume = {abs/1612.05070},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/journals/corr/DorferAW16a},
  file = {:pdfs/2016 - Towards End to End Audio Sheet Music Retrieval.pdf:PDF},
  url = {http://arxiv.org/abs/1612.05070}
}
@inproceedings{Dorfer2016a,
  author = {Dorfer, Matthias and Arzt, Andreas and Widmer, Gerhard},
  booktitle = {17th International Society for Music Information Retrieval Conference},
  title = {Towards Score Following In Sheet Music Images},
  year = {2016},
  editor = {Mandel, Michael I. and Devaney, Johanna and Turnbull, Douglas and Tzanetakis, George},
  pages = {789--795},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/DorferAW16},
  file = {:pdfs/2016 - Towards Score Following in Sheet Music Images.pdf:PDF},
  isbn = {978-0-692-75506-8},
  url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/07/027_Paper.pdf}
}
@article{Dorfer2018,
  author = {Dorfer, Matthias and Haji{\v{c}} jr., Jan and Arzt, Andreas and Frostel, Harald and Widmer, Gerhard},
  journal = {Transactions of the International Society for Music Information Retrieval},
  title = {Learning Audio--Sheet Music Correspondences for Cross-Modal Retrieval and Piece Identification},
  year = {2018},
  number = {1},
  pages = {22--33},
  volume = {1},
  doi = {10.5334/tismir.12},
  file = {:pdfs/2018 - Learning Audio Sheet Music Correspondences for Cross Modal Retrieval and Piece Identification.pdf:PDF}
}
@inproceedings{Dorfer2018a,
  author = {Dorfer, Matthias and Henkel, Florian and Widmer, Gerhard},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Learning to Listen, Read and Follow: Score Following as a Reinforcement Learning Game},
  year = {2018},
  address = {Paris, France},
  pages = {784--791},
  file = {:pdfs/2018 - Learning to Listen, Read and Follow - Score Following As a Reinforcement Learning Game.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/45_Paper.pdf}
}
@article{Dovey2004,
  author = {Dovey, Matthew J.},
  journal = {Journal of the American Society for Information Science and Technology},
  title = {Overview of the {OMRAS} Project: Online Music Retrieval and Searching},
  year = {2004},
  number = {12},
  pages = {1100--1107},
  volume = {55},
  abstract = {Until recently, most research on music information retrieval concentrated on monophonic music. Online Music Retrieval and Searching (OMRAS) is a three-year project funded under the auspices of the JISC (Joint Information Systems Committee)/NSF (National Science Foundation) International Digital Library Initiative which began in 1999 and whose remit was to investigate the issues surrounding polyphonic music information retrieval. Here we outline the work OMRAS has achieved in pattern matching, document retrieval, and audio transcription, as well as some prototype work in how to implement these techniques into library systems.},
  doi = {10.1002/asi.20063},
  file = {:pdfs/2004 - Overview of the OMRAS Project - Online Music Retrieval and Searching.pdf:PDF},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/asi.20063}
}
@techreport{Droettboom2001,
  author = {Droettboom, Michael and Fujinaga, Ichiro},
  institution = {Johns Hopkins University},
  title = {Interpreting the semantics of music notation using an extensible and object-oriented system},
  year = {2001},
  file = {:pdfs/2001 - Interpreting the Semantics of Music Notation Using an Extensible and Object Oriented System.pdf:PDF},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.84.7545&rep=rep1&type=pdf}
}
@inproceedings{Droettboom2002,
  author = {Droettboom, Michael and Fujinaga, Ichiro and MacMillan, Karl},
  booktitle = {Structural, Syntactic, and Statistical Pattern Recognition},
  title = {Optical Music Interpretation},
  year = {2002},
  address = {Berlin, Heidelberg},
  editor = {Caelli, Terry and Amin, Adnan and Duin, Robert P. W. and de Ridder, Dick and Kamel, Mohamed},
  pages = {378--387},
  publisher = {Springer Berlin Heidelberg},
  abstract = {A system to convert digitized sheet music into a symbolic music representation is presented. A pragmatic approach is used that conceptualizes this primarily two-dimensional structural recognition problem as a one-dimensional one. The transparency of the implementation owes a great deal to its implementation in a dynamic, object-oriented language. This system is a part of a locally developed end-to-end solution for the conversion of digitized sheet music into symbolic form.},
  doi = {10.1007/3-540-70659-3_39},
  file = {:pdfs/2002 - Optical Music Interpretation.pdf:PDF},
  isbn = {978-3-540-70659-5}
}
@inproceedings{Droettboom2002a,
  author = {Droettboom, Michael and Fujinaga, Ichiro and MacMillan, Karl and Choudhury, G. Sayeed and DiLauro, Tim and Patton, Mark and Anderson, Teal},
  booktitle = {Joint Conference on Digital Libraries},
  title = {Using the Gamera framework for the recognition of cultural heritage materials},
  year = {2002},
  address = {Portland, USA},
  pages = {12--17},
  comment = {https://dl.acm.org/citation.cfm?id=544223},
  file = {:pdfs/2002 - Using the Gamera Framework for the Recognition of Cultural Heritage Materials.pdf:PDF},
  url = {http://droettboom.com/papers/p74-droettboom.pdf}
}
@inproceedings{Droettboom2004,
  author = {Droettboom, Michael and Fujinaga, Ichiro},
  booktitle = {5th International Conference on Music Information Retrieval},
  title = {Symbol-level groundtruthing environment for {OMR}},
  year = {2004},
  pages = {497--500},
  file = {:pdfs/2004 - Symbol Level Groundtruthing Environment for OMR.pdf:PDF},
  groups = {evaluation},
  url = {http://ismir2004.ismir.net/proceedings/p090-page-497-paper117.pdf}
}
@inproceedings{Dutta2010,
  author = {Dutta, Anjan and Pal, Umapada and Forn{\'{e}}s, Alicia and Llad{\'o}s, Josep},
  booktitle = {20th International Conference on Pattern Recognition},
  title = {An Efficient Staff Removal Approach from Printed Musical Documents},
  year = {2010},
  pages = {1965--1968},
  abstract = {Staff removal is an important preprocessing step of the Optical Music Recognition (OMR). The process aims to remove the stafflines from a musical document and retain only the musical symbols, later these symbols are used effectively to identify the music information. This paper proposes a simple but robust method to remove stafflines from printed musical scores. In the proposed methodology we have considered a staffline segment as a horizontal linkage of vertical black runs with uniform height. We have used the neighbouring properties of a staffline segment to validate it as a true segment. We have considered the dataset along with the deformations described in for evaluation purpose. From experimentation we have got encouraging results.},
  doi = {10.1109/ICPR.2010.484},
  file = {:pdfs/2010 - An Efficient Staff Removal Approach from Printed Musical Documents.pdf:PDF},
  issn = {1051-4651},
  keywords = {document image processing;music;optical character recognition;efficient staff removal approach;printed musical documents;optical music recognition;musical symbols;music information;printed musical scores;staffline segment;Pixel;Error analysis;Emulation;Image segmentation;Couplings;Pattern recognition;Computer vision;OMR;musical scores;staffline;staffline segments;staffline height;staffspace height;staff removal}
}
@inproceedings{Egozy2022,
  author = {Egozy, Eran and Clester, Ian},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Computer-Assisted Measure Detection in a Music Score-Following Application},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {33--36},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Computer Assisted Measure Detection in a Music Score Following Application.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{Eipert2019,
  author = {Eipert, Tim and Herrmann, Felix and Wick, Christoph and Puppe, Frank and Haug, Andreas},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {Editor Support for Digital Editions of Medieval Monophonic Music},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {4--7},
  file = {:pdfs/2019 - Editor Support for Digital Editions of Medieval Monophonic Music.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@inproceedings{Elezi2018,
  author = {Elezi, Ismail and Tuggener, Lukas and Pelillo, Marcello and Stadelmann, Thilo},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {DeepScores and Deep Watershed Detection: current state and open issues},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {13--14},
  file = {:pdfs/2018 - DeepScores and Deep Watershed Detection - Current State and Open Issues.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@mastersthesis{Elezi2020,
  author = {Elezi, Ismail},
  school = {Ca' Foscari University of Venice},
  title = {Exploiting Contextual Information with Deep Neural Networks},
  year = {2020},
  type = {mathesis},
  file = {:pdfs/2020 - Exploiting Contextual Information with Deep Neural Networks.pdf:PDF},
  url = {https://arxiv.org/pdf/2006.11706.pdf}
}
@mastersthesis{Essmayr1994,
  author = {Essmayr, Wolfgang},
  school = {Johannes Kepler University Linz},
  title = {Optische-Musik-Erkennung ({OME}), Erkennung von Notenschrift},
  year = {1994},
  address = {Austria},
  comment = {Official bibtex-key: https://www.jku.at/forschung/forschungs-dokumentation/publikation/12148/},
  file = {:pdfs/1994 - Optische Musik Erkennung (OME), Erkennung Von Notenschrift.pdf:PDF},
  language = {German},
  url = {https://www.cs.waikato.ac.nz/~davidb/omr/ftp/94-we.ps.gz}
}
@incollection{Fahmy1993,
  author = {Fahmy, Hoda M. and Blostein, Dorothea},
  booktitle = {Advances in Structural and Syntactic Pattern Recognition},
  publisher = {World Scientific},
  title = {Graph Grammar Processing of Uncertain Data},
  year = {1993},
  pages = {373--382},
  abstract = {Graph grammars may be used to extract the information content from diagrams where there is uncertainty about symbol identity. The input to the graph grammar is derived from the output of a symbol recognizer. We propose a way in which uncertainty can be represented by a graph and a method which extracts the information content of the diagram. We consider the application of graph grammars to the recognition of diagrams such as music scores.},
  doi = {10.1142/9789812797919_0031},
  file = {:pdfs/1993 - Graph Grammar Processing of Uncertain Data.pdf:PDF}
}
@article{Fahmy1993a,
  author = {Fahmy, Hoda M. and Blostein, Dorothea},
  journal = {Machine Vision and Applications},
  title = {A graph grammar programming style for recognition of music notation},
  year = {1993},
  issn = {1432-1769},
  number = {2},
  pages = {83--99},
  volume = {6},
  abstract = {Graph grammars are a promising tool for solving picture processing problems. However, the application of graph grammars to diagram recognition has been limited to rather simple analysis of local symbol configurations. This paper introduces the Build-Weed-Incorporate programming style for graph grammars and shows its application in determining the meaning of complex diagrams, where the interaction among physically distant symbols is semantically important. Diagram recognition can be divided into two stages: symbol recognition and high-level recognition. Symbol recognition has been studied extensively in the literature. In this work we assume the existence of a symbol recognizer and use a graph grammar to assemble the diagram's information content from the symbols and their spatial relationships. The Build-Weed-Incorporate approach is demonstrated by a detailed discussion of a graph grammar for high-level recognition of music notation.},
  doi = {10.1007/BF01211933},
  url = {https://doi.org/10.1007/BF01211933}
}
@inproceedings{Fahmy1994,
  author = {Fahmy, Hoda M. and Blostein, Dorothea},
  booktitle = {International Symposium on Electronic Imaging: Science and Technology},
  title = {Graph-rewriting approach to discrete relaxation: application to music recognition},
  year = {1994},
  pages = {2181 - 2181 - 12},
  abstract = {In image analysis, low-level recognition of the primitives plays a very important role. Once the primitives of the image are recognized, depending on the application, many types of analyses can take place. It is likely that associated with each object or primitive is a set of possible interpretations, herein referred to as the label set. The low-level recognizer may associate a probability with each label in the label set. We can use the constraints of the application domain to reduce the ambiguity in the object's identity. This process is variously termed constraint satisfaction, labeling, or relaxation. In this paper, we focus on the discrete form of relaxation. Our contribution lies in the development of a graph-rewriting approach which does not assume the degree of localness is high. We apply our approach to the recognition of music notation, where non-local interactions between primitives must be used in order to reduce ambiguity in the identity of the primitives. We use graph-rewriting rules to express not only binary constraints, but also higher-order notational constraints.},
  doi = {10.1117/12.171116}
}
@article{Fahmy1998,
  author = {Fahmy, Hoda M. and Blostein, Dorothea},
  journal = {International Journal of Pattern Recognition and Artificial Intelligence},
  title = {A graph-rewriting paradigm for discrete relaxation: Application to sheet-music recognition},
  year = {1998},
  number = {6},
  pages = {763--799},
  volume = {12},
  abstract = {In image analysis, recognition of the primitives plays an important role. Subsequent analysis is used to interpret the arrangement of primitives. This subsequent analysis must make allowance for errors or ambiguities in the recognition of primitives. In this paper, we assume that the primitive recognizer produces a set of possible interpretations for each primitive. To reduce this primitive-recognition ambiguity, we use contextual information in the image, and apply constraints from the image domain. This process is variously termed constraint satisfaction, labeling or discrete relaxation. Existing methods for discrete relaxation are limited in that they assume a priori knowledge of the neighborhood model: before relaxation begins, the system is told (or can determine) which sets of primitives are related by constraints. These methods do not apply to image domains in which complex analysis is necessary to determine which primitives are related by constraints. For example, in music notation, we must recognize which notes belong to one measure, before it is possible to apply the constraint that the number of beats in the measure should match the time signature. Such constraints can be handled by our graph-rewriting paradigm for discrete relaxation: here neighborhood-model construction is interleaved with constraint-application. In applying this approach to the recognition of simple music notation, we use approximately 180 graph-rewriting rules to express notational constraints and semantic-interpretation rules for music notation. The graph rewriting rules express both binary and higher-order notational constraints. As image-interpretation proceeds, increasingly abstract levels of interpretation are assigned to (groups of) primitives. This allows application of higher-level constraints, which can be formulated only after partial interpretation of the image.},
  doi = {10.1142/S0218001498000439},
  keywords = {constraints; discrete relaxation; label set; neighborhood model; interaction model; graph rewriting; document-image analysis; diagram understanding; graphics recognition; optical music recognition Grammars; languages; systems}
}
@article{Fang2015,
  author = {Fang, Yang and Gui-fa, Teng},
  journal = {International Journal of Machine Learning and Cybernetics},
  title = {Visual music score detection with unsupervised feature learning method based on K-means},
  year = {2015},
  issn = {1868-8071},
  number = {2},
  pages = {277--287},
  volume = {6},
  abstract = {Automatic music score detection plays important role in the optical music recognition (OMR). In a visual image, the characteristic of the music scores is frequently degraded by illumination, distortion and other background elements. In this paper, to reduce the influences to OMR caused by those degradations especially the interference of Chinese character, an unsupervised feature learning detection method is proposed for improving the correctness of music score detection. Firstly, a detection framework was constructed. Then sub-image block features were extracted by simple unsupervised feature learning (UFL) method based on K-means and classified by SVM. Finally, music score detection processing was completed by connecting component searching algorithm based on the sub-image block label. Taking Chinese text as the main interferences, the detection rate was compared between UFL method and texture feature method based on 2D Gabor filter in the same framework. The experiment results show that unsupervised feature learning method gets less error detection rate than Gabor texture feature method with limited training set. © 2014, Springer-Verlag Berlin Heidelberg.},
  affiliation = {College of Mechanical and Electrical Engineering, Agricultural University of Hebei, Lingyusi Street, No. 289, Baoding, China; College of Mathematics and Computer Science, Hebei University, Wusi East Road, No. 180, Baoding, China},
  author_keywords = {Gabor; Music score; Texture; Unsupervised feature learning; Visual image},
  correspondence_address1 = {Gui-fa, T.; College of Mechanical and Electrical Engineering, Agricultural University of Hebei, Lingyusi Street, No. 289, China},
  doi = {10.1007/s13042-014-0260-2},
  file = {:pdfs/2015 - Visual music score detection with unsupervised feature learning method based on K-means.pdf:PDF},
  funding_details = {61375075, NSFC, National Natural Science Foundation of China},
  keywords = {Gabor filters; Image processing; Interference suppression; Learning systems; Textures, Detection framework; Gabor; Gabor texture features; Music scores; Optical music recognition; Searching algorithms; Unsupervised feature learning; Visual image, Feature extraction},
  publisher = {Springer Verlag}
}
@inproceedings{Ferrand1998,
  author = {Ferrand, Miguel and Cardoso, Am{\'i}lcar},
  booktitle = {Advances in Artificial Intelligence},
  title = {Scheduling to Reduce Uncertainty in Syntactical Music Structures},
  year = {1998},
  address = {Berlin, Heidelberg},
  editor = {de Oliveira, Fl{\'a}vio Moreira},
  pages = {249--258},
  publisher = {Springer Berlin Heidelberg},
  abstract = {In this paper, we focus on the syntactical aspects of music representation. We look at a music score as a structured layout of events with intrinsic temporal significance and we show that important basic relations between these events can be inferred from the topology of symbol objects in a music score. Within this framework, we propose a scheduling algorithm to find consistent assignments of events to voices, in the presence of uncertain information. Based on some experimental results, we show how we may use this approach to improve the accuracy of an Optical Music Recognition system.},
  doi = {10.1007/10692710_26},
  file = {:pdfs/1998 - Scheduling to Reduce Uncertainty in Syntactical Music Structures.pdf:PDF},
  isbn = {978-3-540-49523-9}
}
@inproceedings{Ferrand1999,
  author = {Ferrand, Miguel and Leite, Jo{\~a}o Alexandre and Cardoso, Amilcar},
  booktitle = {Appia-Gulp-Prode'99 Joint Conference on Declarative Programming},
  title = {Hypothetical reasoning: An application to Optical Music Recognition},
  year = {1999},
  pages = {367--381},
  file = {:pdfs/1999 - Hypothetical Reasoning - an Application to Optical Music Recognition.pdf:PDF},
  groups = {interpretation},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.43.7672&rep=rep1&type=pdf}
}
@inproceedings{Ferrand1999a,
  author = {Ferrand, Miguel and Leite, Jo{\~a}o Alexandre and Cardoso, Amilcar},
  booktitle = {Progress in Artificial Intelligence},
  title = {Improving Optical Music Recognition by Means of Abductive Constraint Logic Programming},
  year = {1999},
  address = {Berlin, Heidelberg},
  editor = {Barahona, Pedro and Alferes, Jos{\'e} J.},
  pages = {342--356},
  publisher = {Springer Berlin Heidelberg},
  abstract = {In this paper we propose a hybrid system that bridges the gap between traditional image processing methods, used for low-level object recognition, and abductive constraint logic programming used for high-level musical interpretation. Optical Music Recognition (OMR) is the automatic recognition of a scanned page of printed music. All such systems are evaluated by their rate of successful recognition; therefore a reliable OMR program should be able to detect and eventually correct its own recognition errors. Since we are interested in dealing with polyphonic music, some additional complexity is introduced as several concurrent voices and simultaneous musical events may occur. In RIEM, the OMR system we are developing, when events are inaccurately recognized they will generate inconsistencies in the process of voice separation. Furthermore if some events are missing a consistent voice separation may not even be possible.},
  doi = {10.1007/3-540-48159-1_24},
  file = {:pdfs/1999 - Improving Optical Music Recognition by Means of Abductive Constraint Logic Programming.pdf:PDF},
  isbn = {978-3-540-48159-1}
}
@mastersthesis{Fornes2005,
  author = {Forn\'{e}s, Alicia},
  school = {Universitat Autònoma de Barcelona},
  title = {Analysis of Old Handwritten Musical Scores},
  year = {2005},
  file = {:pdfs/2005 - Analysis of Old Handwritten Musical Scores.pdf:PDF},
  supervisor = {Llad{\'o}s, Josep},
  url = {http://www.cvc.uab.es/~afornes/publi/AFornes_Master.pdf}
}
@inproceedings{Fornes2006,
  author = {Forn{\'e}s, Alicia and Llad{\'o}s, Josep and S{\'a}nchez, Gemma},
  booktitle = {Graphics Recognition. Ten Years Review and Future Perspectives},
  title = {Primitive Segmentation in Old Handwritten Music Scores},
  year = {2006},
  address = {Berlin, Heidelberg},
  editor = {Liu, Wenyin and Llad{\'o}s, Josep},
  pages = {279--290},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Optical Music Recognition consists in the identification of music information from images of scores. In this paper, we propose a method for the early stages of the recognition: segmentation of staff lines and graphical primitives in handwritten scores. After introducing our work with modern musical scores (where projections and Hough Transform are effectively used), an approach to deal with ancient handwritten scores is exposed. The recognition of such these old scores is more difficult due to paper degradation and the lack of a standard in musical notation. Our method has been tested with several scores of 19th century with high performance rates.},
  doi = {10.1007/11767978_25},
  file = {:pdfs/2006 - Primitive segmentation in old handwritten music scores.pdf:PDF},
  isbn = {978-3-540-34712-5}
}
@inproceedings{Fornes2008,
  author = {Forn{\'{e}}s, Alicia and Llad{\'{o}}s, Josep and S{\'{a}}nchez, Gemma and Bunke, Horst},
  booktitle = {8th International Workshop on Document Analysis Systems},
  title = {Writer Identification in Old Handwritten Music Scores},
  year = {2008},
  address = {Nara, Japan},
  pages = {347--353},
  abstract = {The aim of writer identification is determining the writer of a piece of handwriting from a set of writers. In this paper we present a system for writer identification in old handwritten music scores. Even though an important amount of compositions contains handwritten text in the music scores, the aim of our work is to use only music notation to determine the author. The steps of the system proposed are the following. First of all, the music sheet is preprocessed and normalized for obtaining a single binarized music line, without the staff lines. Afterwards, 100 features are extracted for every music line, which are subsequently used in a k-NN classifier that compares every feature vector with prototypes stored in a database. By applying feature selection and extraction methods on the original feature set, the performance is increased. The proposed method has been tested on a database of old music scores from the 17th to 19th centuries, achieving a recognition rate of about 95%.},
  doi = {10.1109/DAS.2008.29},
  file = {:pdfs/2008 - Writer Identification in Old Handwritten Music Scores.pdf:PDF},
  keywords = {document handling;feature extraction;handwriting recognition;music;pattern classification;writer identification;old handwritten music scores;handwritten text;music notation;music sheet preprocessing;binarized music line;feature extraction;k-NN classifier;feature selection;document analysis;Feature extraction;Text analysis;Computer science;Prototypes;Spatial databases;Image recognition;Handwriting recognition;Computer vision;Mathematics;Testing;Old documents;Handwritten recognition;Writer Identification}
}
@inproceedings{Fornes2008a,
  author = {Forn{\'e}s, Alicia and Llad{\'o}s, Josep and S{\'a}nchez, Gemma},
  booktitle = {Graphics Recognition. Recent Advances and New Opportunities},
  title = {Old Handwritten Musical Symbol Classification by a Dynamic Time Warping Based Method},
  year = {2008},
  address = {Berlin, Heidelberg},
  editor = {Liu, Wenyin and Llad{\'o}s, Josep and Ogier, Jean-Marc},
  pages = {51--60},
  publisher = {Springer Berlin Heidelberg},
  abstract = {A growing interest in the document analysis field is the recognition of old handwritten documents, towards the conversion into a readable format. The difficulties when we work with old documents are increased, and other techniques are required for recognizing handwritten graphical symbols that are drawn in such these documents. In this paper we present a Dynamic Time Warping based method that outperforms the classical descriptors, being also invariant to scale, rotation, and elastic deformations typical found in handwriting musical notation.},
  doi = {10.1007/978-3-540-88188-9_6},
  file = {:pdfs/2008 - Old Handwritten Musical Symbol classification by a Dynamic Time Warping based method.pdf:PDF},
  isbn = {978-3-540-88188-9}
}
@inproceedings{Fornes2009,
  author = {Forn{\'{e}}s, Alicia and Llad{\'{o}}s, Josep and S{\'{a}}nchez, Gemma and Bunke, Horst},
  booktitle = {10th International Conference on Document Analysis and Recognition},
  title = {On the Use of Textural Features for Writer Identification in Old Handwritten Music Scores},
  year = {2009},
  pages = {996--1000},
  doi = {10.1109/ICDAR.2009.100},
  file = {:pdfs/2009 - On the Use of Textural Features for Writer Identification in Old Handwritten Music Scores.pdf:PDF},
  isbn = {978-1-4244-4500-4},
  keywords = {writer features},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5277541}
}
@phdthesis{Fornes2009a,
  author = {Forn{\'{e}}s, Alicia},
  school = {Universitat Autònoma de Barcelona},
  title = {Writer Identification by a Combination of Graphical Features in the Framework of Old Handwritten Music Scores},
  year = {2009},
  file = {:pdfs/2009 - Writer Identification by a Combination of Graphical Features in the Framework of Old Handwritten Music Scores.pdf:PDF},
  url = {http://www.cvc.uab.es/~afornes/publi/PhDAliciaFornes.pdf}
}
@inproceedings{Fornes2011,
  author = {Forn{\'{e}}s, Alicia and Dutta, Anjan and Gordo, Albert and Llad{\'o}s, Josep},
  booktitle = {International Conference on Document Analysis and Recognition},
  title = {The {ICDAR} 2011 Music Scores Competition: Staff Removal and Writer Identification},
  year = {2011},
  pages = {1511--1515},
  abstract = {In the last years, there has been a growing interest in the analysis of handwritten music scores. In this sense, our goal has been to foster the interest in the analysis of handwritten music scores by the proposal of two different competitions: Staff removal and Writer Identification. Both competitions have been tested on the CVC-MUSCIMA database: a ground-truth of handwritten music score images. This paper describes the competition details, including the dataset and ground-truth, the evaluation metrics, and a short description of the participants, their methods, and the obtained results.},
  doi = {10.1109/ICDAR.2011.300},
  file = {:pdfs/2011 - The ICDAR 2011 Music Scores Competition - Staff Removal and Writer Identification.pdf:PDF},
  issn = {2379-2140},
  keywords = {handwritten character recognition;music;ICDAR 2011 music scores competition;staff removal;writer identification;CVC-MUSCIMA database;handwritten music score images;evaluation metrics;Error analysis;Handwriting recognition;Measurement;Support vector machines;Music;Databases;Educational institutions;competition;music scores;writer identification;staff removal}
}
@article{Fornes2012,
  author = {Forn{\'{e}}s, Alicia and Dutta, Anjan and Gordo, Albert and Llad{\'{o}}s, Josep},
  journal = {International Journal on Document Analysis and Recognition},
  title = {{CVC-MUSCIMA}: A Ground-truth of Handwritten Music Score Images for Writer Identification and Staff Removal},
  year = {2012},
  issn = {1433-2825},
  number = {3},
  pages = {243--251},
  volume = {15},
  abstract = {The analysis of music scores has been an active research field in the last decades. However, there are no publicly available databases of handwritten music scores for the research community. In this paper, we present the CVC-MUSCIMA database and ground truth of handwritten music score images. The dataset consists of 1,000 music sheets written by 50 different musicians. It has been especially designed for writer identification and staff removal tasks. In addition to the description of the dataset, ground truth, partitioning, and evaluation metrics, we also provide some baseline results for easing the comparison between different approaches.},
  doi = {10.1007/s10032-011-0168-2},
  file = {:pdfs/2012 - CVC-MUSCIMA - A ground truth of handwritten music score images for writer identification and staff removal.pdf:PDF},
  groups = {datasets},
  keywords = {Music scores; Handwritten documents; Writer identification; Staff removal; Performance evaluation; Graphics recognition; Ground truths},
  publisher = {Springer-Verlag}
}
@inproceedings{Fornes2013,
  author = {Forn{\'e}s, Alicia and Dutta, Anjan and Gordo, Albert and Llad{\'o}s, Josep},
  booktitle = {Graphics Recognition. New Trends and Challenges},
  title = {The 2012 Music Scores Competitions: Staff Removal and Writer Identification},
  year = {2013},
  address = {Berlin, Heidelberg},
  editor = {Kwon, Young-Bin and Ogier, Jean-Marc},
  pages = {173--186},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Since there has been a growing interest in the analysis of handwritten music scores, we have tried to foster this interest by proposing in ICDAR and GREC two different competitions: Staff removal and Writer identification. Both competitions have been tested on the CVC-MUSCIMA database of handwritten music score images. In the corresponding ICDAR publication, we have described the ground-truth, the evaluation metrics, the participants' methods and results. As a result of the discussions with attendees in ICDAR and GREC concerning our music competition, we decided to propose a new experiment for an extended competition. Thus, this paper is focused on this extended competition, describing the new set of images and analyzing the new results.},
  doi = {10.1007/978-3-642-36824-0_17},
  file = {:pdfs/2013 - The 2012 Music Scores Competitions - Staff Removal and Writer Identification.pdf:PDF},
  isbn = {978-3-642-36824-0}
}
@inproceedings{Fornes2014,
  author = {Forn{\'e}s, Alicia and Kieu, Van Cuong and Visani, Muriel and Journet, Nicholas and Dutta, Anjan},
  booktitle = {Graphics Recognition. Current Trends and Challenges},
  title = {The {ICDAR/GREC} 2013 Music Scores Competition: Staff Removal},
  year = {2014},
  address = {Berlin, Heidelberg},
  editor = {Lamiroy, Bart and Ogier, Jean-Marc},
  pages = {207--220},
  publisher = {Springer Berlin Heidelberg},
  abstract = {The first competition on music scores that was organized at ICDAR and GREC in 2011 awoke the interest of researchers, who participated in both staff removal and writer identification tasks. In this second edition, we focus on the staff removal task and simulate a real case scenario concerning old and degraded music scores. For this purpose, we have generated a new set of semi-synthetic images using two degradation models that we previously introduced: local noise and 3D distortions. In this extended paper we provide an extended description of the dataset, degradation models, evaluation metrics, the participant's methods and the obtained results that could not be presented at ICDAR and GREC proceedings due to page limitations.},
  file = {:pdfs/2014 - The ICDAR_GREC 2013 Music Scores Competition - Staff Removal.pdf:PDF},
  isbn = {978-3-662-44854-0},
  url = {https://link.springer.com/chapter/10.1007/978-3-662-44854-0_16}
}
@proceedings{Fornes2018,
  title = {Graphics Recognition, Current Trends and Evolutions},
  year = {2018},
  editor = {Forn{\'{e}}s, Alicia and Lamiroy, Bart},
  isbn = {978-3-030-02283-9},
  publisher = {Springer International Publishing},
  series = {Lecture Notes in Computer Science},
  volume = {11009},
  doi = {10.1007/978-3-030-02284-6},
  file = {:pdfs/2018 - Graphics Recognition, Current Trends and Evolutions.pdf:PDF}
}
@inproceedings{Fotinea2000,
  author = {Fotinea, Stavroula-Evita and Giakoupis, George and Livens, Aggelos and Bakamidis, Stylianos and Carayannis, George},
  booktitle = {RIAO '00 Content-Based Multimedia Information Access},
  title = {An Optical Notation Recognition System for Printed Music Based on Template Matching and High Level Reasoning},
  year = {2000},
  address = {Paris, France},
  pages = {1006--1014},
  publisher = {Le centre de hautes etudes internationales d'informatique documentaire},
  acmid = {2856159},
  file = {:pdfs/2000 - An Optical Notation Recognition System for Printed Music Based on Template Matching and High Level Reasoning.pdf:PDF},
  url = {http://dl.acm.org/citation.cfm?id=2856151.2856159}
}
@inproceedings{Fremerey2008,
  author = {Fremerey, Christian and M{\"{u}}ller, Meinard and Kurth, Frank and Clausen, Michael},
  booktitle = {9th International Conference on Music Information Retrieval},
  title = {Automatic Mapping of Scanned Sheet Music to Audio Recordings},
  year = {2008},
  pages = {413--418},
  abstract = {Significant digitization efforts have resulted in large multimodal music collections comprising visual (scanned sheet music) as well as acoustic material (audio recordings). In this paper, we present a novel procedure for mapping scanned pages of sheet music to a given collection of audio recordings by identifying musically corresponding audio clips. To this end, both the scanned images as well as the audio recordings are first transformed into a common feature representation using optical music recognition ({OMR}) and methods from digital signal processing, respectively. Based on this common representation, a direct comparison of the two different types of data is facilitated. This allows for a search of scan-based queries in the audio collection. We report on systematic experiments conducted on the corpus of Beethoven’s piano sonatas showing that our mapping procedure works with high precision across the two types of music data in the case that there are no severe {OMR} errors. The proposed mapping procedure is relevant in a real-world application scenario at the Bavarian State Library for automatically identifying and annotating scanned sheet music by means of already available annotated audio material.},
  file = {:pdfs/2008 - Automatic Mapping of Scanned Sheet Music to Audio Recordings.pdf:PDF},
  isbn = {978-0-615-24849-3},
  url = {http://ismir2008.ismir.net/papers/ISMIR2008_116.pdf}
}
@inproceedings{Fremerey2009,
  author = {Fremerey, Christian and Damm, David and Kurth, Frank and Clausen, Michael},
  booktitle = {International Conference on Acoustics NAG/DAGA},
  title = {Handling Scanned Sheet Music and Audio Recordings in Digital Music Libraries},
  year = {2009},
  pages = {1--2},
  file = {:pdfs/2009 - Handling Scanned Sheet Music and Audio Recordings in Digital Music Libraries.pdf:PDF},
  url = {https://www.audiolabs-erlangen.de/content/05-fau/professor/00-mueller/03-publications/2009_FremereyDaMuKuCl_ScanAudio_DAGA.pdf}
}
@inproceedings{Fuente2021,
  author = {Fuente, Carlos de la and Valero-Mas, Jose J. and Castellanos, Francisco J. and Calvo-Zaragoza, Jorge},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Multimodal Audio and Image Music Transcription},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {18--22},
  file = {:pdfs/2021 - Multimodal Audio and Image Music Transcription.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@mastersthesis{Fujinaga1988,
  author = {Fujinaga, Ichiro},
  school = {McGill University},
  title = {Optical Music Recognition using Projections},
  year = {1988},
  abstract = {This research examines the feasibility of implementing an optical music score recognition system on a microcomputer. Projection technique is the principal method employed in the recognition process, assisted by some of the structural rules governing musical notation. Musical examples, excerpted mostly from solo repertoire for monophonic instruments and representing various publishers, are used as samples to develop a computer program that recognizes a set of musical symbols. A final test of the system is undertaken, involving additional samples of monophonic music which were not used in the development stage. With these samples, an average recognition rate of 70% is attained without any operator intervention. On an IBM-AT-compatible microcomputer, the total processing time including the scanning operation is about two minutes per page.},
  file = {:pdfs/1988 - Optical Music Recognition using Projections.pdf:PDF},
  url = {https://www.researchgate.net/profile/Ichiro_Fujinaga/publication/38435306_Optical_music_recognition_using_projections/links/546ca7980cf24b753c628c6e.pdf}
}
@inproceedings{Fujinaga1993,
  author = {Fujinaga, Ichiro},
  booktitle = {Enabling Technologies for High-Bandwidth Applications},
  title = {Optical music recognition system which learns},
  year = {1993},
  abstract = {This paper describes an optical music recognition system composed of a database and three interdependent processes: a recognizer, an editor, and a learner. Given a scanned image of a musical score, the recognizer locates, separates, and classifies symbols into musically meaningful categories. This classification is based on the k-nearest neighbor method using a subset of the database that contains features of symbols classified in previous recognition sessions. Output of the recognizer is corrected by a musically trained human operator using a music notation editor. The editor provides both visual and high-quality audio feedback of the output. Editorial corrections made by the operator are passed to the learner which then adds the newly acquired data to the database. The learner's main task, however, involves selecting a subset of the database and reweighing the importance of the features to improve accuracy and speed for subsequent sessions. Good preliminary results have been obtained with everything from professionally engraved scores to hand-written manuscripts.},
  doi = {10.1117/12.139262}
}
@inproceedings{Fujinaga1996,
  author = {Fujinaga, Ichiro},
  booktitle = {International Computer Music Conference},
  title = {Exemplar-based learning in adaptive optical music recognition system},
  year = {1996},
  address = {Hong Kong},
  pages = {55--56},
  file = {:pdfs/1996 - Exemplar Based Learning in Adaptive Optical Music Recognition System.pdf:PDF},
  isbn = {962-85092-1-7},
  url = {http://hdl.handle.net/2027/spo.bbp2372.1996.015}
}
@phdthesis{Fujinaga1996a,
  author = {Fujinaga, Ichiro},
  school = {McGill University},
  title = {Adaptive optical music recognition},
  year = {1996},
  file = {:pdfs/1996 - Adaptive Optical Music Recognition.pdf:PDF},
  url = {http://www.music.mcgill.ca/~ich/research/diss/FujinagaDiss.pdf}
}
@inproceedings{Fujinaga1998,
  author = {Fujinaga, Ichiro and Moore, Stephan and Sullivan, David S.},
  booktitle = {International Conference on Music Perception and Cognition},
  title = {Implementation of exemplar-based learning model for music cognition},
  year = {1998},
  address = {Seoul, South Korea},
  pages = {171--179},
  file = {:pdfs/1998 - Implementation of Exemplar Based Learning Model for Music Cognition.pdf:PDF},
  url = {https://pdfs.semanticscholar.org/8d27/309a9070c5737a1eb5fa7ef5dfc6c9484b89.pdf}
}
@misc{Fujinaga2000,
  author = {Fujinaga, Ichiro},
  howpublished = {\url{http://www.music.mcgill.ca/~ich/research/omr/omrbib.html}},
  title = {Optical Music Recognition Bibliography},
  year = {2000},
  file = {:pdfs/2000 - Optical Music Recognition Bibliography by Ichiro Fujinaga from October 2000.pdf:PDF},
  keywords = {OMR, Optical Music Recognition, Bibliography, References},
  url = {http://www.music.mcgill.ca/~ich/research/omr/omrbib.html}
}
@incollection{Fujinaga2004,
  author = {Fujinaga, Ichiro},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IGI Global},
  title = {Staff detection and removal},
  year = {2004},
  pages = {1--39},
  doi = {10.4018/978-1-59140-298-5.ch001},
  file = {:pdfs/2004 - Staff Detection and Removal.pdf:PDF}
}
@inproceedings{Fujinaga2014,
  author = {Fujinaga, Ichiro and Hankinson, Andrew and Cumming, Julie E.},
  booktitle = {1st International Workshop on Digital Libraries for Musicology},
  title = {Introduction to {SIMSSA} (Single Interface for Music Score Searching and Analysis)},
  year = {2014},
  organization = {ACM},
  pages = {1--3},
  doi = {10.1145/2660168.2660184},
  file = {:pdfs/2014 - Introduction to SIMSSA (Single Interface for Music Score Searching and Analysis).pdf:PDF}
}
@article{Fujinaga2014a,
  author = {Fujinaga, Ichiro and Hankinson, Andrew},
  journal = {Journal of the Japanese Society for Sonic Arts},
  title = {SIMSSA: Single Interface for Music Score Searching and Analysis},
  year = {2014},
  number = {3},
  pages = {25--30},
  volume = {6},
  file = {:pdfs/2004 - SIMSSA - Single Interface for Music Score Searching and Analysis.pdf:PDF},
  url = {http://data.jssa.info/paper/2014v06n03/7.Fujinaga.pdf}
}
@incollection{Fujinaga2018,
  author = {Fujinaga, Ichiro and Hankinson, Andrew and Pugin, Laurent},
  booktitle = {Springer Handbook of Systematic Musicology},
  publisher = {Springer Berlin Heidelberg},
  title = {Automatic Score Extraction with Optical Music Recognition ({OMR})},
  year = {2018},
  address = {Berlin, Heidelberg},
  isbn = {978-3-662-55004-5},
  pages = {299--311},
  abstract = {Optical music recognition (OMR) describes the process of automatically transcribing music notation from a digital image. Although similar to optical character recognition (OCR), the process and procedures of OMR diverge due to the fundamental differences between text and music notation, such as the two-dimensional nature of the notation system and the overlay of music symbols on top of staff lines. The OMR process can be described as a sequence of steps, with techniques adapted from disciplines including image processing, machine learning, grammars, and notation encoding. The sequence and specific techniques used can differ depending on the condition of the image, the type of notation, and the desired output.},
  doi = {10.1007/978-3-662-55004-5_16},
  file = {:pdfs/2018 - Automatic Score Extraction with Optical Music Recognition (OMR).pdf:PDF}
}
@inproceedings{Fujinaga2023,
  author = {Fujinaga, Ichiro and Vigliensoni, Gabriel},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Optical Music Recognition Workflow for Medieval Music Manuscripts},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {4--6},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Optical Music Recognition Workflow for Medieval Music Manuscripts.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@techreport{Galea2014,
  author = {G{\^a}lea, Dan and Rotaru, Florin and Bejinariu, Silviu-Ioan and Bulea, Mihai and Murgu, Dan and Pescaru, Simona and Apopei, Vasile and Murgu, Mihaela and Rusu, Irina},
  institution = {Universitatea Tehnic{\u{a}} Gheorghe Asachi din Ia{\c{s}}i},
  title = {A review on printed music recognition system developed in Institute of Computer Science Ia{\c{s}}i},
  year = {2014},
  number = {LXIV},
  file = {:pdfs/2014 - A Review on Printed Music Recognition System Developed in Institute of Computer Science IASI.pdf:PDF},
  keywords = {2010 Mathematics Subject Classification: 68T10, 68U10; classification; musical information reconstruction; musical symbols recognition; staff lines detection},
  url = {http://www12.tuiasi.ro/users/103/Buletin_2014_1_49-66_4_Galea__AC%201_2014.pdf}
}
@article{Gallego2017,
  author = {Gallego, Antonio-Javier and Calvo-Zaragoza, Jorge},
  journal = {Expert Systems with Applications},
  title = {Staff-line removal with selectional auto-encoders},
  year = {2017},
  issn = {0957-4174},
  pages = {138--148},
  volume = {89},
  abstract = {Staff-line removal is an important preprocessing stage as regards most Optical Music Recognition systems. The common procedures employed to carry out this task involve image processing techniques. In contrast to these traditional methods, which are based on hand-engineered transformations, the problem can also be approached from a machine learning point of view if representative examples of the task are provided. We propose doing this through the use of a new approach involving auto-encoders, which select the appropriate features of an input feature set (Selectional Auto-Encoders). Within the context of the problem at hand, the model is trained to select those pixels of a given image that belong to a musical symbol, thus removing the lines of the staves. Our results show that the proposed technique is quite competitive and significantly outperforms the other state-of-art strategies considered, particularly when dealing with grayscale input images.},
  doi = {10.1016/j.eswa.2017.07.002},
  file = {:pdfs/2017 - Staff-line removal with selectional auto-encoders.pdf:PDF},
  keywords = {Staff-line removal},
  url = {http://www.sciencedirect.com/science/article/pii/S0957417417304712}
}
@inproceedings{Gan2005,
  author = {Gan, Ting},
  booktitle = {5th ACM/IEEE-CS Joint Conference on Digital Libraries},
  title = {M\'{u}sica Colonial: 18th Century Music Score Meets 21st Century Digitalization Technology},
  year = {2005},
  address = {Denver, USA},
  pages = {379--379},
  publisher = {ACM},
  acmid = {1065482},
  doi = {10.1145/1065385.1065482},
  file = {:pdfs/2005 - Musica Colonial - 18th Century Music Score Meets 21st Century Digitalization Technology.pdf:PDF},
  isbn = {1-58113-876-8},
  keywords = {music score transcription, music sheet digitization}
}
@inproceedings{GarridoMunoz2022,
  author = {Garrido-Mu{\~{n}}oz, Carlos and R{\'{i}}os-Vila, Antonio and Calvo-Zaragoza, Jorge},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {End-to-End Graph Prediction for Optical Music Recognition},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {25--28},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - End to End Graph Prediction for Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{Genfang2009,
  author = {Genfang, Chen and Wenjun, Zhang and Qiuqiu, Wang},
  booktitle = {1st International Workshop on Education Technology and Computer Science},
  title = {Pick-up the Musical Information from Digital Musical Score Based on Mathematical Morphology and Music Notation},
  year = {2009},
  pages = {1141--1144},
  abstract = {The basic rule of musical notation for image processing is analyzed, in this paper. Using the structuring elements of musical notation and the basic algorithms of mathematical morphology, a new recognizing for the musical information of digital musical score is presented, and then the musical information is transformed to MIDI file for the communication and restoration of musical score. The results of experiment show that the statistic average value of recognition rate for musical information from digital musical score is 94.4%, and can be satisfied the practical applied demand, and it is a new way for applications of digital library, musical education, musical theory analysis and so on.},
  doi = {10.1109/ETCS.2009.261},
  file = {:pdfs/2009 - Pick-up the Musical Information from Digital Musical Score Based on Mahematical Morphology and Music Notation.pdf:PDF},
  keywords = {image recognition;music;musical information;digital musical score;mathematical morphology;music notation;image processing;MIDI file;digital library;musical education;musical theory analysis;image recognition;Morphology;Music;Image processing;Information analysis;Image recognition;Computer science education;Image analysis;Image restoration;Software libraries;TV;Musical score;Image recognition;Mathematical morphology;Music notation;MIDI}
}
@article{George2003,
  author = {George, Susan E.},
  journal = {Computer Music Journal},
  title = {Online Pen-Based Recognition of Music Notation with Artificial Neural Networks},
  year = {2003},
  number = {2},
  pages = {70--79},
  volume = {27},
  doi = {10.1162/014892603322022673},
  file = {:pdfs/2003 - Online Pen Based Recognition of Music Notation with Artificial Neural Networks.pdf:PDF}
}
@book{George2004,
  author = {George, Susan E.},
  publisher = {IRM Press},
  title = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  year = {2004},
  isbn = {1931777942},
  file = {:pdfs/2004 - Visual Perception of Music Notation on Line and off Line Recognition.pdf:PDF},
  url = {https://books.google.at/books?isbn=1591402980}
}
@incollection{George2004a,
  author = {George, Susan E.},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IRM Press},
  title = {Evaluation in the Visual Perception of Music Notation},
  year = {2004},
  address = {Hershey, PA},
  editor = {George, S.},
  pages = {304--349},
  doi = {10.4018/978-1-59140-298-5.ch010},
  file = {:pdfs/2004 - Evaluation in the Visual Perception of Music Notation.pdf:PDF}
}
@incollection{George2004b,
  author = {George, Susan E.},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IRM Press},
  title = {Lyric Recognition and Christian Music},
  year = {2004},
  address = {Hershey, PA},
  editor = {George, S.},
  pages = {198--226},
  doi = {10.4018/978-1-59140-298-5.ch007},
  file = {:pdfs/2004 - Lyric Recognition and Christian Music.pdf:PDF}
}
@incollection{George2004c,
  author = {George, Susan E.},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IRM Press},
  title = {Wavelets for Dealing with Super-Imposed Objects in Recognition of Music Notation},
  year = {2004},
  address = {Hershey, PA},
  editor = {George, S.},
  pages = {78--107},
  doi = {10.4018/978-1-59140-298-5.ch003},
  file = {:pdfs/2004 - Wavelets for Dealing with Super Imposed Objects in Recognition of Music Notation.pdf:PDF}
}
@incollection{George2004d,
  author = {George, Susan E.},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IRM Press},
  title = {Pen-Based Input for On-Line Handwritten Music Notation},
  year = {2004},
  address = {Hershey, PA},
  editor = {George, S.},
  pages = {128--160},
  doi = {10.4018/978-1-59140-298-5.ch005},
  file = {:pdfs/2004 - Pen Based Input for on Line Handwritten Music Notation.pdf:PDF}
}
@inproceedings{Geraud2014,
  author = {G{\'{e}}raud, Thierry},
  booktitle = {International Conference on Image Processing},
  title = {A morphological method for music score staff removal},
  year = {2014},
  pages = {2599--2603},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Removing the staff in music score images is a key to improve the recognition of music symbols and, with ancient and degraded handwritten music scores, it is not a straightforward task. In this paper we present the method that has won in 2013 the staff removal competition, organized at the International Conference on Document Analysis and Recognition (ICDAR). The main characteristics of this method is that it essentially relies on mathematical morphology filtering. So it is simple, fast, and its full source code is provided to favor reproducible research.},
  affiliation = {EPITA Research and Development Laboratory (LRDE), 14-16, rue Voltaire, Le Kremlin-Bicêtre, France},
  author_keywords = {Document image analysis; Filtering; Mathematical morphology; Music score},
  doi = {10.1109/ICIP.2014.7025526},
  file = {:pdfs/2014 - A Morphological Method for Music Score Staff Removal.pdf:PDF},
  isbn = {9781479957514},
  keywords = {Character recognition; Filtration; Image processing; Information retrieval systems, Document analysis; Document image analysis; Mathematical morphology filtering; Music scores; Reproducible research; Source codes, Mathematical morphology}
}
@article{Gezerlis2002,
  author = {Gezerlis, Velissarios G. and Theodoridis, Sergios},
  journal = {Pattern Recognition},
  title = {Optical character recognition of the Orthodox Hellenic Byzantine Music notation},
  year = {2002},
  issn = {0031-3203},
  number = {4},
  pages = {895--914},
  volume = {35},
  abstract = {In this paper we present for the first time, the development of a new system for the off-line optical recognition of the characters used in the orthodox Hellenic Byzantine Music notation, that has been established since 1814. We describe the structure of the new system and propose algorithms for the recognition of the 71 distinct character classes, based on Wavelets, 4-projections and other structural and statistical features. Using a nearest neighbor classifier, combined with a post classification schema and a tree-structured classification philosophy, an accuracy of 99.4% was achieved, in a database of about 18,000 Byzantine character patterns that have been developed for the needs of the system.},
  doi = {10.1016/S0031-3203(01)00098-X},
  file = {:pdfs/2002 - Optical Character Recognition of the Orthodox Hellenic Byzantine Music Notation.pdf:PDF},
  keywords = {Optical music recognition, Off-line character recognition, Byzantine Music, Byzantine Music notation, Wavelets, Projections, Neural networks, Contour processing, Nearest neighbor classifier, Byzantine Music database},
  url = {http://www.sciencedirect.com/science/article/pii/S003132030100098X}
}
@inproceedings{Goecke2003,
  author = {G{\"{o}}cke, Roland},
  booktitle = {IASTED International Conference on Signal Processing, Pattern Recognition, and Applications},
  title = {Building a system for writer identification on handwritten music scores},
  year = {2003},
  pages = {250--255},
  publisher = {Acta Press},
  abstract = {A significant example of the integration of musicology and computer science. The problem of writer identification process by historical musicologists is identified and possible solutions by computer technology are assessed. The system outline is unique and seems convincing including the interesting ideas such as the feature trees and consistency check. However, it lacks any concrete methods to implement the proposed system and any evaluation.},
  file = {:pdfs/2003 - Building a System for Writer Identification on Handwritten Music Scores.pdf:PDF},
  isbn = {0-88986-363-6},
  keywords = {handwriting identification, image processing, music scores, writer features},
  url = {http://users.cecs.anu.edu.au/~roland/Publications/Goecke_SPPRA2003.pdf}
}
@article{Gomez2017,
  author = {Gomez, Ashley Antony and Sujatha, C. N.},
  journal = {International Journal of Application or Innovation in Engineering \& Management},
  title = {Optical Music Recognition: Staffline Detection and Removal},
  year = {2017},
  comment = {Highly irrelevant paper in a paid journal},
  file = {:pdfs/2017 - Optical Music Recognition - Staffline Detection and Removal.pdf:PDF},
}
@article{Gordo2013,
  author = {Gordo, Albert and Forn{\'{e}}s, Alicia and Valveny, Ernest},
  journal = {Pattern Recognition},
  title = {Writer identification in handwritten musical scores with bags of notes},
  year = {2013},
  issn = {0031-3203},
  number = {5},
  pages = {1337--1345},
  volume = {46},
  abstract = {Writer Identification is an important task for the automatic processing of documents. However, the identification of the writer in graphical documents is still challenging. In this work, we adapt the Bag of Visual Words framework to the task of writer identification in handwritten musical scores. A vanilla implementation of this method already performs comparably to the state-of-the-art. Furthermore, we analyze the effect of two improvements of the representation: a Bhattacharyya embedding, which improves the results at virtually no extra cost, and a Fisher Vector representation that very significantly improves the results at the cost of a more complex and costly representation. Experimental evaluation shows results more than 20 points above the state-of-the-art in a new, challenging dataset.},
  doi = {10.1016/j.patcog.2012.10.013},
  file = {:pdfs/2013 - Writer Identification in Handwritten Musical Scores with Bags of Notes.pdf:PDF},
  keywords = {Writer identification, Handwritten musical scores, Bag of notes},
  url = {http://www.sciencedirect.com/science/article/pii/S0031320312004475}
}
@inproceedings{Gotham2018,
  author = {Gotham, Mark and Jonas, Peter and Bower, Bruno and Bosworth, William and Rootham, Daniel and VanHandel, Leigh},
  booktitle = {5th International Conference on Digital Libraries for Musicology},
  title = {Scores of Scores: An {OpenScore} Project to Encode and Share Sheet Music},
  year = {2018},
  address = {Paris, France},
  pages = {87--95},
  publisher = {ACM},
  acmid = {3273026},
  doi = {10.1145/3273024.3273026},
  file = {:pdfs/2018 - Scores of Scores - an Openscore Project to Encode and Share Sheet Music.pdf:PDF},
  isbn = {978-1-4503-6522-2},
  keywords = {corpus study, crowdsourcing, digital music library, music information retrieval, musical scores, song},
  url = {http://doi.acm.org/10.1145/3273024.3273026}
}
@inproceedings{Goularas2019,
  author = {Goularas, Dionysis and {{\c{C}}}{\i}nar, K{\"{u}}r{\c{s}}at},
  booktitle = {9th International Conference on Image Processing Theory, Tools and Applications},
  title = {Optical Music Recognition of the Hamparsum Notation},
  year = {2019},
  month = {Nov},
  pages = {1--7},
  abstract = {This paper presents a method for the recognition of music notes from the Hamparsum music notation system. This notation was widely used during the last two centuries of the Ottoman Empire and it is still in use today. The Hamparsum notation presents significant differences compared to the European music notation, in terms of symbols and structure. Moreover, the notes can consist of more than one individual symbols. The proposed recognition method comprises several steps and algorithms, including a feature extraction based on Gabor Filters, recognition of symbols using a Support Vector Machine classifier, a method for assigning recognized symbols to a candidate Hamparsum note and a final recognition system based on template matching. This work will help to popularize this unique cultural heritage by providing Hamparsum scores in a machine-readable format.},
  doi = {10.1109/IPTA.2019.8936130},
  file = {:pdfs/2019 - Optical Music Recognition of the Hamparsum Notation.pdf:PDF},
  issn = {2154-5111},
  keywords = {Optical Music Recognition;Image Processing;Feature Extraction;Machine Learning}
}
@inproceedings{Gover2019,
  author = {Gover, Matan and Fujinaga, Ichiro},
  booktitle = {6th International Conference on Digital Libraries for Musicology},
  title = {A Notation-Based Query Language for Searching in Symbolic Music},
  year = {2019},
  address = {New York, NY, USA},
  pages = {79--83},
  publisher = {Association for Computing Machinery},
  series = {DLfM ’19},
  doi = {10.1145/3358664.3358667},
  file = {:pdfs/2019 - A Notation Based Query Language for Searching in Symbolic Music.pdf:PDF},
  isbn = {9781450372398},
  keywords = {Music Encoding Initiative, Humdrum, query language, computational musicology, music searching, regular expressions, symbolic music},
  location = {The Hague, Netherlands},
  numpages = {5},
  url = {https://doi.org/10.1145/3358664.3358667}
}
@mastersthesis{Gozzi2010,
  author = {Gozzi, Gianmarco},
  school = {Politecnico di Milano},
  title = {{OMRJX}: A framework for piano scores optical music recognition},
  year = {2010},
  file = {:pdfs/2010 - OMRJX - a Framework for Piano Scores Optical Music Recognition.pdf:PDF},
  groups = {recognition},
  url = {https://www.politesi.polimi.it/bitstream/10589/12761/3/2011_03_Gozzi.pdf}
}
@inproceedings{Hajic2023,
  author = {Haji{\v{c}} jr., Jan and \v{Z}abi\v{c}ka, Petr and Rycht\'{a}\v{r}, Jan and Mayer, Ji\v{r}\'{i} and Dvo\v{r}\'{a}kov\'{a}, Martina and Jebav\'{y}, Filip and Vlkov\'{a}, Mark\'{e}ta and Pecina, Pavel},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {The OmniOMR Project},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {12--14},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - The OmniOMR Project.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@inproceedings{Hajicjr.2016,
  author = {Haji{\v{c}} jr., Jan and Novotn\'{y}, Ji\v{r}\'{i} and Pecina, Pavel and Pokorn\'{y}, Jaroslav},
  booktitle = {17th International Society for Music Information Retrieval Conference},
  title = {Further Steps towards a Standard Testbed for Optical Music Recognition},
  year = {2016},
  address = {New York, {USA}},
  editor = {Michael Mandel and Johanna Devaney and Douglas Turnbull and George Tzanetakis},
  organization = {New York University},
  pages = {157--163},
  publisher = {New York University},
  file = {:pdfs/2016 - Further Steps Towards a Standard Testbed for Optical Music Recognition.pdf:PDF},
  isbn = {978-0-692-75506-8},
  url = {https://wp.nyu.edu/ismir2016/event/proceedings/}
}
@article{Hajicjr.2017,
  author = {Haji{\v{c}} jr., Jan and Pecina, Pavel},
  journal = {Computing Research Repository},
  title = {In Search of a Dataset for Handwritten Optical Music Recognition: Introducing {MUSCIMA++}},
  year = {2017},
  pages = {1--16},
  volume = {abs/1703.04824},
  archiveprefix = {arXiv},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.org/rec/bib/journals/corr/HajicP17},
  file = {:pdfs/2017 - In Search of a Dataset for Handwritten Optical Music Recognition - Introducing MUSCIMA++.pdf:PDF},
  keywords = {Computer Science - Computer Vision and Pattern Recognition, I.7.5},
  primaryclass = {cs.CV},
  url = {http://arxiv.org/abs/1703.04824}
}
@article{Hajicjr.2017a,
  author = {Haji{\v{c}} jr., Jan and Pecina, Pavel},
  journal = {Computing Research Repository},
  title = {Detecting Noteheads in Handwritten Scores with ConvNets and Bounding Box Regression},
  year = {2017},
  volume = {abs/1708.01806},
  archiveprefix = {arXiv},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.org/rec/bib/journals/corr/abs-1708-01806},
  file = {:pdfs/2017 - Detecting Noteheads in Handwritten Scores with ConvNets and Bounding Box Regression.pdf:PDF},
  url = {http://arxiv.org/abs/1708.01806}
}
@inproceedings{Hajicjr.2017b,
  author = {Haji{\v{c}} jr., Jan and Dorfer, Matthias},
  booktitle = {Extended abstracts for the Late-Breaking Demo Session of the 18th International Society for Music Information Retrieval Conference},
  title = {Prototyping Full-Pipeline Optical Music Recognition with {MUSCIMarker}},
  year = {2017},
  address = {Suzhou, China},
  file = {:pdfs/2017 - Prototyping Full-Pipeline Optical Music Recognition with MUSCIMARKER.pdf:PDF},
  url = {https://ismir2017.smcnus.org/lbds/Hajic2017.pdf}
}
@inproceedings{Hajicjr.2017c,
  author = {Haji{\v{c}} jr., Jan and Pecina, Pavel},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Groundtruthing (Not Only) Music Notation with {MUSCIMarker}: A Practical Overview},
  year = {2017},
  address = {Kyoto, Japan},
  pages = {47--48},
  abstract = {Dataset creation for graphics recognition, especially for hand-drawn inputs, is often an expensive and time-consuming undertaking. The MUSCIMarker tool used for creating the MUSCIMA++ dataset for Optical Music Recognition (OMR) led to efficient use of annotation resources, and it provides enough flexibility to be applicable to creating datasets for other graphics recognition tasks where the ground truth can be represented similarly. First, we describe the MUSCIMA++ ground truth to define the range of tasks for which using MUSCIMarker to annotate ground truth is applicable. We then describe the MUSCIMarker tool itself, discuss its strong and weak points, and share practical experience with the tool from creating the MUSCIMA++ dataset.},
  doi = {10.1109/ICDAR.2017.271},
  file = {:pdfs/2017 - Groundtruthing (Not Only) Music Notation with MUSICMarker- A Practical Overview.pdf:PDF},
  issn = {2379-2140},
  keywords = {computer graphics;music;music notation;MUSCIMarker;dataset creation;MUSCIMarker tool;MUSCIMA++ dataset;Optical Music Recognition;graphics recognition tasks;Tools;Optical imaging;Optical feedback;Grammar;Integrated optics;Graphics;Text analysis;Dataset Creation;Annotation Tools}
}
@inproceedings{Hajicjr.2017d,
  author = {Haji{\v{c}} jr., Jan and Pecina, Pavel},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {The {MUSCIMA++} Dataset for Handwritten Optical Music Recognition},
  year = {2017},
  address = {Kyoto, Japan},
  pages = {39--46},
  abstract = {Optical Music Recognition (OMR) promises to make accessible the content of large amounts of musical documents, an important component of cultural heritage. However, the field does not have an adequate dataset and ground truth for benchmarking OMR systems, which has been a major obstacle to measurable progress. Furthermore, machine learning methods for OMR require training data. We design and collect MUSCIMA++, a new dataset for OMR. Ground truth in MUSCIMA++ is a notation graph, which our analysis shows to be a necessary and sufficient representation of music notation. Building on the CVC-MUSCIMA dataset for staffline removal, the MUSCIMA++ dataset v1.0 consists of 140 pages of handwritten music, with 91254 manually annotated notation symbols and 82247 explicitly marked relationships between symbol pairs. The dataset allows training and directly evaluating models for symbol classification, symbol localization, and notation graph assembly, and indirectly musical content extraction, both in isolation and jointly. Open-source tools are provided for manipulating the dataset, visualizing the data and annotating further, and the data is made available under an open license.},
  doi = {10.1109/ICDAR.2017.16},
  file = {:pdfs/2017 - The MUSCIMA++ Dataset for Handwritten Optical Music Recognition.pdf:PDF},
  issn = {2379-2140},
  keywords = {data visualisation;document image processing;handwriting recognition;handwritten character recognition;history;image classification;learning (artificial intelligence);music;optical character recognition;training data;music notation;CVC-MUSCIMA dataset;symbol pairs;symbol classification;symbol localization;notation graph assembly;handwritten optical Music Recognition;musical documents;cultural heritage;machine learning;MUSCIMA++ dataset;OMR systems;manually annotated notation symbols;musical content extraction;data visualization;image preprocessing;Music;Pipelines;Image reconstruction;Optical character recognition software;Complexity theory;Text analysis;Optical Music Recognition;Dataset;Graph Representation;Musical Symbol Detection}
}
@inproceedings{Hajicjr.2018,
  author = {Haji{\v{c}} jr., Jan and Kol\'{a}rov\'{a}, Marta and Pacha, Alexander and Calvo-Zaragoza, Jorge},
  booktitle = {5th International Conference on Digital Libraries for Musicology},
  title = {How Current Optical Music Recognition Systems Are Becoming Useful for Digital Libraries},
  year = {2018},
  address = {Paris, France},
  pages = {57--61},
  publisher = {ACM},
  abstract = {Optical Music Recognition (OMR) promises to make large collections of sheet music searchable by their musical content. It would open up novel ways of accessing the vast amount of written music that has never been recorded before. For a long time, OMR was not living up to that promise, as its performance was simply not good enough, especially on handwritten music or under non-ideal image conditions. However, OMR has recently seen a number of improvements, mainly due to the advances in machine learning. In this work, we take an OMR system based on the traditional pipeline and an end-to-end system, which represent the current state of the art, and illustrate in proof-of-concept experiments their applicability in retrieval settings. We also provide an example of a musicological study that can be replicated with OMR outputs at much lower costs. Taken together, this indicates that in some settings, current OMR can be used as a general tool for enriching digital libraries.},
  acmid = {3273034},
  doi = {10.1145/3273024.3273034},
  file = {:pdfs/2018 - How Current Optical Music Recognition Systems Are Becoming Useful for Digital Libraries.pdf:PDF},
  isbn = {978-1-4503-6522-2},
  keywords = {digital musicology, music digital libraries, music information retrieval, optical music recognition, symbolic music search},
  url = {http://doi.acm.org/10.1145/3273024.3273034}
}
@inproceedings{Hajicjr.2018a,
  author = {Haji{\v{c}} jr., Jan and Dorfer, Matthias and Widmer, Gerhard and Pecina, Pavel},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Towards Full-Pipeline Handwritten {OMR} with Musical Symbol Detection by U-Nets},
  year = {2018},
  address = {Paris, France},
  pages = {225--232},
  file = {:pdfs/2018 - Towards Full Pipeline Handwritten OMR with Musical Symbol Detection by U Nets.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/175_Paper.pdf}
}
@inproceedings{Hajicjr.2018b,
  author = {Haji{\v{c}} jr., Jan},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {A Case for Intrinsic Evaluation of Optical Music Recognition},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {15--16},
  file = {:pdfs/2018 - A Case for Intrinsic Evaluation of Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@phdthesis{Hajicjr.2019,
  author = {Haji{\v{c}} jr., Jan},
  school = {Charles University},
  title = {Optical Recognition of Handwritten Music Notation},
  year = {2019},
  address = {Prague},
  type = {phdthesis},
  abstract = {Optical Music Recognition (OMR) is the field of computationally reading music notation. This thesis presents, in the form of dissertation by publication, contributions to the theory, resources, and methods of OMR especially for handwritten notation. The main contributions are (1) the Music Notation Graph (MuNG) formalism for describing arbitrarily complex music notation using an oriented graph that can be unambiguously interpreted in terms of musical semantics, (2) the MUSCIMA++ dataset of musical manuscripts with MuNG as ground truth that can be used to train and evaluate OMR systems and subsystems from the image all the way to extracting the musical semantics encoded therein, and (3) a pipeline for performing OMR on musical manuscripts that relies on machine learning both for notation symbol detection and the notation assembly stage, and on properties of the inferred MuNG representation to deterministically extract the musical semantics. While the OMR pipeline does not perform flawlessly, this is the first OMR system to perform at basic useful tasks over musical semantics extracted from handwritten music notation of arbitrary complexity.},
  file = {:pdfs/2019 - Optical Recognition of Handwritten Music Notation.pdf:PDF}
}
@article{Hakim2019,
  author = {Hakim, Dzikry Maulana and Rainarli, Ednawati},
  journal = {Techno.COM},
  title = {Convolutional Neural Network untuk Pengenalan Citra Notasi Musik},
  year = {2019},
  issn = {2356-2579},
  number = {3},
  pages = {214--226},
  volume = {18},
  doi = {10.33633/tc.v18i3.2387},
  file = {:pdfs/2019 - Convolutional Neural Network Untuk Pengenalan Citra Notasi Musik.pdf:PDF},
  language = {Indonesian},
  url = {http://publikasi.dinus.ac.id/index.php/technoc/article/view/2387}
}
@article{Han2014,
  author = {Han, Sejin and Lee, Gueesang},
  journal = {International Journal of Contents},
  title = {Optical Music Score Recognition System for Smart Mobile Devices},
  year = {2014},
  number = {4},
  pages = {63--68},
  volume = {10},
  doi = {10.5392/IJoC.2014.10.4.063},
  file = {:pdfs/2014 - Optical Music Score Recognition System for Smart Mobile Devices.pdf:PDF},
  keywords = {music ocr, music recognition, optical music score recognition, to classify}
}
@inproceedings{Hande2023,
  author = {Hande, Pranjali and Shatri, Elona and Timms, Benjamin and Fazekas, Gy{\"{o}}rgy},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Towards Artificially Generated Handwritten Sheet Music Datasets},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {25--30},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Towards Artificially Generated Handwritten Sheet Music Datasets.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@inproceedings{Hankinson2010,
  author = {Hankinson, Andrew and Pugin, Laurent and Fujinaga, Ichiro},
  booktitle = {11th International Society for Music Information Retrieval Conference},
  title = {An Interchange Format for Optical Music Recognition Applications},
  year = {2010},
  address = {Utrecht, The Netherlands},
  pages = {51--56},
  file = {:pdfs/2010 - An Interchange Format for Optical Music Recognition Applications.pdf:PDF},
  url = {http://ismir2010.ismir.net/proceedings/ismir2010-11.pdf}
}
@inproceedings{Hankinson2012,
  author = {Hankinson, Andrew and Burgoyne, John Ashley and Vigliensoni, Gabriel and Porter, Alastair and Thompson, Jessica and Liu, Wendy and Chiu, Remi and Fujinaga, Ichiro},
  booktitle = {13th International Society for Music Information Retrieval Conference},
  title = {Digital Document Image Retrieval Using Optical Music Recognition},
  year = {2012},
  editor = {Fabien Gouyon and Perfecto Herrera and Luis Gustavo Martins and Meinard M{\"{u}}ller},
  pages = {577--582},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/HankinsonBVPTLCF12},
  file = {:pdfs/2012 - Digital Document Image Retrieval Using Optical Music Recognition.pdf:PDF},
  url = {http://ismir2012.ismir.net/event/papers/577-ismir-2012.pdf}
}
@inproceedings{Hankinson2012a,
  author = {Hankinson, Andrew and Burgoyne, John Ashley and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {21st International Conference on World Wide Web},
  title = {Creating a Large-scale Searchable Digital Collection from Printed Music Materials},
  year = {2012},
  address = {Lyon, France},
  pages = {903--908},
  publisher = {ACM},
  acmid = {2188221},
  doi = {10.1145/2187980.2188221},
  file = {:pdfs/2012 - Creating a Large-Scale Searchable Digital Collection from Printed Music Materials.pdf:PDF},
  isbn = {978-1-4503-1230-1},
  keywords = {music notation, music score searching, optical music recognition, web applications}
}
@inproceedings{Hankinson2012b,
  author = {Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {Conference of the International Association of Music Libraries},
  title = {{SIMSSA}: Single Interface for Music Score Searching and Analysis},
  year = {2012},
  address = {Montréal, QC},
  url = {https://www.iaml.info/sites/default/files/pdf/20120711a_montreal_programme.pdf}
}
@misc{Hankinson2012c,
  author = {Hankinson, Andrew},
  howpublished = {\url{http://ddmal.music.mcgill.ca/research/omr/omr_bibliography}},
  title = {Optical Music Recognition Bibliography},
  year = {2012},
  keywords = {OMR, Optical Music Recognition, Bibliography, References},
  url = {http://ddmal.music.mcgill.ca/research/omr/omr_bibliography}
}
@inproceedings{Hankinson2013,
  author = {Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {Conference of the International Association of Music Libraries},
  title = {Using optical music recognition to navigate and retrieve music documents},
  year = {2013},
  address = {Vienna, Austria},
  url = {https://www.iaml.info/sites/default/files/pdf/2013-07-25_iaml_vienna_conference_programme.pdf}
}
@phdthesis{Hankinson2014,
  author = {Hankinson, Andrew},
  school = {McGill University},
  title = {Optical music recognition infrastructure for large-scale music document analysis},
  year = {2014},
  file = {:pdfs/2014 - Optical music recognition infrastructure for largescale music document analysis.pdf:PDF},
  url = {http://digitool.library.mcgill.ca/webclient/DeliveryManager?pid=130291}
}
@inproceedings{Havelka2023,
  author = {Havelka, Jon\'{a}\v{s} and Mayer, Ji\v{r}\'{i} and Pecina, Pavel},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Symbol Generation via Autoencoders for Handwritten Music Synthesis},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {20--24},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Symbol Generation Via Autoencoders for Handwritten Music Synthesis.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@article{Helsen2014,
  author = {Helsen, Kate and Bain, Jennifer and Fujinaga, Ichiro and Hankinson, Andrew and Lacoste, Debra},
  journal = {Early Music},
  title = {Optical music recognition and manuscript chant sources},
  year = {2014},
  number = {4},
  pages = {555--558},
  volume = {42},
  doi = {10.1093/em/cau092}
}
@techreport{Hemmatifar2018,
  author = {Hemmatifar, Ali and Krishna, Ashish},
  institution = {Stanford University},
  title = {DeepPiano: A Deep Learning Approach to Translate Music Notation to English Alphabet},
  year = {2018},
  file = {:pdfs/2018 - DeepPiano - A Deep Learning Approach to Translate Music Notation to English Alphabet.pdf:PDF},
  url = {http://cs230.stanford.edu/files_winter_2018/projects/6940264.pdf}
}
@inproceedings{Henkel2019,
  author = {Henkel, Florian and Kelz, Rainer and Widmer, Gerhard},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {Audio-Conditioned U-Net for Position Estimation in Full Sheet Images},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {8--11},
  file = {:pdfs/2019 - Audio Conditioned U Net for Position Estimation in Full Sheet Images.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@inproceedings{Henkel2020,
  author = {Henkel, Florian and Kelz, Rainer and Widmer, Gerhard},
  booktitle = {Proceedings of the 21st International Society for Music Information Retrieval Conference},
  title = {Learning to Read and Follow Music in Complete Score Sheet Images},
  year = {2020},
  file = {:pdfs/2020 - Learning to Read and Follow Music in Complete Score Sheet Images.pdf:PDF},
  url = {https://program.ismir2020.net/poster_6-02.html}
}
@book{Hewlett1990,
  editor = {Hewlett, Walter B. and Selfridge-Field, Eleanor},
  publisher = {Center for Computer},
  title = {Computing in Musicology: A Directory of Research},
  year = {1990},
  volume = {6},
  chapter = {Optical recognition of musical data},
  file = {:pdfs/1990 - Computing in Musicology - a Directory of Research.pdf:PDF},
  pages = {36--45},
  url = {http://wiki.ccarh.org/images/f/f5/Computing_in_Musicology_06_00.pdf}
}
@inproceedings{Homenda1995,
  author = {Homenda, W{\l}adys{\l}aw},
  booktitle = {Symposium on OE/Aerospace Sensing and Dual Use Photonics},
  title = {Optical pattern recognition for printed music notation},
  year = {1995},
  abstract = {The paper presents problems related to automated recognition of printed music notation. Music notation recognition is a challenging problem in both fields: pattern recognition and knowledge representation. Music notation symbols, though well characterized by their features, are arranged in elaborated way in real music notation, which makes recognition task very difficult and still open for new ideas. On the other hand, the aim of the system, i.e. application of acquired printed music into further processing requires special representation of music data. Due to complexity of music nature and music notation, music representation is one of the key issue in music notation recognition and music processing. The problems of pattern recognition and knowledge representation in context or music processing are discussed in this paper. MIDISCAN, the computer system for music notation recognition and music processing, is presented.},
  doi = {10.1117/12.205779},
  url = {https://doi.org/10.1117/12.205779}
}
@article{Homenda1996,
  author = {Homenda, W{\l}adys{\l}aw},
  journal = {Control and Cybernetics},
  title = {Automatic recognition of printed music and its conversion into playable music data},
  year = {1996},
  number = {2},
  pages = {353--367},
  volume = {25},
  abstract = {The paper describes MIDISCAN-a recognition system for printed music notation. Music notation recognition is a challenging problem in both fields: pattern recognition and knowledge representation. Music notation symbols, though well characterized by their features, are arranged in an elaborate way in real music notation, which makes recognition task very difficult and still open for new ideas, as for example, fuzzy set application in skew correction and stave location. On the other hand, the aim of the system, i.e. conversion of acquired printed music into playable MIDI format requires special representation of music data. The problems of pattern recognition and knowledge representation in context of music processing are discussed in this paper.},
  file = {:pdfs/1996 - Automatic Recognition of Printed Music and Its Conversion into Playable Music Data.pdf:PDF},
  keywords = {printed music recognition; playable music data; MIDISCAN; music notation recognition; music notation symbols; skew correction; stave location; knowledge representation; music processing; MIDI format},
  url = {http://control.ibspan.waw.pl:3000/contents/export?filename=1996-2-12_homenda.pdf}
}
@incollection{Homenda2001,
  author = {Homenda, W{\l}adys{\l}aw},
  booktitle = {Granular Computing: An Emerging Paradigm},
  publisher = {Physica-Verlag HD},
  title = {Optical Music Recognition: the Case of Granular Computing},
  year = {2001},
  address = {Heidelberg},
  isbn = {978-3-7908-1823-9},
  pages = {341--366},
  abstract = {The paper deals with optical music recognition (OMR) as a process of structured data processing applied to music notation. Granularity of OMR in both its aspects: data representation and data processing is especially emphasised in the paper. OMR is a challenge in intelligent computing technologies, especially in such fields as pattern recognition and knowledge representation and processing. Music notation is a language allowing for communication in music, one of most sophisticated field of human activity, and has a high level of complexity itself. On the one hand, music notation symbols vary in size and have complex shapes; they often touch and overlap each other. This feature makes the recognition of music symbols a very difficult and complicated task. On the other hand, music notation is a two dimensional language in which importance of geometrical and logical relations between its symbols may be compared to the importance of the symbols alone. Due to complexity of music nature and music notation, music representation, necessary to store and reuse recognised information, is also the key issue in music notation recognition and music processing. Both: the data representation and the data processing used in OMR is highly structured, granular rather than numeric. OMR technology fits paradigm of granular computing},
  doi = {10.1007/978-3-7908-1823-9_15},
  url = {https://doi.org/10.1007/978-3-7908-1823-9_15}
}
@inproceedings{Homenda2004,
  author = {Homenda, W{\l}adys{\l}aw and Luckner, Marcin},
  booktitle = {International Conference on AI and Systems},
  title = {Automatic Recognition of Music Notation Using Neural Networks},
  year = {2004},
  address = {Divnomorskoye, Russia},
  file = {:pdfs/2004 - Automatic Recognition of Music Notation Using Neural Networks.pdf:PDF},
  institution = {Warsaw University of Technology},
  url = {https://www.researchgate.net/publication/275207587_Automatic_recognition_of_music_notation_using_neural_networks}
}
@inproceedings{Homenda2004a,
  author = {Homenda, W{\l}adys{\l}aw and Mossakowski, K.},
  booktitle = {EUROFUSE Workshop On Data And Knowledge Engineering},
  title = {Music Symbol Recognition: Neural Networks vs. Statistical Methods},
  year = {2004},
  address = {Warszawa, Poland},
  editor = {De Baets, B. and De Caluwe, R. and De Tre, G. and Fodor, Janos and Kacprzyk, J. and Zadrozny, S.},
  url = {http://viking.ibspan.waw.pl/eurofuse2004/}
}
@inproceedings{Homenda2005,
  author = {Homenda, W{\l}adys{\l}aw},
  booktitle = {Computer Recognition Systems},
  title = {Optical Music Recognition: the Case Study of Pattern Recognition},
  year = {2005},
  address = {Berlin, Heidelberg},
  editor = {Kurzy{\'{n}}ski, Marek and Pucha{\l}a, Edward and Wo{\'{z}}niak, Micha{\l} and {\.{Z}}o{\l}nierek, Andrzej},
  pages = {835--842},
  publisher = {Springer Berlin Heidelberg},
  abstract = {The paper presents a pattern recognition study aimed on music notation recognition. The study is focused on practical aspect of optical music recognition; it presents a variety of methods applied in optical music recognition technology. The following logically separated stages of music notation recognition are distinguished: acquiring music notation structure, recognizing symbols of music notation, analyzing contextual information. The directions for OMR package development are drawn.},
  doi = {10.1007/3-540-32390-2_98},
  isbn = {978-3-540-32390-7}
}
@inproceedings{Homenda2006,
  author = {Homenda, W{\l}adys{\l}aw and Luckner, Marcin},
  booktitle = {International Joint Conference on Neural Network},
  title = {Automatic Knowledge Acquisition: Recognizing Music Notation with Methods of Centroids and Classifications Trees},
  year = {2006},
  address = {Vancouver, Canada},
  pages = {3382--3388},
  abstract = {This paper presents a pattern recognition study aimed at music symbols recognition. The study is focused on classification methods of music symbols based on decision trees and clustering method applied to classes of music symbols that face classification problems. Classification is made on the basis of extracted features. A comparison of selected classifiers was made on some classes of notation symbols distorted by a variety of factors as image noise, printing defects, different fonts, skew and curvature of scanning, overlapped symbols.},
  doi = {10.1109/IJCNN.2006.247339},
  file = {:pdfs/2006 - Automatic Knowledge Acquisition - Recognizing Music Notation with Methods of Centroids and Classifications Trees.pdf:PDF},
  issn = {2161-4393},
  keywords = {feature extraction;knowledge acquisition;music;pattern classification;knowledge acquisition;music notation recognition;centroids;classifications trees;pattern recognition;music symbols recognition;feature extraction;classifiers;Knowledge acquisition;Multiple signal classification;Classification tree analysis;Ordinary magnetoresistance;Printing;Optical character recognition software;Text recognition;Tiles;Neural networks;Decision trees}
}
@inproceedings{Homenda2006a,
  author = {Homenda, W{\l}adys{\l}aw},
  booktitle = {International Joint Conference on Neural Network},
  title = {Automatic understanding of images: integrated syntactic and semantic analysis of music notation},
  year = {2006},
  address = {Vancouver, Canada},
  pages = {3026--3033},
  abstract = {The paper introduces an approach to image processing and recognition based on the perception of images as subjects being exchanged in the man-computer communication. The approach reveals the parallel syntactic and semantic attempts to automatic image understanding. Both attempts are reflected in the paradigms of information granulation and granular computing. The parallel syntactic and semantic processing of images allows for solving problems raised by difficulties and complexity of the detailed syntactic description of images as well as difficulties of detailed semantic analysis. The study presented in this paper is cast on the practical task of the music notation recognition.},
  doi = {10.1109/IJCNN.2006.247261},
  file = {:pdfs/2006 - Automatic Understanding of Images_ Integrated Syntactic and Semantic Analysis of Music Notation.pdf:PDF},
  issn = {2161-4393},
  keywords = {image recognition;music;semantic networks;automatic image understanding;integrated syntactic;semantic analysis;music notation;image recognition;image perception;man-computer communication;information granulation;granular computing;parallel syntactic image processing;parallel semantic image processing;Image analysis;Multiple signal classification;Natural languages;Humans;Artificial intelligence;Emulation;Machine intelligence;Hardware;Information science;Civil engineering}
}
@article{Homenda2014,
  author = {Homenda, W{\l}adys{\l}aw and Lesinski, Wojciech},
  journal = {Lecture Notes in Computer Science},
  title = {Decision trees and their families in imbalanced pattern recognition: Recognition with and without Rejection},
  year = {2014},
  issn = {0302-9743},
  pages = {219--230},
  volume = {8838},
  abstract = {Decision trees are considered to be among the best classifiers. In
	this work we use decision trees and its families to the problem of
	imbalanced data recognition. Considered are aspects of recognition
	without rejection and with rejection: it is assumed that all recognized
	elements belong to desired classes in the first case and that some
	of them are outside of such classes and are not known at classifiers
	training stage. The facets of imbalanced data and recognition with
	rejection affect different real world problems. In this paper we
	discuss results of experiment of imbalanced data recognition on the
	case study of music notation symbols. Decision trees and three methods
	of joining decision trees (simple voting, bagging and random forest)
	are studied. These methods are used for recognition without and with
	rejection.},
  affiliation = {Warsaw University of Technology, Plac Politechniki 1, Warsaw, Poland; University of Bialystok, ul. Sosnowa 64, Bialystok, Poland},
  author_keywords = {Bagging; Decision tree; Imbalanced data; Optical music recognition; Pattern recognition; Random forest},
  doi = {10.1007/978-3-662-45237-0_22},
  editor = {Saeed, Khalid and Sn{\'a}{\v{s}}el, V{\'a}clav},
  file = {:pdfs/2014 - Decision Trees and Their Families in Imbalanced Pattern Recognition_ Recognition with and without Rejection.pdf:PDF},
  isbn = {9783662452363},
  keywords = {Industrial management; Information management; Information systems; Pattern recognition, Bagging; Imbalanced data; Music notation; Optical music recognition; Random forests; Real-world problem, Decision trees},
  publisher = {Springer Verlag}
}
@inproceedings{Hori1999,
  author = {Hori, Toyokazu and Wada, Shinichiro and Tai, Howzan and Kung, S. Y.},
  booktitle = {3rd Workshop on Multimedia Signal Processing},
  title = {Automatic music score recognition/play system based on decision based neural network},
  year = {1999},
  pages = {183--184},
  abstract = {This paper proposes an automatic music score recognition system based on a hierarchically structured decision based neural network (DBNN), which can classify patterns with nonlinear decision boundaries. Currently, this system yields around a 97% recognition rate for printed music scores.},
  doi = {10.1109/MMSP.1999.793817},
  file = {:pdfs/1999 - Automatic Music Score Recognition_play System Based on Decision Based Neural Network.pdf:PDF},
  keywords = {music;image recognition;neural nets;document image processing;image classification;automatic music score recognition system;automatic music score play system;hierarchically structured decision based neural network;pattern classification;nonlinear decision boundaries;printed music scores;Multiple signal classification;Neural networks;Length measurement;Pattern recognition;Feature extraction;Principal component analysis;Image edge detection;Density measurement;Multi-layer neural network;Convergence}
}
@inproceedings{Huang2015,
  author = {Huang, Yu-Hui and Chen, Xuanli and Beck, Serafina and Burn, David and Van Gool, Luc},
  booktitle = {16th International Society for Music Information Retrieval Conference},
  title = {Automatic Handwritten Mensural Notation Interpreter: From Manuscript to {MIDI} Performance},
  year = {2015},
  address = {M{\'{a}}laga, Spain},
  editor = {Meinard M{\"{u}}ller and Frans Wiering},
  pages = {79--85},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/HuangCBBG15},
  file = {:pdfs/2015 - Automatic Handwritten Mensural Notation Interpreter - From Manuscript to MIDI Performances.pdf:PDF},
  isbn = {978-84-606-8853-2},
  url = {http://ismir2015.uma.es/articles/191_Paper.pdf}
}
@article{Huang2019,
  author = {Huang, Zhiqing and Jia, Xiang and Guo, Yifan},
  journal = {Applied Sciences},
  title = {State-of-the-Art Model for Music Object Recognition with Deep Learning},
  year = {2019},
  issn = {2076-3417},
  number = {13},
  pages = {2645--2665},
  volume = {9},
  abstract = {Optical music recognition (OMR) is an area in music information retrieval. Music object
detection is a key part of the OMR pipeline. Notes are used to record pitch and duration and
have semantic information. Therefore, note recognition is the core and key aspect of music score
recognition. This paper proposes an end-to-end detection model based on a deep convolutional neural
network and feature fusion. This model is able to directly process the entire image and then output
the symbol categories and the pitch and duration of notes. We show a state-of-the-art recognition
model for general music symbols which can get 0.92 duration accuracy and 0.96 pitch accuracy.},
  doi = {10.3390/app9132645},
  file = {:pdfs/2019 - State of the Art Model for Music Object Recognition with Deep Learning.pdf:PDF},
  keywords = {optical music recognition; deep learning; object detection; music scores},
  url = {https://www.mdpi.com/2076-3417/9/13/2645}
}
@inproceedings{Inesta2018,
  author = {I{\~{n}}esta, Jos{\'{e}} Manuel and {Ponce de Le{\'{o}}n}, Pedro J. and Rizo, David and Oncina, Jos{\'{e}} and Mic{\'{o}}, Luisa and Rico-Juan, Juan Ram{\'{o}}n and P{\'{e}}rez-Sancho, Carlos and Pertusa, Antonio},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {HISPAMUS: Handwritten Spanish Music Heritage Preservation by Automatic Transcription},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {17--18},
  file = {:pdfs/2018 - HISPAMUS - Handwritten Spanish Music Heritage Preservation by Automatic Transcription.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Inesta2019,
  author = {I{\~{n}}esta, Jos{\'{e}} M. and Rizo, David and Calvo-Zaragoza, Jorge},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {{MuRET} as a software for the transcription of historical archives},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {12--15},
  file = {:pdfs/2019 - MuRET As a Software for the Transcription of Historical Archives.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@misc{iSeeNotes,
  author = {{Gear Up AB}},
  howpublished = {\url{http://www.iseenotes.com}},
  title = {iSeeNotes},
  year = {2017},
  url = {http://www.iseenotes.com}
}
@incollection{Itagaki1992,
  author = {Itagaki, Takebumi and Isogai, Masayuki and Hashimoto, Shuji and Ohteru, Sadamu},
  booktitle = {Structured Document Image Analysis},
  publisher = {Springer Berlin Heidelberg},
  title = {Automatic Recognition of Several Types of Musical Notation},
  year = {1992},
  address = {Berlin, Heidelberg},
  isbn = {978-3-642-77281-8},
  pages = {466--476},
  abstract = {This paper describes recent progress towards systems for automatic recognition of several different types of musical notation, including printed sheet music, Braille music, and dance notation.},
  doi = {10.1007/978-3-642-77281-8_22},
  file = {:pdfs/1992 - Automatic Recognition of Several Types of Musical Notation.pdf:PDF},
  url = {https://doi.org/10.1007/978-3-642-77281-8_22}
}
@inproceedings{Jacquemard2022,
  author = {Jacquemard, Florent and Rodriguez-de la Nava, Lydia and Digard, Martin},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Automated Transcription of Electronic Drumkits},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {37--41},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Automated Transcription of Electronic Drumkits.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@incollection{Jastrzebska2014,
  author = {Jastrzebska, Agnieszka and Lesinski, Wojciech},
  booktitle = {International Conference on Systems Science 2013},
  publisher = {Springer International Publishing},
  title = {Optical Music Recognition as the Case of Imbalanced Pattern Recognition: A Study of Complex Classifiers},
  year = {2014},
  address = {Cham},
  isbn = {978-3-319-01857-7},
  pages = {325--335},
  abstract = {The article is focused on a particular aspect of classification, namely
	the imbalance of recognized classes. Imbalanced data adversely affects
	the recognition ability and requires proper classifier's construction.
	The aim of presented study is to explore the capabilities of classifier
	combining methods with such raised problem. In this paper authors
	discuss results of experiment of imbalanced data recognition on the
	case study of music notation symbols. Applied classification methods
	include: simple voting method, bagging and random forest.},
  doi = {10.1007/978-3-319-01857-7_31}
}
@inproceedings{Jastrzebska2016,
  author = {Jastrzebska, Agnieszka and Lesinski, Wojciech},
  booktitle = {Knowledge, Information and Creativity Support Systems: Recent Trends, Advances and Solutions},
  title = {Optical Music Recognition as the Case of Imbalanced Pattern Recognition: A Study of Single Classifiers},
  year = {2016},
  address = {Cham},
  editor = {Skulimowski, Andrzej M.J. and Kacprzyk, Janusz},
  pages = {493--505},
  publisher = {Springer International Publishing},
  abstract = {The article is focused on a particular aspect of classification, namely the imbalance of recognized classes. The paper contains a comparative study of results of musical symbols classification using known algorithms: k-nearest neighbors, k-means, Mahalanobis minimal distance, and decision trees. Authors aim at addressing the problem of imbalanced pattern recognition. First, we theoretically analyze difficulties entailed in the classification of music notation symbols. Second, in the enclosed case study we investigate the fitness of named single classifiers on real data. Conducted experiments are based on own implementations of named algorithms with all necessary image processing tasks. Results are highly satisfying.},
  doi = {10.1007/978-3-319-19090-7_37},
  file = {:pdfs/2016 - Optical Music Recognition as the Case of Imbalanced Pattern Recognition - A Study of Single Classifiers.pdf:PDF},
  isbn = {978-3-319-19090-7}
}
@mastersthesis{Jastrzebski2014,
  author = {Jastrz{\k{e}}bski, Krzysztof},
  school = {Politechnika Wroc{\l}awska},
  title = {{OMR} for sheet music digitization},
  year = {2014},
  file = {:pdfs/2014 - OMR for Sheet Music Digitization.pdf:PDF},
  url = {http://www.zsi.pwr.wroc.pl/~kopel/mgr/2014.07%20mgr%20Jastrzebski.pdf}
}
@inproceedings{Jin2012,
  author = {Jin, Rong and Raphael, Christopher},
  booktitle = {13th International Society for Music Information Retrieval Conference},
  title = {Interpreting Rhythm in Optical Music Recognition},
  year = {2012},
  address = {Porto, Portugal},
  editor = {Fabien Gouyon and Perfecto Herrera and Luis Gustavo Martins and Meinard M{\"{u}}ller},
  pages = {151--156},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/JinR12},
  file = {:pdfs/2012 - Interpreting Rhythm in Optical Music Recognition.pdf:PDF},
  url = {http://ismir2012.ismir.net/event/papers/151-ismir-2012.pdf}
}
@phdthesis{Jin2017,
  author = {Jin, Rong},
  school = {Indiana University},
  title = {Graph-Based Rhythm Interpretation in Optical Music Recognition},
  year = {2017},
  file = {:pdfs/2017 - Graph-Based Rhythm Interpretation in Optical Music Recognition - PhD Thesis.pdf:PDF},
  url = {https://search.proquest.com/openview/891a2b54a68dba0e3a03698c18dfac06/}
}
@mastersthesis{Johansen2009,
  author = {Johansen, Linn Saxrud},
  school = {University of Oslo},
  title = {Optical Music Recognition},
  year = {2009},
  file = {:pdfs/2009 - Optical Music Recognition.pdf:PDF},
  number = {April},
  pages = {1--52},
  url = {https://www.duo.uio.no/handle/10852/10832}
}
@incollection{Jones2008,
  author = {Jones, Graham and Ong, Bee and Bruno, Ivan and Ng, Kia},
  booktitle = {Interactive multimedia music technologies},
  publisher = {IGI Global},
  title = {Optical Music Imaging: Music Document Digitisation, Recognition, Evaluation, and Restoration},
  year = {2008},
  pages = {50--79},
  abstract = {This paper presents the applications and practices in the domain of music imaging for musical scores (music sheets and music manuscripts), which include music sheet digitisation, optical music recognition (OMR) and optical music restoration. With a general background of Optical Music Recognition (OMR), the paper discusses typical obstacles in this domain and reports currently available commercial OMR software. It reports hardware and software related to music imaging, discussed the SharpEye optical music recognition system and provides an evaluation of a number of OMR systems. Besides the main focus on the transformation from images of music scores to symbolic format, this paper also discusses optical music image restoration and the application of music imaging techniques for graphical preservation and potential applications for cross-media integration.},
  doi = {10.4018/978-1-59904-150-6.ch004},
  file = {:pdfs/2008 - Optical Music Imaging - Music Document Digitisation, Recognition, Evaluation, and Restoration - Sample.pdf:PDF}
}
@article{Ju2019,
  author = {Ju, Qinjie and Chalon, Ren{\'e} and Derrode, St{\'e}phane},
  journal = {Proceedings of the ACM on Human-Computer Interaction},
  title = {Assisted Music Score Reading Using Fixed-Gaze Head Movement: Empirical Experiment and Design Implications},
  year = {2019},
  issn = {2573-0142},
  number = {EICS},
  pages = {3:1--3:29},
  volume = {3},
  acmid = {3300962},
  address = {New York, NY, USA},
  articleno = {3},
  doi = {10.1145/3300962},
  file = {:pdfs/2019 - Assisted Music Score Reading Using Fixed Gaze Head Movement_ Empirical Experiment and Design Implications.pdf:PDF},
  issue_date = {June 2019},
  keywords = {eye-tracking, gaze interaction, head movement, music score},
  numpages = {29},
  publisher = {ACM},
  url = {http://doi.acm.org/10.1145/3300962}
}
@article{Kassler1972,
  author = {Kassler, Michael},
  journal = {Perspectives of New Music},
  title = {Optical Character-Recognition of Printed Music: A Review of Two Dissertations. Automatic Recognition of Sheet Music by Dennis Howard Pruslin; Computer Pattern Recognition of Standard Engraved Music Notation by David Stewart Prerau},
  year = {1972},
  number = {1},
  pages = {250--254},
  volume = {11},
  file = {:pdfs/1972 - Optical Character Recognition of Printed Music - a Review of Two Dissertations.pdf:PDF},
  url = {http://www.jstor.org/stable/832471}
}
@inproceedings{Katayose1990,
  author = {Katayose, H. and Fukuoka, T. and Takami, K. and Inokuchi, S.},
  booktitle = {10th International Conference on Pattern Recognition},
  title = {Expression extraction in virtuoso music performances},
  year = {1990},
  pages = {780--784},
  abstract = {An approach to music interpretation by computers is discussed. A rule-based music interpretation system is being developed that generates sophisticated performance from a printed music score. The authors describe the function of learning how to play music, which is the most important process in music interpretation. The target to be learned is expression rules and grouping strategy: expression rules are used to convert dynamic marks and motives into concrete performance data, and grouping strategy is used to extract motives from sequences of notes. They are learned from a given virtuoso performance. The delicate control of attack timing and of the duration and strength of the notes is extracted by the music transcription function. The performance rules are learned by investigating how the same or similar musical primitives are played in a performance. As for the grouping strategy, the system analyzes how the player grouped music and registers dominant note sequences to extract motives.},
  doi = {10.1109/ICPR.1990.118216},
  file = {:pdfs/1990 - Expression Extraction in Virtuoso Music Performances.pdf:PDF},
  keywords = {electronic music;knowledge based systems;expression extraction;virtuoso music performances;rule-based music interpretation system;printed music score;music transcription function;dominant note sequences;motives;Multiple signal classification;Music;Data mining;Concrete;Synthesizers;Humans;Control engineering;Performance analysis;Registers;Manuals}
}
@article{Kato1987,
  author = {Kato, Ichiro and Ohteru, Sadamu and Shirai, Katsuhiko and Matsushima, Toshiaki and Narita, Seinosuke and Sugano, Shigeki and Kobayashi, Tetsunori and Fujisawa, Eizo},
  journal = {Robotics},
  title = {The robot musician 'wabot-2' (waseda robot-2)},
  year = {1987},
  issn = {0167-8493},
  note = {Special Issue: Sensors},
  number = {2},
  pages = {143--155},
  volume = {3},
  abstract = {The wabot-2 is an anthropomorphic robot playing keyboard instruments, developed by the study group of Waseda University's Science and Engineering Department. The wabot-2 is equipped with hands tapping softly on keys, with legs handling bass keys and expression pedal, with eyes reading a score, and with a mouth and ears to converse with humans. Based on wabot-2, wasubot has been developed by Sumitomo Electric Industries Ltd., whose artistic skill has been demonstrated in performing music at the Japanese Government Pavilion in Expo'85. The present paper summarizes the wabot-2's motion, visual and vocal subsystems as well as its supervisory system and singing voice-tracking subsystem.},
  doi = {10.1016/0167-8493(87)90002-7},
  keywords = {Anthropomorphic robot, Autonomous robot, Multiple Degrees of Freedom, Dexterity, camera, High speed image processing, Speech recognition, Speech synthesis},
  url = {http://www.sciencedirect.com/science/article/pii/0167849387900027}
}
@incollection{Kato1992,
  author = {Kato, Hirokazu and Inokuchi, Seiji},
  booktitle = {Structured Document Image Analysis},
  publisher = {Springer Berlin Heidelberg},
  title = {A Recognition System for Printed Piano Music Using Musical Knowledge and Constraints},
  year = {1992},
  address = {Berlin, Heidelberg},
  isbn = {978-3-642-77281-8},
  pages = {435--455},
  abstract = {We describe a recognition system for printed piano music, which presents challenging problems in both image pattern matching and semantic analysis. In music notation, the shape of symbols is simple, but confusing connections and overlaps among symbols occur. In order to deal with these difficulties, proper knowledge is required, so our system adopts a top-down approach based on bar-unit recognition to use musical knowledge and constraints effectively. Recognition results, described with a symbolic playable data format, exceed 90{\%} correct on beginner's piano music.},
  doi = {10.1007/978-3-642-77281-8_20},
  file = {:pdfs/1992 - A Recognition System for Printed Piano Music Using Musical Knowledge and Constraints.pdf:PDF},
  url = {https://doi.org/10.1007/978-3-642-77281-8_20}
}
@inproceedings{Kim1987,
  author = {Kim, W. J. and Chung, M. J. and Bien, Z.},
  booktitle = {TENCON 87: Computers and Communications Technology Toward 2000},
  title = {Recognition system for a printed music score},
  year = {1987},
  pages = {573--577},
  url = {http://www.dbpia.co.kr/Journal/ArticleDetail/NODE00396371}
}
@article{Kiriella2014,
  author = {Kiriella, Dawpadee B. and Kumari, Shyama C. and Ranasinghe, Kavindu C. and Jayaratne, Lakshman},
  journal = {GSTF Journal on Computing},
  title = {Music Training Interface for Visually Impaired through a Novel Approach to Optical Music Recognition},
  year = {2014},
  issn = {2010-2283},
  number = {4},
  pages = {45},
  volume = {3},
  abstract = {Some inherited barriers which limit human abilities can surprisingly be overcome through technology. This research focuses on defining a more reliable and controllable interface for visually impaired people to read and study eastern music notations, which are widely available in printed format. Another underlying concept was that differently-abled people should be assisted in a way that lets them pursue tasks of interest independently. The research provides a means to continue investigating the validity of using a controllable auditory interface instead of Braille music scripts converted with the help of third parties. The research further summarizes the requirements raised by the relevant users, design considerations, and evaluation results on user feedback for the proposed interface.},
  doi = {10.7603/s40601-013-0045-6},
  file = {:pdfs/2014 - Music Training Interface for Visually Impaired through a Novel Approach to Optical Music Recognition.pdf:PDF}
}
@inproceedings{Kletz2021,
  author = {Kletz, Marc and Pacha, Alexander},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Detecting Staves and Measures in Music Scores with Deep Learning},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {8--12},
  file = {:pdfs/2021 - Detecting Staves and Measures in Music Scores with Deep Learning.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{Knopke2007,
  author = {Knopke, Ian and Byrd, Donald},
  booktitle = {8th International Conference on Music Information Retrieval},
  title = {Towards Musicdiff : A Foundation for Improved Optical Music Recognition Using Multiple Recognizers},
  year = {2007},
  address = {Vienna, Austria},
  pages = {123--126},
  abstract = {This paper presents work towards a “musicdiff” program for comparing files representing different versions of the same piece, primarily in the context of comparing versions produced by different optical music recognition ({OMR}) programs. Previous work by the current authors and others strongly suggests that using multiple recognizers will make it possible to improve {OMR} accuracy substantially. The basic methodology requires several stages: documents must be scanned and submitted to several {OMR} programs, programs whose strengths and weaknesses have previously been evaluated in detail. We discuss techniques we have implemented for normalization, alignment and rudimentary error correction. We also describe a visualization tool for comparing multiple versions on a measure-by-measure basis.},
  file = {:pdfs/2007 - Towards Musicdiff - a Foundation for Improved Optical Music Recognition Using Multiple Recognizers.pdf:PDF},
  isbn = {978-3-85403-218},
  keywords = {evaluation},
  url = {http://homes.sice.indiana.edu/donbyrd/Papers/ismir_2007_omr.pdf}
}
@inproceedings{Kodirov2014,
  author = {Kodirov, Elyor and Han, Sejin and Lee, Guee-Sang and Kim, YoungChul},
  booktitle = {8th International Conference on Ubiquitous Information Management and Communication},
  title = {Music with Harmony: Chord Separation and Recognition in Printed Music Score Images},
  year = {2014},
  address = {Siem Reap, Cambodia},
  pages = {1--8},
  publisher = {ACM},
  abstract = {Optical music recognition systems have attracted general interest recently. These systems achieve accurate symbol recognition at some level. However, chords are not yet considered in these systems, even though they play a role in music. Therefore, we aimed to develop an algorithm that can deal with the separation and recognition of chords in music score images. Separation is necessary because chords can be touching, overlapped and/or broken due to noise and other reasons. Considering these problems, we propose top-down separation using domain information and characteristics of the chords. To handle recognition, we propose a modified zoning method with a k-nearest neighbor classifier. Also, we analyzed several classifiers with different features to see which method is reliable for chord recognition. Since this topic has not received special focus before, there is no standard benchmark to evaluate the performance of the algorithm. Thus, we introduce a new dataset, namely OMR-ChSR6306, which includes a wide range of chords such as single chords, touched chords, and overlapped chords. Experiments on the proposed dataset demonstrate that our algorithm can separate and recognize the chords, with 100% separation and 98.98% recognition accuracy, respectively.},
  acmid = {2558042},
  doi = {10.1145/2557977.2558042},
  file = {:pdfs/2014 - Music With Harmony - Chord Separation and Recognition in Printed Music Score Images.pdf:PDF},
  isbn = {978-1-4503-2644-5},
  keywords = {chord, k-nearest neighbor, music score images, recognition, separation, zoning}
}
@inproceedings{Kolakowska2008,
  author = {Ko{\l}akowska, Agata},
  booktitle = {1st International Conference on Information Technology},
  title = {Applying decision trees to the recognition of musical symbols},
  year = {2008},
  pages = {1--4},
  abstract = {The paper presents an experimental study on the recognition of printed musical scores. The first part of the study focuses on data preparation. Bitmaps containing musical symbols are converted to feature vectors using various methods. The vectors created in such a way are used to train classifiers which are the essential part of the study. Several decision tree classifiers are applied to this recognition task. These classifiers are created using different decision tree induction methods. The algorithms incorporate different criteria to select attributes in the nodes of the trees. Moreover, some of them apply stopping criteria, whereas the others perform tree pruning. The classification accuracy of the decision trees is estimated on data taken from musical scores. Eventually the usefulness of decision trees in the recognition of printed musical symbols is evaluated.},
  doi = {10.1109/INFTECH.2008.4621624},
  file = {:pdfs/2008 - Applying Decision Trees to the Recognition of Musical Symbols.pdf:PDF},
  keywords = {decision trees;document image processing;image classification;music;decision trees;musical symbol recognition;printed musical scores;data preparation;feature vectors;decision tree classifiers;task recognition;decision tree induction methods;tree pruning;printed musical symbols;Classification algorithms;Decision trees;Accuracy;Classification tree analysis;Machine learning algorithms;Software algorithms;Training}
}
@misc{KompApp,
  author = {Ragan, Gene},
  howpublished = {\url{http://kompapp.com}},
  title = {KompApp},
  year = {2017},
  url = {http://kompapp.com}
}
@inproceedings{Konwer2018,
  author = {Konwer, Aishik and Bhunia, Ayan Kumar and Bhowmick, Abir and Bhunia, Ankan Kumar and Banerjee, Prithaj and Roy, Partha Pratim and Pal, Umapada},
  booktitle = {24th International Conference on Pattern Recognition},
  title = {Staff line Removal using Generative Adversarial Networks},
  year = {2018},
  month = {Aug},
  pages = {1103--1108},
  abstract = {Staff line removal is a crucial pre-processing step in Optical Music Recognition. In this paper we propose a novel approach for staff line removal, based on Generative Adversarial Networks. We convert staff line images into patches and feed them into a U-Net, used as Generator. The Generator intends to produce staff-less images at the output. Then the Discriminator does binary classification and differentiates between the generated fake staff-less image and the real ground truth staff-less image. For training, we use a loss function which is a weighted combination of L2 loss and adversarial loss. L2 loss minimizes the difference between real and fake staff-less images. Adversarial loss helps to retrieve more high-quality textures in generated images. Thus our architecture supports solutions which are closer to the ground truth, and this is reflected in our results. For evaluation we consider the ICDAR/GREC 2013 staff removal database. Our method achieves superior performance in comparison to other conventional approaches on the same dataset.},
  doi = {10.1109/ICPR.2018.8546105},
  file = {:pdfs/2018 - Staff Line Removal Using Generative Adversarial Networks.pdf:PDF},
  issn = {1051-4651},
  keywords = {document image processing;image recognition;music;Adversarial loss;staff line removal;ICDAR-GREC 2013 staff removal database;ground truth staff;generated fake staff-less;staff-less images;staff line images;crucial pre-processing step;generative adversarial networks;Generators;Gallium nitride;Generative adversarial networks;Training;Task analysis;Convolutional neural networks;Image generation;Adversarial Loss;Staff-line Removal;Generative Adversarial Network;U-Net}
}
@article{Kopec1996,
  author = {Kopec, Gary E. and Chou, Philip A. and Maltz, David A.},
  journal = {Journal of Electronic Imaging},
  title = {Markov source model for printed music decoding},
  year = {1996},
  volume = {5},
  abstract = {A Markov source model is described for a simple subset of printed music notation that was developed as an extended example of the document image decoding (DID) approach to document image analysis. The model is based on the Adobe Sonata music symbol set and a finite-state language of textual music messages. The music message language is defined and several important aspects of message imaging are discussed. Aspects of music notation that appear problematic for a finite-state representation are identified. Finally, an example of music image decoding and resynthesis using the model is presented. Development of the model was greatly facilitated by the duality between image synthesis and image decoding that is fundamental to the DID paradigm.},
  doi = {10.1117/12.227527},
  file = {:pdfs/1996 - Markov Source Model for Printed Music Decoding.pdf:PDF},
  url = {https://www.researchgate.net/profile/Philip_Chou2/publication/220050304_Markov_source_model_for_printed_music_decoding/links/00b7d51aaadea3a4b6000000/Markov-source-model-for-printed-music-decoding.pdf}
}
@inproceedings{Kurth2008,
  author = {Kurth, Frank and Damm, David and Fremerey, Christian and M{\"u}ller, Meinard and Clausen, Michael},
  booktitle = {Research and Advanced Technology for Digital Libraries},
  title = {A Framework for Managing Multimodal Digitized Music Collections},
  year = {2008},
  address = {Berlin, Heidelberg},
  editor = {Christensen-Dalsgaard, Birte and Castelli, Donatella and Ammitzb{\o}ll Jurik, Bolette and Lippincott, Joan},
  pages = {334--345},
  publisher = {Springer Berlin Heidelberg},
  abstract = {In this paper, we present a framework for managing heterogeneous, multimodal digitized music collections containing visual music representations (scanned sheet music) as well as acoustic music material (audio recordings). As a first contribution, we propose a preprocessing workflow comprising feature extraction, audio indexing, and music synchronization (linking the visual with the acoustic data). Then, as a second contribution, we introduce novel user interfaces for multimodal music presentation, navigation, and content-based retrieval. In particular, our system offers high quality audio playback with time-synchronous display of the digitized sheet music. Furthermore, our system allows a user to select regions within the scanned pages of a musical score in order to search for musically similar sections within the audio documents. Our novel user interfaces and search functionalities will be integrated into the library service system of the Bavarian State Library as part of the Probado project.},
  doi = {10.1007/978-3-540-87599-4_35},
  file = {:pdfs/2008 - A Framework for Managing Multimodal Digitized Music Collections.pdf:PDF},
  isbn = {978-3-540-87599-4}
}
@inproceedings{Kusakunniran2014,
  author = {Kusakunniran, Worapan and Prempanichnukul, Attapol and Maneesutham, Arthid and Chocksawud, Kullachut and Tongsamui, Suparus and Thongkanchorn, Kittikhun},
  booktitle = {International Computer Science and Engineering Conference},
  title = {Optical music recognition for traditional Thai sheet music},
  year = {2014},
  organization = {IEEE},
  pages = {157--162},
  doi = {10.1109/ICSEC.2014.6978187},
  file = {:pdfs/2014 - Optical Music Recognition for Traditional Thai Sheet Music.pdf:PDF}
}
@inproceedings{Lallican2000,
  author = {Lallican, P. M. and Viard-Gaudin, C. and Knerr, S.},
  booktitle = {7th International Workshop on Frontiers in Handwriting Recognition},
  title = {From Off-Line to On-Line Handwriting Recognition},
  year = {2000},
  address = {Amsterdam},
  editor = {L. R. B. Schomaker and L. G. Vuurpijl},
  pages = {303--312},
  publisher = {International Unipen Foundation},
  file = {:pdfs/2000 - From off Line to on Line Handwriting Recognition.pdf:PDF},
  groups = {handwriting},
  isbn = {90-76942-01-3},
  url = {http://www.rug.nl/research/portal/files/2981118/paper-050-lallican.pdf}
}
@inproceedings{Laplante2016,
  author = {Laplante, Audrey and Fujinaga, Ichiro},
  booktitle = {3rd International Workshop on Digital Libraries for Musicology},
  title = {Digitizing musical scores: Challenges and opportunities for libraries},
  year = {2016},
  organization = {ACM},
  pages = {45--48},
  doi = {10.1145/2970044.2970055},
  file = {:pdfs/2016 - Digitizing musical scores- Challenges and opportunities for libraries.pdf:PDF}
}
@inproceedings{Lee2016a,
  author = {Lee, Sangkuk and Son, Sung Joon and Oh, Jiyong and Kwak, Nojun},
  booktitle = {International Conference on Information Science and Security},
  title = {Handwritten Music Symbol Classification Using Deep Convolutional Neural Networks},
  year = {2016},
  pages = {1--5},
  abstract = {In this paper, we utilize deep Convolutional Neural Networks (CNNs) to classify handwritten music symbols in the HOMUS data set. The HOMUS data set is made up of various types of strokes which contain time information, so online techniques were expected to be more appropriate for classification. However, experimental results show that a CNN which does not use time information achieved a classification accuracy of around 94.6%, far higher than the 82% of dynamic time warping (DTW), the prior state-of-the-art online technique. Finally, we achieved the best accuracy of around 95.6% with an ensemble of CNNs.},
  doi = {10.1109/ICISSEC.2016.7885856},
  file = {:pdfs/2016 - Handwritten Music Symbol Classification Using Deep Convolutional Neural Networks.pdf:PDF},
  keywords = {handwritten character recognition;image classification;music;neural nets;CNN;DTW;HOMUS data set;deep convolutional neural networks;dynamic time warping;handwritten music symbol classification;time information;Electronic mail;Fats;Handwriting recognition;Kernel;Music;Neural networks;Smart phones}
}
@techreport{Lehman-Borer2016,
  author = {Lehman-Borer, Ryerson},
  institution = {Swarthmore College},
  title = {Optical Music Recognition},
  year = {2016},
  file = {:pdfs/2016 - Lehman-Borer - Optical Music Recognition Thesis.pdf:PDF},
  url = {https://scholarship.tricolib.brynmawr.edu/handle/10066/18782}
}
@inproceedings{Leplumey1993,
  author = {Leplumey, Ivan and Camillerapp, Jean and Lorette, G.},
  booktitle = {2nd International Conference on Document Analysis and Recognition},
  title = {A robust detector for music staves},
  year = {1993},
  pages = {902--905},
  abstract = {A method for the automatic recognition of music staves based on a prediction-and-check technique is presented in order to extract staves. It can detect lines with some curvature, discontinuities, and inclination. Lines are asserted to be a part of a staff if they can be grouped by five, thus completing the staff. This last phase also identifies additional staff lines.},
  doi = {10.1109/ICDAR.1993.395591},
  file = {:pdfs/1993 - A Robust Detector for Music Staves.pdf:PDF},
  keywords = {image recognition;music;robust detector;music staves;automatic recognition;prediction-and-check technique;curvature;discontinuities;inclination;additional staff lines;Detectors;Multiple signal classification;Histograms;Noise robustness;Image recognition;Labeling;Image segmentation;Samarium;Image coding;Sampling methods}
}
@inproceedings{Lesinski2015,
  author = {Lesinski, Wojciech and Jastrzebska, Agnieszka},
  booktitle = {IFIP International Conference on Computer Information Systems and Industrial Management},
  title = {Optical Music Recognition: Standard and Cost-Sensitive Learning with Imbalanced Data},
  year = {2015},
  organization = {Springer},
  pages = {601--612},
  doi = {10.1007/978-3-319-24369-6_51},
  file = {:pdfs/2015 - Optical Music Recognition - Standard and Cost-Sensitive learning with Imbalanced Data.pdf:PDF}
}
@inproceedings{Li2018,
  author = {Li, Chuanzhen and Zhao, Jiaqi and Cai, Juanjuan and Wang, Hui and Du, Huaichang},
  booktitle = {11th International Symposium on Computational Intelligence and Design (ISCID)},
  title = {Optical Music Notes Recognition for Printed Music Score},
  year = {2018},
  month = {Dec},
  pages = {285--288},
  volume = {01},
  abstract = {To convert a printed music score into a machine-readable format, a system that can automatically decode the symbolic image and play the music is proposed. The system takes a music score image as input, segments music symbols after preprocessing the image, then recognizes their pitch and duration. Finally, MIDI files are generated. Experiments on the Rebelo Database show that the proposed method obtains superior recognition accuracy compared to other methods.},
  doi = {10.1109/ISCID.2018.00071},
  file = {:pdfs/2018 - Optical Music Notes Recognition for Printed Music Score.pdf:PDF},
  issn = {2473-3547},
  keywords = {decoding;image recognition;image segmentation;music;symbolic image decoding;music symbols segmentation;MIDI files;music score image;machine-readable format;printed music score;optical music notes recognition;Image segmentation;Image recognition;Head;Magnetic heads;Neural networks;Databases;Music;music score;segmentation;recognition;music symbols;MIDI}
}
@inproceedings{Lin2000,
  author = {Lin, Karen and Bell, Tim},
  booktitle = {1st International Symposium on Music Information Retrieval},
  title = {Integrating Paper and Digital Music Information Systems},
  year = {2000},
  pages = {23--25},
  abstract = {Active musicians generally rely on extensive personal paper-based music information retrieval systems containing scores, parts, compositions, and arrangements of published and hand-written music. Many have a bias against using computers to store, edit and retrieve music, and prefer to work in the paper domain rather than using digital documents, despite the flexibility and powerful retrieval opportunities available. In this paper we propose a model of operation that blurs the boundaries between the paper and digital domains, offering musicians the best of both worlds. A survey of musicians identifies the problems and potential of working with digital tools, and we propose a system using colour printing and scanning technology that simplifies the process of moving music documents between the two domains.},
  file = {:pdfs/2000 - Integrating Paper and Digital Music Information Systems.pdf:PDF},
  keywords = {optical music recognition, to classify, user interfaces, user needs},
  url = {http://ismir2000.ismir.net/posters/linbell_fullpaper.pdf}
}
@inproceedings{Liu2012,
  author = {Liu, Xiaoxiang},
  booktitle = {Intelligent Information and Database Systems},
  title = {Note Symbol Recognition for Music Scores},
  year = {2012},
  address = {Berlin, Heidelberg},
  editor = {Pan, Jeng-Shyang and Chen, Shyi-Ming and Nguyen, Ngoc Thanh},
  pages = {263--273},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Note symbol recognition plays a fundamental role in the process of an OMR system. In this paper, we propose new approaches for recognizing notes by extracting primitives and assembling them into constructed symbols. Firstly, we propose robust algorithms for extracting primitives (stems, noteheads and beams) based on Run-Length Encoding. Secondly, we introduce the concept of interaction field to describe the relationship between primitives, and define six hierarchical categories for the structure of notes. Thirdly, we propose an effective sequence to assemble the primitives into notes, guided by the mechanism of giving priority to the key structures. To evaluate the performance of those approaches, we present experimental results on real-life scores and comparisons with commercial systems. The results show our approaches can recognize notes with high accuracy and powerful adaptability, especially for complicated scores with a high density of symbols.},
  file = {:pdfs/2012 - Note Symbol Recognition for Music Scores.pdf:PDF},
  isbn = {978-3-642-28490-8},
  url = {https://link.springer.com/chapter/10.1007%2F978-3-642-28490-8_28}
}
@inproceedings{Liu2015,
  author = {Liu, Xiaoxiang and Zhou, Mi and Xu, Peng},
  booktitle = {14th International Conference on Computer-Aided Design and Computer Graphics},
  title = {A Robust Method for Musical Note Recognition},
  year = {2015},
  pages = {212--213},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Musical note recognition plays a fundamental role in the process of the optical music recognition system. In this paper, we propose a robust method for recognizing notes. The method includes three parts: (1) the description of relationships between primitives by introducing the concept of interaction field, (2) the definition of six hierarchical structure features for analyzing note structures, (3) the workflow of primitive assembly under the guidance of giving priority to key structure features. To evaluate the performance of our method, we present experimental results on real-life scores and comparisons with two commercial products. Experiments show that our method leads to quite good results, especially for complicated scores.},
  affiliation = {College of Electrical Engineering and Information, Jinan University, Zhuhai, China},
  author_keywords = {Musical note recognition; Optical music recognition; Primitive assembly; Primitive relationships},
  doi = {10.1109/CADGRAPHICS.2015.34},
  file = {:pdfs/2015 - A Robust Method for Musical Note Recognition.pdf:PDF},
  isbn = {9781467380201},
  keywords = {Computer graphics, Commercial products; Hierarchical structures; Interaction fields; Key structures; Musical notes; Optical music recognition; Primitive relationships; Robust methods, Computer aided design}
}
@incollection{Lopresti2002,
  author = {Lopresti, Daniel and Nagy, George},
  booktitle = {Graphics Recognition Algorithms and Applications},
  publisher = {Springer Berlin Heidelberg},
  title = {Issues in Ground-Truthing Graphic Documents},
  year = {2002},
  address = {Ontario, Canada},
  isbn = {978-3-540-45868-5},
  pages = {46--67},
  doi = {10.1007/3-540-45868-9_5},
  file = {:pdfs/2002 - Issues in Ground Truthing Graphic Documents.pdf:PDF}
}
@misc{Low2012,
  author = {Low, Grady and Chang, Yung-Ho},
  title = {Optical Music Recognition Application},
  year = {2012},
  file = {:pdfs/2012 - Optical Music Recognition Application.pdf:PDF},
  number = {April},
  pages = {1--52},
  url = {http://www.winlab.rutgers.edu/~crose/capstone12/entries/OpticalMusicRecognition.pdf}
}
@inproceedings{Luangnapa2012,
  author = {Luangnapa, Nawapon and Silpavarangkura, Thongchai and Nukoolkit, Chakarida and Mongkolnam, Pornchai},
  booktitle = {International Conference on Advances in Information Technology},
  title = {Optical Music Recognition on Android Platform},
  year = {2012},
  organization = {Springer},
  pages = {106--115},
  doi = {10.1007/978-3-642-35076-4},
  file = {:pdfs/2012 - Optical Music Recognition on Android Platform.pdf:PDF}
}
@inproceedings{Luckner2006,
  author = {Luckner, Marcin},
  booktitle = {6th International Conference on Intelligent Systems Design and Applications},
  title = {Recognition of Noised Patterns Using Non-Disruption Learning Set},
  year = {2006},
  pages = {557--562},
  abstract = {In this paper the recognition of strongly noised symbols on the basis of non-disruption patterns is discussed, taking music symbols as an example. Although Optical Music Recognition technology is not developed as successfully as OCR technology, several systems do recognize typical musical symbols to quite a good level. However, the recognition of non-typical fonts is still an unsolved issue. In this paper a model of a recognition system for unusual scores is presented. In the described model, non-disruption symbols are used to generate a learning set that makes improved recognition possible, as presented on a real example of rest and accidental recognition. Some techniques, both supervised and unsupervised, are presented with various recognition rates and computing times.},
  doi = {10.1109/ISDA.2006.223},
  file = {:pdfs/2006 - Recognition of Noised Patterns Using Non Disruption Learning Set.pdf:PDF},
  issn = {2164-7143},
  keywords = {optical character recognition;noised pattern recognition;nondisruption learning set;strongly noised symbol recognition;nondisruption patterns;music symbols;optical music recognition;recognition system;supervised recognition;unsupervised recognition;Pattern recognition;Optical character recognition software;Optical noise;Ordinary magnetoresistance;Geodesy;Probes;Noise generators;Testing;Delay;Computer networks}
}
@inproceedings{Luth2002,
  author = {Luth, Nailja},
  booktitle = {2nd International Conference on WEB Delivering of Music},
  title = {Automatic Identification of Music Notations},
  year = {2002},
  doi = {10.1109/WDM.2002.1176212},
  file = {:pdfs/2002 - Automatic Identification of Music Notations.pdf:PDF},
  isbn = {0769518621}
}
@inproceedings{MacMillan2001,
  author = {MacMillan, Karl and Droettboom, Michael and Fujinaga, Ichiro},
  booktitle = {2nd International Symposium on Music Information Retrieval},
  title = {Gamera: A structured document recognition application development environment},
  year = {2001},
  address = {Bloomington, IN},
  pages = {15--16},
  file = {:pdfs/2001 - Gamera_ a Structured Document Recognition Application Development Environment.pdf:PDF},
  url = {https://jscholarship.library.jhu.edu/handle/1774.2/44376}
}
@inproceedings{MacMillan2002,
  author = {MacMillan, Karl and Droettboom, Michael and Fujinaga, Ichiro},
  booktitle = {International Computer Music Conference},
  title = {Gamera: Optical music recognition in a new shell},
  year = {2002},
  pages = {482--485},
  file = {:pdfs/2002 - Gamera - Optical Music Recognition in a New Shell.pdf:PDF},
  keywords = {optical music recognition, to classify},
  url = {http://www.music.mcgill.ca/~ich/research/icmc02/icmc2002.gamera.pdf}
}
@inproceedings{Malik2013,
  author = {Malik, Rakesh and Roy, Partha Pratim and Pal, Umapada and Kimura, Fumitaka},
  booktitle = {12th International Conference on Document Analysis and Recognition},
  title = {Handwritten Musical Document Retrieval Using Music-Score Spotting},
  year = {2013},
  pages = {832--836},
  abstract = {In this paper, we present a novel approach for retrieval of handwritten musical documents using a query sequence/word of musical scores. In our algorithm, the musical score-words are described as sequences of symbols generated from a universal codebook vocabulary of musical scores. Staff lines are removed first from musical documents using structural analysis of staff lines, and the symbol codebook vocabulary is created offline. Next, using this symbol codebook, the music symbol information in each document image is encoded. Given a query sequence of musical symbols in a musical score-line, the symbols in the query are searched in each of these encoded documents. Finally, a sub-string matching algorithm is applied to find query words. For the codebook, two different feature extraction methods, namely Zernike Moments and 400-dimensional gradient features, are tested, and two unsupervised classifiers using SOM and K-Means are evaluated. The results are compared with a baseline approach of DTW. The performance is measured on a collection of handwritten musical documents and the results are promising.},
  doi = {10.1109/ICDAR.2013.170},
  file = {:pdfs/2013 - Handwritten Musical Document Retrieval using Music-Score Spotting.pdf:PDF},
  issn = {1520-5363},
  keywords = {document image processing;image retrieval;music;string matching;Zernike moments;document image;feature extraction methods;handwritten musical document retrieval;music-score spotting;musical score-line;musical scores query sequence-word;substring matching algorithm;universal codebook vocabulary;unsupervised classifiers;Algorithm design and analysis;Classification algorithms;Feature extraction;Heuristic algorithms;Indexing;Vectors;Vocabulary;Approximate String Matching;Musical Document Retrieval;Staff Removal;Symbol Classification}
}
@inproceedings{Marinai1999,
  author = {Marinai, Simone and Nesi, Paolo},
  booktitle = {5th International Conference on Document Analysis and Recognition},
  title = {Projection Based Segmentation of Musical Sheets},
  year = {1999},
  pages = {3--6},
  abstract = {The automatic recognition of music scores is a key process for the electronic treatment of music information. In this paper we present the segmentation module of an {OMR} system. The proposed approach is based on the use of projection profiles for the location of the elementary symbols that constitute the music notation. An extensive experimentation was made with the help of a tool developed for this purpose. Reported results show a high efficiency in the correct location of elementary symbols.},
  doi = {10.1109/ICDAR.1999.791838},
  file = {:pdfs/1999 - Projection Based Segmentation of Musical Sheets.pdf:PDF},
  isbn = {0-7695-0318-7}
}
@article{Martin1992,
  author = {Martin, Philippe and Bellisant, Camille},
  journal = {International Journal of Pattern Recognition and Artificial Intelligence},
  title = {Neural Networks for the Recognition of Engraved Musical Scores},
  year = {1992},
  number = {01},
  pages = {193--208},
  volume = {06},
  abstract = {The image analysis levels of a recognition system for engraved musical scores are described. Recognizing musical score images requires an accurate segmentation stage to isolate symbols from staff lines. This symbols/staves segregation is achieved by the use of inscribed line (chord) information. This information, processed by a multilayer perceptron, allows an efficient segmentation in terms of the remaining connected components. Some of these components are then classified, using another network, according to a coding of their skeleton graph. Special attention is paid to the design of the networks: the architectures are adapted to the specificities of each task. Multilayer perceptrons are employed here together with other more classical image analysis techniques which are also presented.},
  doi = {10.1142/S0218001492000114}
}
@phdthesis{Martin1992a,
  author = {Martin, Philippe},
  school = {Universit{\'e} Joseph-Fourier - Grenoble I},
  title = {Artificial neural networks: application to optical musical score recognition},
  year = {1992},
  type = {PhD Thesis},
  file = {:pdfs/1992 - Artificial Neural Networks _ Application to Optical Musical Score Recognition.pdf:PDF},
  keywords = {artificial neural networks; multilayer networks; threshold automata; classification trees; pattern recognition; image analysis; musical scores},
  url = {https://tel.archives-ouvertes.fr/tel-00340938}
}
@inproceedings{MartinezSevilla2023,
  author = {Martinez-Sevilla, Juan Carlos and Castellanos, Francisco J.},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Towards Music Notation and Lyrics Alignment: Gregorian Chants as Case Study},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {15--19},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Towards Music Notation and Lyrics Alignment_ Gregorian Chants As Case Study.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@inproceedings{MasCandela2021,
  author = {Mas-Candela, Enrique and Alfaro-Contreras, Mar\'{i}a},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Sequential Next-Symbol Prediction for Optical Music Recognition},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {13--17},
  file = {:pdfs/2021 - Sequential Next Symbol Prediction for Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@mastersthesis{Mateiu2019,
  author = {Mateiu, Tudor Nicolae},
  school = {Universidad de Alicante},
  title = {Unsupervised Learning for Domain Adaptation in automatic classification tasks through Neural Networks},
  year = {2019},
  type = {mathesis},
  abstract = {Machine Learning systems have improved dramatically in recent years for automatic recognition and artificial intelligence tasks. In general, these systems are based on the use of a large amount of labeled data - also called training sets - in order to learn a model that fits the problem in question. The training set consists of examples of possible inputs to the system and the output expected from them. Obtaining this training set is the main limitation to using Machine Learning systems, since it requires human effort to find and map possible inputs to their corresponding outputs. The situation is often frustrating since systems learn to solve the task for a specific domain - that is, a type of input with relatively homogeneous conditions - and they are not able to generalize to correctly solve the same task in other domains. This project considers the use of Domain Adaptation algorithms, which are capable of learning to adapt a Machine Learning model to work in an unknown domain based on only unlabeled data (unsupervised learning). This facilitates the transfer of systems to new domains because obtaining unlabeled data is relatively cheap; the real cost lies in labeling it. To date, Domain Adaptation algorithms have been used in very restricted contexts, so this project aims to make an empirical evaluation of these algorithms in a greater number of cases, as well as propose possible improvements.},
  file = {:pdfs/2019 - Unsupervised Learning for Domain Adaptation in Automatic Classification Tasks through Neural Networks.pdf:PDF},
  url = {http://hdl.handle.net/10045/96448}
}
@inproceedings{Mateiu2019a,
  author = {Mateiu, Tudor N. and Gallego, Antonio-Javier and Calvo-Zaragoza, Jorge},
  booktitle = {Pattern Recognition and Image Analysis},
  title = {Domain Adaptation for Handwritten Symbol Recognition: A Case of Study in Old Music Manuscripts},
  year = {2019},
  address = {Cham},
  editor = {Morales, Aythami and Fierrez, Julian and S{\'a}nchez, Jos{\'e} Salvador and Ribeiro, Bernardete},
  pages = {135--146},
  publisher = {Springer International Publishing},
  abstract = {The existence of a large amount of untranscripted music manuscripts has caused initiatives that use Machine Learning (ML) for Optical Music Recognition, in order to efficiently transcribe the music sources into a machine-readable format. Although most music manuscripts are similar in nature, they inevitably vary from one another. This fact can negatively influence the complexity of the classification task because most ML models fail to transfer their knowledge from one domain to another, thereby requiring learning from scratch on new domains after manually labeling new data. This work studies the ability of a Domain Adversarial Neural Network for domain adaptation in the context of classifying handwritten music symbols. The main idea is to exploit the knowledge of a specific manuscript to classify symbols from different (unlabeled) manuscripts. The reported results are promising, obtaining a substantial improvement over a conventional Convolutional Neural Network approach, which can be used as a basis for future research.},
  doi = {10.1007/978-3-030-31321-0_12},
  file = {:pdfs/2019 - Domain Adaptation for Handwritten Symbol Recognition_ a Case of Study in Old Music Manuscripts.pdf:PDF},
  isbn = {978-3-030-31321-0}
}
@inproceedings{Matsushima1985,
  author = {Matsushima, T. and Sonomoto, I. and Harada, T. and Kanamori, K. and Ohteru, S.},
  booktitle = {International Conference on Advanced Robotics},
  title = {Automated High Speed Recognition of Printed Music ({WABOT}-2 Vision System)},
  year = {1985},
  pages = {477--482},
  abstract = {Concerns the intelligent robot WABOT-2, which can play an electronic piano, using ten fingers and feet, while reading printed music. It can hold a conversation with a man using an artificial voice. The paper reports on its vision system, which can recognize not only a printed score but also fine hand-written score or instant lettering score. The resulting musical robot vision performance is sufficient to permit the reading of one sheet of commercially available printed music for an electric piano with three parts. Pertinent data can be recognized in about 15 seconds, with 100\% accuracy.},
  keywords = {computer vision; automated high speed recognition; printed music; vision system; intelligent robot; WABOT-2; electronic piano; artificial voice; fine hand-written score; instant lettering score},
  url = {https://ci.nii.ac.jp/naid/10006937757/en/}
}
@inproceedings{Mayer2022,
  author = {Mayer, Ji{{\v{r}}}{\'{i}} and Pecina, Pavel},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Obstacles with Synthesizing Training Data for OMR},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {15--19},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Obstacles with Synthesizing Training Data for OMR.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@article{McGee1991,
  author = {McGee, William and Merkley, Paul},
  journal = {Computers and the Humanities},
  title = {The Optical Scanning of Medieval Music},
  year = {1991},
  issn = {1572-8412},
  number = {1},
  pages = {47--53},
  volume = {25},
  doi = {10.1007/BF00054288},
  url = {https://doi.org/10.1007/BF00054288}
}
@article{McKay2007,
  author = {McKay, Cory and Fujinaga, Ichiro},
  journal = {Journal of Interdisciplinary Music Studies},
  title = {Style-independent computer-assisted exploratory analysis of large music collections},
  year = {2007},
  number = {1},
  pages = {63--85},
  volume = {1},
  file = {:pdfs/2007 - Style Independent Computer Assisted Exploratory Analysis of Large Music Collections.pdf:PDF},
  url = {https://www.researchgate.net/profile/Ichiro_Fujinaga/publication/237570792_Style-Independent_Computer-Assisted_Exploratory_Analysis_of_Large_Music_Collections_Buyuk_Muzik_Koleksiyonlarinin_Bicemden_Baimsiz_Bilgisayar_Destekli_Keif_Niteliinde_Cozumlenmesi/links/09e4150a2948c805f9000000.pdf}
}
@inproceedings{McLeod2018,
  author = {McLeod, Andrew and Steedman, Mark},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Evaluating Automatic Polyphonic Music Transcription},
  year = {2018},
  address = {Paris, France},
  pages = {42--49},
  file = {:pdfs/2018 - Evaluating Automatic Polyphonic Music Transcription.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/148_Paper.pdf}
}
@techreport{McPherson1999,
  author = {McPherson, John R.},
  institution = {University of Canterbury, New Zealand},
  title = {Page Turning --- Score Automation for Musicians},
  year = {1999},
  file = {:pdfs/1999 - Page Turning Score Automation for Musicians.pdf:PDF},
  url = {http://hdl.handle.net/10092/13351}
}
@misc{McPherson2001,
  author = {McPherson, John R.},
  title = {Using feedback to improve Optical Music Recognition},
  year = {2001},
  file = {:pdfs/2001 - Using Feedback to Improve Optical Music Recognition.pdf:PDF},
  keywords = {feedback}
}
@inproceedings{McPherson2002,
  author = {McPherson, John R.},
  booktitle = {3rd International Conference on Music Information Retrieval},
  title = {Introducing Feedback into an Optical Music Recognition System},
  year = {2002},
  address = {Paris, France},
  file = {:pdfs/2002 - Introducing Feedback into an Optical Music Recognition System.pdf:PDF},
  url = {http://ismir2002.ircam.fr/proceedings/03-SP01-2.pdf}
}
@techreport{McPherson2002a,
  author = {McPherson, John R. and Bainbridge, David},
  institution = {University of Waikato, Hamilton, New Zealand},
  title = {Coordinating Knowledge Within an Optical Music Recognition System},
  year = {2002},
  file = {:pdfs/2002 - Coordinating Knowledge within an Optical Music Recognition System.pdf:PDF},
  url = {https://www.researchgate.net/publication/2395549_Coordinating_Knowledge_Within_an_Optical_Music_Recognition_System}
}
@phdthesis{McPherson2006,
  author = {McPherson, John R.},
  school = {The University of Waikato},
  title = {Coordinating Knowledge To Improve Optical Music Recognition},
  year = {2006},
  file = {:pdfs/2006 - Coordinating Knowledge to Improve Optical Music Recognition.pdf:PDF},
  url = {https://www.researchgate.net/profile/John_Mcpherson9/publication/242402211_COORDINATING_KNOWLEDGE_TO_IMPROVE_OPTICAL_MUSIC_RECOGNITION/links/55c0719908ae092e9666b75b.pdf}
}
@article{Mehta2014,
  author = {Mehta, Apurva Ashokbhai and Bhatt, Malay S.},
  journal = {International Journal of Advance Research in Computer Science and Management Studies},
  title = {Practical Issues in the Field of Optical Music Recognition},
  year = {2014},
  issn = {2321-7782},
  note = {Dubious Journal},
  number = {1},
  pages = {513--518},
  volume = {2},
  file = {:pdfs/2014 - Practical Issues in the Field of Optical Music Recognition.pdf:PDF},
  url = {http://www.ijarcsms.com/docs/paper/volume2/issue1/V2I1-0136.pdf}
}
@inproceedings{Mehta2015,
  author = {Mehta, Apurva A. and Bhatt, Malay S.},
  booktitle = {International Conference on Computer Communication and Informatics},
  title = {Optical Music Notes Recognition for Printed Piano Music Score Sheet},
  year = {2015},
  address = {Coimbatore, India},
  abstract = {Entertainment, therapy and education are the fields where music is always found coupled with humans. Music is presented to us in various formats, such as aural, visual and one more - the written form of music, which is far less known to us. In a way, music dominates our life. The system discussed in this paper takes as input an image of a music score written for piano in modern staff notation. Segmentation is carried out using hierarchical decomposition with thresholding, along with the stave lines of the score sheet. Segmented symbols are recognized through an established artificial neural network based on a boosting approach. Recognized symbols are represented in an admissible way. The system is capable of addressing very complex cases, and validation is done over 53 songs available from various global music score resources. The segmentation algorithms achieve an accuracy of 99.12%, and segmented symbols are recognized with an accuracy of 92.38% with the help of PCA and AdaBoost.},
  doi = {10.1109/ICCCI.2015.7218061},
  file = {:pdfs/2015 - Optical Music Notes Recognition for Printed Piano Music Score Sheet.pdf:PDF},
  isbn = {9781479968053},
  keywords = {- grand stave, adaboost, hierarchical decomposition, measures, modern staff notation, pca using, piano score, svd, to classify}
}
@article{Mengarelli2019,
  author = {Mengarelli, Luciano and Kostiuk, Bruno and Vit{\'o}rio, Jo{\~a}o G. and Tibola, Maicon A. and Wolff, William and Silla, Carlos N.},
  journal = {Multimedia Tools and Applications},
  title = {OMR metrics and evaluation: a systematic review},
  year = {2019},
  issn = {1573-7721},
  month = {Dec},
  abstract = {Music is rhythm, timbre, tones, intensity and performance. Conventional Western Music Notation (CWMN) is used to generate music scores in order to register music on paper. Optical Music Recognition (OMR) studies techniques and algorithms for converting music scores into a readable format for computers. This work presents a systematic literature review (SLR) searching for metrics and methods for evaluating and comparing OMR systems and algorithms. The most commonly used metrics in OMR works are described. A research protocol is elaborated and executed. From 802 publications found, 94 are evaluated. All results are organized and classified focusing on metrics, stages, comparisons, OMR datasets and related works. Although there is still no standard methodology for evaluating OMR systems, a good number of datasets and metrics are already available and apply to all the stages of OMR. Some of the analyzed works can give good directions for future work.},
  day = {14},
  doi = {10.1007/s11042-019-08200-0},
  file = {:pdfs/2019 - OMR Metrics and Evaluation_ a Systematic Review.pdf:PDF}
}
@techreport{Metaj2019,
  author = {Metaj, Stiven and Magnolfi, Federico},
  institution = {Politecnico di Milano},
  title = {MNR: MUSCIMA Notes Recognition. Using Faster R-CNN on handwritten music dataset},
  year = {2019},
  type = {resreport},
  doi = {10.13140/RG.2.2.29120.48640},
  file = {:pdfs/2019 - MNR_ MUSCIMA Notes Recognition. Using Faster R CNN on Handwritten Music Dataset..pdf:PDF}
}
@inproceedings{Mexin2017,
  author = {Mexin, Yevgen and Hadjakos, Aristotelis and Berndt, Axel and Waloschek, Simon and Wawilow, Anastasia and Szwillus, Gerd},
  booktitle = {14th Sound and Music Computing Conference},
  title = {Tools for Annotating Musical Measures in Digital Music Editions},
  year = {2017},
  address = {Espoo, Finland},
  pages = {279--286},
  file = {:pdfs/2017 - Tools for Annotating Musical Measures in Digital Music Editions.pdf:PDF},
  url = {http://smc2017.aalto.fi/media/materials/proceedings/SMC17_p279.pdf}
}
@inproceedings{Mico2018,
  author = {Mic{\'{o}}, Luisa and I{\~{n}}esta, Jos{\'{e}} Manuel and Rizo, David},
  booktitle = {11th International Workshop on Machine Learning and Music},
  title = {Incremental Learning for Recognition of Handwritten Mensural Notation},
  year = {2018},
  file = {:pdfs/2018 - Incremental Learning for Recognition of Handwritten Mensural Notation.pdf:PDF},
  url = {https://sites.google.com/site/faimmusic2018/program}
}
@inproceedings{Mico2020,
  author = {Mic{\'o}, Luisa and Oncina, Jose and I{\~{n}}esta, Jos{\'e} M.},
  booktitle = {Machine Learning and Knowledge Discovery in Databases},
  title = {Adaptively Learning to Recognize Symbols in Handwritten Early Music},
  year = {2020},
  address = {Cham},
  editor = {Cellier, Peggy and Driessens, Kurt},
  pages = {470--477},
  publisher = {Springer International Publishing},
  abstract = {Human supervision is necessary for a correct edition and publication of handwritten early music collections. The output of an optical music recognition system for that kind of documents may contain a significant number of errors, making it tedious to correct for a human expert. An adequate strategy is needed to optimize the human feedback information during the correction stage to adapt the classifier to the specificities of each manuscript. In this paper, we compare the performance of a neural system, difficult and slow to be retrained, and a nearest neighbor strategy, based on the neural codes provided by a neural net, trained offline, used as a feature extractor.},
  doi = {10.1007/978-3-030-43887-6_40},
  file = {:pdfs/2020 - Adaptively Learning to Recognize Symbols in Handwritten Early Music.pdf:PDF},
  isbn = {978-3-030-43887-6}
}
@inproceedings{Min2011,
  author = {Min, Du},
  booktitle = {International Conference on Business Management and Electronic Information},
  title = {Research on numbered musical notation recognition and performance in a intelligent system},
  year = {2011},
  pages = {340--343},
  abstract = {An intelligent system with numbered musical notation recognition and performance (NMRPIS) is presented, which is based on notation recognition and can play digital music automatically. The system incorporates OMR to analyze musical notation, interpret it completely, and form the output quickly and efficiently via an embedded program. The experimental results indicate that this system has a high classification rate and high recognition performance.},
  doi = {10.1109/ICBMEI.2011.5916943},
  file = {:pdfs/2011 - Research on Numbered Musical Notation Recognition and Performance in a Intelligent System.pdf:PDF},
  keywords = {electronic music;embedded systems;music;optical character recognition;pattern classification;numbered musical notation recognition;intelligent system;digital music;OMR;embedded program;optical music recognition;Feature extraction;Image recognition;Heuristic algorithms;Support vector machine classification;Intelligent systems;Music;Optical imaging;notation recognition;OMR;music}
}
@mastersthesis{Miro2019,
  author = {Mir{\'{o}}, Jordi Burgu{\'{e}}s},
  school = {Universitat Politècnica de Catalunya},
  title = {Recognition of musical symbols in scores using neural networks},
  year = {2019},
  address = {Barcelona},
  month = jun,
  abstract = {Object detection is present nowadays in many aspects of our life. From security to entertainment, its applications play a key role in the computer vision and image processing worlds. This thesis addresses, through the usage of an object detector, the creation of an application that allows its user to play a music score. The main goal is to display a digital music score and be able to play it by touching on its notes. In order to achieve the proposed system, deep learning techniques based on neural networks are used to detect musical symbols from a digitized score and infer their position along the staff lines. Different models and approaches are considered to tackle the main objective.},
  file = {:pdfs/2019 - Recognition of Musical Symbols in Scores Using Neural Networks.pdf:PDF},
  url = {http://hdl.handle.net/2117/165583}
}
@inproceedings{Mitobe2004,
  author = {Mitobe, Youichi and Miyao, Hidetoshi and Maruyama, Minoru},
  booktitle = {9th International Workshop on Frontiers in Handwriting Recognition},
  title = {A fast HMM algorithm based on stroke lengths for on-line recognition of handwritten music scores},
  year = {2004},
  pages = {521--526},
  abstract = {The hidden Markov model (HMM) has been successfully applied to various kinds of on-line recognition problems including speech recognition, handwritten character recognition, etc. In this paper, we propose an on-line method to recognize handwritten music scores. To speed up the recognition process and improve usability of the system, the following methods are explained: (1) The target HMMs are restricted based on the length of a handwritten stroke, and (2) Probability calculations of HMMs are successively made as a stroke is being written. As a result, a recognition rate of 85.78% and an average recognition time of 5.19 ms/stroke were obtained for 6,999 test strokes of handwritten music symbols. The proposed HMM recognition rate is 2.4% higher than that achieved with the traditional method, and the processing time was 73% of that required by the traditional method.},
  doi = {10.1109/IWFHR.2004.2},
  file = {:pdfs/2004 - A Fast HMM Algorithm Based on Stroke Lengths for on Line Recognition of Handwritten Music Scores.pdf:PDF},
  issn = {1550-5235},
  keywords = {hidden Markov models;handwritten character recognition;fast HMM algorithm;stroke lengths;online recognition;handwritten music scores;handwritten stroke;Probability calculations;handwritten music symbols;Hidden Markov models;Handwriting recognition;Multiple signal classification;Probability;Character recognition;Speech recognition;Usability;Shape;Target recognition;Testing;HMM;Handwritten Music Score Recognition;On-line Symbol Recognition}
}
@inproceedings{Miyao1995,
  author = {Miyao, Hidetoshi and Nakano, Yasuaki},
  booktitle = {3rd International Conference on Document Analysis and Recognition},
  title = {Head and stem extraction from printed music scores using a neural network approach},
  year = {1995},
  pages = {1074--1079},
  abstract = {In an automatic music score recognition system, it is very important to extract the heads and stems of notes, since these symbols are the most ubiquitous in a score and musically important. The purpose of our system is to present an accurate and high-speed extraction of note heads (except the whole notes) and stems according to the following procedure. (1) We extract all regions which are considered as candidates of stems or heads. (2) To identify heads among the candidates, we use a three-layer neural network. (3) The weights for the network are learned by the back propagation method. In the learning, the network learns the spatial constraints between heads and surroundings rather than the shapes of heads. (4) After the learning process is completed, we use this network to identify a number of test head candidates. (5) The stem candidates touching the detected heads are extracted as true stems. As an experimental result, we obtained high recognition rates of 99.0{\%} and 99.2{\%} for stems and note heads, respectively. It took between 40 and 100 seconds to process a printed piano score on an A4 sheet using a workstation. Therefore, our system can analyze a score at least 10 times as fast as manual methods.},
  doi = {10.1109/ICDAR.1995.602095},
  file = {:pdfs/1995 - Head and Stem Extraction from Printed Music Scores Using a Neural Network Approach.pdf:PDF},
  isbn = {0-8186-7128-9}
}
@article{Miyao1996,
  author = {Miyao, Hidetoshi and Nakano, Yasuaki},
  journal = {IEICE Transactions on Information and Systems},
  title = {Note symbol extraction for printed piano scores using neural networks},
  year = {1996},
  number = {5},
  pages = {548--554},
  volume = {E79-D},
  abstract = {In traditional note symbol extraction processes, extracted candidates of note elements were identified using complex if-then rules based on note formation rules, and these rules needed subtle parameter adjustment through many experiments. The purpose of our system is to avoid these tedious tasks and to provide accurate, high-speed extraction of note heads, stems and flags according to the following procedure. (1) We extract head and flag candidates based on the stem positions. (2) To identify heads and flags among the candidates, we use a pair of three-layer neural networks. To train the networks, we give the position information and reliability factors of the candidates to the input units. (3) With the weights learned by the networks, the head and flag candidates are recognized. As an experimental result, we obtained a high extraction rate of more than 99\% for thirteen printed piano scores on A4 sheets of varying difficulty. Using a workstation (SPARC Station 10), processing took about 90 seconds on average, which means that our system can analyze piano scores at least five times as fast as manual work. Therefore, our system can execute the task without the traditional tedious work, and can recognize scores quickly and accurately.},
  keywords = {note symbol extraction; printed piano scores; position information; high-speed extraction; flag candidates; head candidates; three-layer neural networks; reliability factors; SPARC Station 10; template matching; character recognition; score recognition techniques},
  url = {https://search.ieice.org/bin/summary.php?id=e79-d_5_548}
}
@inproceedings{Miyao2000,
  author = {Miyao, Hidetoshi and Haralick, Robert Martin},
  booktitle = {4th International Workshop on Document Analysis Systems},
  title = {Format of Ground Truth Data Used in the Evaluation of the Results of an Optical Music Recognition System},
  year = {2000},
  address = {Brazil},
  pages = {497--506},
  file = {:pdfs/2000 - Format of Ground Truth Data Used in the Evaluation of the Results of an Optical Music Recognition System.pdf:PDF},
  keywords = {evaluation},
  url = {https://www.researchgate.net/profile/Robert_Haralick/publication/242138660_Format_of_Ground_Truth_Data_Used_in_the_Evaluation_of_the_Results_of_an_Optical_Music_Recognition_System/links/0046353bac1589cc3f000000.pdf}
}
@inproceedings{Miyao2002,
  author = {Miyao, Hidetoshi},
  booktitle = {Intelligent Data Engineering and Automated Learning},
  title = {Stave Extraction for Printed Music Scores},
  year = {2002},
  editor = {Yin, Hujun and Allinson, Nigel and Freeman, Richard and Keane, John and Hubbard, Simon},
  pages = {562--568},
  publisher = {Springer Berlin Heidelberg},
  abstract = {In this paper, a satisfactory method is described for the extraction of staff lines in which there are some inclinations, discontinuities, and curvatures. The extraction calls for four processes: (1) Extraction of specific points on a stave on vertical scan lines, (2) Connection of the points using DP matching, (3) Composition of stave groups using labeling, and (4) Extraction and adjustment of the edges of lines. The experiment resulted in an extraction rate of 99.4{\%} for 71 printed music scores that included lines with some inclinations, discontinuities, and curvatures.},
  file = {:pdfs/2002 - Stave Extraction for Printed Music Scores.pdf:PDF},
  isbn = {978-3-540-45675-9},
  url = {https://link.springer.com/chapter/10.1007/3-540-45675-9_85}
}
@inproceedings{Miyao2004,
  author = {Miyao, Hidetoshi and Maruyama, Minoru},
  booktitle = {17th International Conference on Pattern Recognition},
  title = {An online handwritten music score recognition system},
  year = {2004},
  publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},
  doi = {10.1109/icpr.2004.1334164},
  file = {:pdfs/2004 - An Online Handwritten Music Score Recognition System.pdf:PDF},
  groups = {recognition}
}
@article{Modayur1993,
  author = {Modayur, Bharath R. and Ramesh, Visvanathan and Haralick, Robert M. and Shapiro, Linda G.},
  journal = {Machine Vision and Applications},
  title = {{MUSER}: A prototype musical score recognition system using mathematical morphology},
  year = {1993},
  issn = {1432-1769},
  number = {2},
  pages = {140--150},
  volume = {6},
  abstract = {Music representation utilizes a fairly rich repertoire of symbols. These symbols appear on a score sheet with relatively little shape distortion, differing from the prototype symbol shapes mainly by a positional translation and scale change. The prototype system we describe in this article is aimed at recognizing printed music notation from digitized music score images. The recognition system is composed of two parts: a low-level vision module that uses morphological algorithms for symbol detection and a high-level module that utilizes prior knowledge of music notation to reason about spatial positions and spatial sequences of these symbols. The high-level module also employs verification procedures to check the veracity of the output of the morphological symbol recognizer. The system produces an ASCII representation of music scores that can be input to a music-editing system. Mathematical morphology provides us the theory and the tools to analyze shapes. This characteristic of mathematical morphology lends itself well to analyzing and subsequently recognizing music scores that are rich in well-defined musical symbols. Since morphological operations can be efficiently implemented in machine vision systems that have special hardware support, the recognition task can be performed in near real-time. The system achieves accuracy in excess of 95{\%} on the sample scores processed so far with a peak accuracy of 99.7{\%} for the quarter and eighth notes, demonstrating the efficacy of morphological techniques for shape extraction.},
  doi = {10.1007/BF01211937}
}
@techreport{Modayur1996,
  author = {Modayur, Bharath R.},
  institution = {Electrical Engineering Department, University of Washington, Seattle},
  title = {Music Score Recognition - A Selective Attention Approach using Mathematical Morphology},
  year = {1996},
  file = {:pdfs/1996 - Music Score Recognition a Selective Attention Approach Using Mathematical Morphology.pdf:PDF},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.128.887&rep=rep1&type=pdf}
}
@inproceedings{Montagner2014,
  author = {Montagner, Igor dos Santos and Hirata, Roberto Jr. and Hirata, Nina S. T.},
  booktitle = {International Conference on Image Processing},
  title = {Learning to remove staff lines from music score images},
  year = {2014},
  pages = {2614--2618},
  abstract = {The methods for removal of staff lines rely on characteristics specific
	to musical documents and they are usually not robust to some types
	of imperfections in the images. To overcome this limitation, we propose
	the use of binary morphological operator learning, a technique that
	estimates a local operator from a set of example images. Experimental
	results in both synthetic and real images show that our approach
	can adapt to different types of deformations and achieves similar
	or better performance than existing methods in most of the test scenarios.},
  doi = {10.1109/ICIP.2014.7025529},
  file = {:pdfs/2014 - Learning To Remove Staff Lines From Music Score Images.pdf:PDF},
  issn = {1522-4880},
  keywords = {document handling;learning (artificial intelligence);music;binary morphological operator learning;deformation;local operator;music score images;musical document characteristics;staff line removal method;Accuracy;Learning systems;Optical imaging;Robustness;Skeleton;Text analysis;Training;Document analysis;Machine Learning;Optical Music Recognition;Staff Removal}
}
@inproceedings{Montagner2014a,
  author = {Montagner, Igor dos Santos and Hirata, Roberto Jr. and Hirata, Nina S. T.},
  booktitle = {22nd International Conference on Pattern Recognition},
  title = {A Machine Learning based method for Staff Removal},
  year = {2014},
  pages = {3162--3167},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Staff line removal is an important pre-processing step to convert content of music score images to machine readable formats. Many heuristic algorithms have been proposed for staff removal and recently a competition was organized in the 2013 ICDAR/GREC conference. Music score images are often subject to different deformations and variations, and existing algorithms do not work well for all cases. We investigate the application of a machine learning based method for the staff removal problem. The method consists in learning multiple image operators from training input-output pairs of images and then combining the results of these operators. Each operator is based on local information provided by a neighborhood window, which is usually manually chosen based on the content of the images. We propose a feature selection based approach for automatically defining the windows and also for combining the operators. The performance of the proposed method is superior to several existing methods and is comparable to the best method in the competition. © 2014 IEEE.},
  affiliation = {Institute of Mathematics and Statistics, University of São Paulo, Rua do Matão, SP, Brazil},
  doi = {10.1109/ICPR.2014.545},
  file = {:pdfs/2014 - A Machine Learning based method for Staff Removal.pdf:PDF},
  isbn = {9781479952083},
  issn = {1051-4651},
  keywords = {Artificial intelligence; Heuristic algorithms; Learning systems; Pattern recognition, Input-output; Line removal; Local information; Machine-readable format; Multiple image; Music scores; Pre-processing step, Personnel training}
}
@article{Montagner2017,
  author = {Montagner, Igor dos Santos and Hirata, Nina S.T. and Hirata, Roberto Jr.},
  journal = {Pattern Recognition},
  title = {Staff removal using image operator learning},
  year = {2017},
  issn = {0031-3203},
  pages = {310--320},
  volume = {63},
  abstract = {Staff removal is an image processing task that aims to facilitate
	further analysis of music score images. Even when restricted to images
	in specific domains such as music score recognition, solving image
	processing problems usually requires the design of customized algorithms.
	To cope with image variabilities and the growing amount of data,
	machine learning based techniques emerge as a natural approach to
	be employed in image processing problems. In this sense, image operator
	learning methods are concerned with estimating, from sample pairs
	of input-output images of a transformation, a local function that
	characterizes the image transformation. These methods require the
	definition of some parameters, including the local information to
	be considered in the processing which is defined by a window. In
	this work we show how to apply the image operator learning technique
	to the staff line removal problem. We present an algorithm for window
	determination and show that it captures visual information relevant
	for staff removal. We also present a reference window set to be used
	in cases where the training set is not sufficiently large. Experimental
	results obtained with respect to synthetic and handwritten music
	scores under varying image conditions show that the learned image
	operators are comparable with especially designed state-of-the-art
	heuristic algorithms. © 2016 Elsevier Ltd},
  affiliation = {Department of Computer Science, Institute of Mathematics and Statistics, University of São Paulo, 05508-090, Rua do Matão, São Paulo, Brazil},
  author_keywords = {Document image analysis; Image operator; Machine learning; Optical music recognition; Staff removal},
  correspondence_address1 = {Montagner, I.S.; Department of Computer Science, Institute of Mathematics and Statistics, University of São Paulo, 05508-090, Brazil; email: igordsm@ime.usp.br},
  doi = {10.1016/j.patcog.2016.10.002},
  file = {:pdfs/2017 - Staff Removal Using Image Operator Learning.pdf:PDF},
  funding_details = {2011/00325-1, FAPESP, Fundação de Amparo à Pesquisa do Estado de São Paulo; 2011/23310-0, FAPESP, Fundação de Amparo à Pesquisa do Estado de São Paulo; 2014/21692-0, FAPESP, Fundação de Amparo à Pesquisa do Estado de São Paulo; 2015/17741-9, FAPESP, Fundação de Amparo à Pesquisa do Estado de São Paulo; 484572/2013-0, CNPq, Conselho Nacional de Desenvolvimento Científico e Tecnológico},
  keywords = {Artificial intelligence; Heuristic algorithms; Image analysis; Learning systems; Optical data processing; Problem solving, Document image analysis; Image operators; Image processing problems; Image transformations; Input-output image; Natural approaches; Optical music recognition; Visual information, Image processing},
  publisher = {Elsevier Ltd}
}
@misc{Moonlight,
  author = {Ringwalt, Dan},
  howpublished = {\url{https://github.com/ringw/moonlight}},
  title = {Moonlight},
  year = {2018},
  url = {https://github.com/ringw/moonlight}
}
@inproceedings{Moss2022,
  author = {Moss, Fabian C. and L{\'{o}}pez, N{\'{e}}stor N{\'{a}}poles and K{\"{o}}ster, Maik and Rizo, David},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Challenging sources: a new dataset for OMR of diverse 19th-century music theory examples},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {4--8},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Challenging Sources_ a New Dataset for OMR of Diverse 19th Century Music Theory Examples.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@misc{MuNG,
  author = {Pacha, Alexander and Haji{\v{c}} jr., Jan},
  howpublished = {\url{https://github.com/OMR-Research/mung}},
  title = {The Music Notation Graph (MuNG) Repository},
  year = {2020},
  url = {https://github.com/OMR-Research/mung}
}
@misc{MusicScoreClassifier,
  author = {Pacha, Alexander},
  howpublished = {\url{https://github.com/apacha/MusicScoreClassifier}},
  title = {GitHub Repository of the Music Score Classifier},
  year = {2017},
  url = {https://github.com/apacha/MusicScoreClassifier}
}
@inproceedings{Napoles2018,
  author = {N\'{a}poles, N{\'e}stor and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {5th International Conference on Digital Libraries for Musicology},
  title = {Encoding Matters},
  year = {2018},
  address = {Paris, France},
  pages = {69--73},
  publisher = {ACM},
  acmid = {3273027},
  doi = {10.1145/3273024.3273027},
  file = {:pdfs/2018 - Encoding Matters.pdf:PDF},
  isbn = {978-1-4503-6522-2},
  keywords = {humdrum, humlib, mei, music encoding, music information retrieval, music notation, music transcription, music21, musicxml, symbolic music, verovio, vis},
  url = {http://doi.acm.org/10.1145/3273024.3273027}
}
@techreport{Nehab2003,
  author = {Nehab, Diego},
  title = {Staff Line Detection by Skewed Projection},
  year = {2003},
  file = {:pdfs/2003 - Staff Line Detection by Skewed Projection Going Vectorial.pdf:PDF},
  url = {https://pdfs.semanticscholar.org/142c/dc7231a7a8093fd2da6f293a36862e592733.pdf}
}
@inproceedings{Ng1992,
  author = {Ng, Kia and Boyle, Roger},
  booktitle = {BMVC92},
  title = {Segmentation of Music Primitives},
  year = {1992},
  address = {London},
  editor = {Hogg, David and Boyle, Roger},
  pages = {472--480},
  publisher = {Springer London},
  abstract = {In this paper, low-level knowledge-directed pre-processing and segmentation of music scores are presented. We discuss some of the problems that have been overlooked by existing research but have proved to be major obstacles for robust optical music recognisers [1] for entering music into a computer, including sub-segmentation of interconnected primitives and identification of non-straight stave lines, and present solutions to these problems. We conclude that, with knowledge, a significant improvement in low-level segmentation can be achieved.},
  doi = {10.1007/978-1-4471-3201-1_49},
  isbn = {978-1-4471-3201-1}
}
@inproceedings{Ng1995,
  author = {Ng, Kia and Boyle, Roger and Cooper, David},
  booktitle = {IEE Colloquium on Document Image Processing and Multimedia Environments},
  title = {Low- and high-level approaches to optical music score recognition},
  year = {1995},
  pages = {31--36},
  abstract = {The computer has become an increasingly important device in music. It can not only generate sound but is also able to perform time consuming and repetitive tasks, such as transposition and part extraction, with speed and accuracy. However, a score must be represented in a machine readable format before any operation can be carried out. Current input methods, such as using an electronic keyboard, are time consuming and require human intervention. Optical music recognition (OMR) provides an interesting, efficient and automatic method to transform paper-based music scores into a machine representation. The authors outline the techniques for pre-processing and discuss the heuristic and musical rules employed to enhance recognition. A spin-off application that makes use of the intermediate results to enhance stave lines is also presented. The authors concentrate on the techniques used for time-signature detection, discuss the application of frequently-found rhythmical patterns to clarify the results of OMR, and propose possible enhancements using such knowledge. They believe that domain-knowledge enhancement is essential for complex document analysis and recognition. Other possible areas of development include melodic, harmonic and stylistic analysis to improve recognition results further.},
  doi = {10.1049/ic:19951184},
  file = {:pdfs/1995 - Low and High Level Approaches to Optical Music Score Recognition.pdf:PDF},
  keywords = {optical character recognition;pattern recognition;music;humanities;music computing;optical character recognition;musical score;high-level approach;low level approach;optical music score recognition;pattern recognition;machine readable format;optical music recognition;OMR;automatic method;paper-based music score;machine representation;technique;heuristic rule;time-signature detection;frequently-found rhythmical pattern;rhythm;domain-knowledge enhancement;complex document analysis;stylistic analysis;Optical character recognition;Pattern recognition;Music;Humanities}
}
@article{Ng1996,
  author = {Ng, Kia and Boyle, Roger},
  journal = {Image and Vision Computing},
  title = {Recognition and reconstruction of primitives in music scores},
  year = {1996},
  issn = {0262-8856},
  number = {1},
  pages = {39--46},
  volume = {14},
  abstract = {Music recognition bears similarities and differences to {OCR}. In this paper we identify some of the problems peculiar to musical scores, and propose an approach which succeeds in a wide range of non-trivial cases. The composer customarily proceeds by writing notes, then stems, beams, ties and slurs — we have inverted this approach by segmenting and then subsegmenting scores to recapture the component parts of symbols. In this paper, we concentrate on the strategy of recognizing sub-segmented primitives, and the reassembly process which reconstructs low level graphical primitives back to musical symbols. The sub-segmentation process proves to be worthwhile, since many primitives complement each other and high level musical theory can be employed to enhance the recognition process.},
  doi = {10.1016/0262-8856(95)01038-6},
  file = {:pdfs/1996 - Recognition and Reconstruction of Primitives in Music Scores.pdf:PDF},
  keywords = {Document analysis, OCR, Score recognition, to classify},
  url = {http://www.sciencedirect.com/science/article/pii/0262885695010386}
}
@inproceedings{Ng1999,
  author = {Ng, Kia and Cooper, David and Stefani, Ewan and Boyle, Roger and Bailey, Nick},
  booktitle = {International Computer Music Conference},
  title = {Embracing the Composer: Optical Recognition of Handwritten Manuscripts},
  year = {1999},
  pages = {500--503},
  file = {:pdfs/1999 - Embracing the Composer - Optical Recognition of Handwrtten Manuscripts.pdf:PDF},
  url = {https://ci.nii.ac.jp/naid/10011612045/en/}
}
@article{Ng2002,
  author = {Ng, Kia},
  journal = {Lecture Notes in Computer Science},
  title = {Music manuscript tracing},
  year = {2002},
  issn = {1611-3349},
  pages = {322--334},
  volume = {2390},
  doi = {10.1007/3-540-45868-9{\_}29},
  file = {:pdfs/2002 - Music Manuscript Tracing.pdf:PDF},
  isbn = {3-540-44066-6},
  url = {http://www.springerlink.com/index/1JA4UUJULCH2XNTB.pdf}
}
@incollection{Ng2004,
  author = {Ng, Kia},
  booktitle = {Visual Perception of Music Notation: On-Line and Off-Line Recognition},
  publisher = {IGI Global},
  title = {Optical Music Analysis for Printed Music Score and Handwritten Music Manuscript},
  year = {2004},
  pages = {108--127},
  doi = {10.4018/978-1-59140-298-5.ch004},
  file = {:pdfs/2004 - Optical Music Analysis for Printed Music Score and Handwritten Music Manuscript.pdf:PDF}
}
@inproceedings{Ng2014,
  author = {Ng, Kia and McLean, Alex and Marsden, Alan},
  booktitle = {EVA London 2014 on Electronic Visualisation and the Arts},
  title = {Big Data Optical Music Recognition with Multi Images and Multi Recognisers},
  year = {2014},
  organization = {BCS},
  pages = {215--218},
  doi = {10.14236/ewic/eva2014.26},
  file = {:pdfs/2014 - Big Data Optical Music Recognition with Multi Images and Multi Recognisers.pdf:PDF},
  groups = {datasets},
  url = {http://www.bcs.org/upload/pdf/ewic_ev14_s14paper4.pdf}
}
@inproceedings{Nguyen2014,
  author = {Nguyen, Hong Quy and Yang, Hyung-Jeong and Kim, Soo-Hyung and Lee, Guee-Sang},
  booktitle = {8th International Conference on Ubiquitous Information Management and Communication},
  title = {Automatic Touching Detection and Recognition of Music Chord Using Auto-encoding and Softmax},
  year = {2014},
  address = {Siem Reap},
  publisher = {Association for Computing Machinery},
  abstract = {Humankind has envisioned an age of automation in which machines perform all cumbersome and tedious tasks. Playing music is not tedious work, but a program that automatically plays music from a sheet-music image can increase the productivity of musicians and bring convenience to amateurs. Following this motivation, we studied a specific task within the Optical Music Recognition problem: touching chords. In particular, touching chords become a critical problem in images captured with mobile devices because of several objective conditions. In this paper we present our proposed method, which uses an autoencoder and a softmax classifier. The experimental results show that our method is very promising: we obtain 94.117% accuracy in the non-touching detection phase and 96.261% in the separation phase.},
  affiliation = {Department of Computer Engineering, Chonnam National University, 500-757 Gwangju, South Korea},
  author_keywords = {Autoencoder; Optical Music Recognition; Softmax classifier; Touching chord},
  correspondence_address1 = {Yang, H.-J.; Department of Computer Engineering, Chonnam National University, 500-757 Gwangju, South Korea; email: hyungjeong@gmail.com},
  doi = {10.1145/2557977.2558055},
  file = {:pdfs/2014 - Automatic Touching Detection and Recognition of Music Chord using Auto-Encoding and Softmax.pdf:PDF},
  keywords = {Communication; Learning systems; Mobile devices, Auto encoders; Critical problems; Optical music recognition; Specific tasks; Touching chord, Information management}
}
@article{Nguyen2015,
  author = {Nguyen, Tam and Lee, Gueesang},
  journal = {Journal of Information Processing Systems},
  title = {A Lightweight and Effective Music Score Recognition on Mobile Phones},
  year = {2015},
  number = {3},
  pages = {438--449},
  volume = {11},
  doi = {10.3745/JIPS.02.0023},
  file = {:pdfs/2015 - A Lightweight and Effective Music Score.pdf:PDF},
  keywords = {mobile camera, music score, svm, symbol classification, to classify}
}
@inproceedings{Nhat2014,
  author = {Nhat, Vo Quang and Lee, GueeSang},
  booktitle = {8th International Conference on Ubiquitous Information Management and Communication},
  title = {Adaptive Line Fitting for Staff Detection in Handwritten Music Score Images},
  year = {2014},
  address = {Siem Reap, Cambodia},
  pages = {991--996},
  publisher = {ACM},
  abstract = {The target of staff line detection is to extract staff lines accurately in order to remove them while preserving the shape of musical symbols. Several studies on staff line detection and removal provide good results for printed scores. However, in the case of handwritten music scores, detecting staff lines still poses problems due to the diversity of musical symbol shapes, line curvature and disconnections. In this paper, we present a novel line fitting method for detecting staff lines in handwritten music score images. Our method starts with the estimation of the staff line height and staff space height. Then the staff segments are selected. Based on these staff candidates, we construct a line with the orientation of the staff segment and gradually fit it to the real lines. The staff line is then removed, and the process continues until no line is detected. To show the effectiveness of our proposed method on different types of handwritten music scores, images from the ICDAR/GREC 2013 dataset are tested. The experimental results show the advantages of our algorithm compared with previous approaches. Copyright 2014 ACM.},
  acmid = {2558057},
  doi = {10.1145/2557977.2558057},
  file = {:pdfs/2014 - Adaptive Line Fitting for Staff Detection in Handwritten Music Score Images.pdf:PDF},
  groups = {staff-removal},
  isbn = {978-1-4503-2644-5},
  keywords = {line fitting, stable path, staff line detection, staff line removal}
}
@article{Niitsuma2018,
  author = {Niitsuma, Masahiro and Tomita, Yo and Yan, Wei Qi and Bell, David},
  journal = {IEEE Intelligent Systems},
  title = {Towards Musicologist-Driven Mining of Handwritten Scores},
  year = {2018},
  issn = {1541-1672},
  number = {4},
  pages = {24--34},
  volume = {33},
  abstract = {Historical musicologists have been seeking objective and powerful techniques to collect, analyse and verify their findings for many decades. The aim of this study is to propose a musicologist-driven mining method for extracting quantitative information from early music manuscripts. Our focus is on finding evidence for the chronological ordering of J.S. Bach's manuscripts. Bach's C-clefs were extracted from a wide range of manuscripts under the direction of domain experts, and with these the classification of C-clefs was conducted. The proposed methods were evaluated on a dataset containing over 1000 clefs extracted from J.S. Bach's manuscripts. The results show more than 70% accuracy for dating J.S. Bach's manuscripts, providing a rough barometer to be combined with other evidence to evaluate musicologists' hypotheses, and the practicability of this domain-driven approach is demonstrated.},
  doi = {10.1109/MIS.2018.111144115},
  file = {:pdfs/2018 - Towards Musicologist Driven Mining of Handwritten Scores.pdf:PDF},
  keywords = {Data mining;Feature extraction;Intelligent systems;Knowledge discovery;Object recognition;Radio frequency;Support vector machines;applications;arts and humanities;computer applications;computing methodologies;data mining;database applications;database management;handwriting analysis;information technology and systems;music;pattern recognition}
}
@article{Noll2019,
  author = {Noll, Justus},
  journal = {c't},
  title = {Intelligentes Notenlesen},
  year = {2019},
  pages = {122--126},
  volume = {18},
  file = {:pdfs/2019 - Intelligentes Notenlesen.pdf:PDF},
  language = {German},
  subtitle = {Programme zum Digitalisieren gedruckter Musiknoten},
  url = {https://shop.heise.de/katalog/intelligentes-notenlesen}
}
@misc{NotateMe,
  author = {Neuratron},
  howpublished = {\url{http://www.neuratron.com/notateme.html}},
  title = {NotateMe},
  year = {2015},
  url = {http://www.neuratron.com/notateme.html}
}
@inproceedings{Novotny2015,
  author = {Novotn{\'y}, Ji{\v{r}}{\'i} and Pokorn{\'y}, Jaroslav},
  booktitle = {Annual International Workshop on DAtabases, TExts, Specifications and Objects},
  title = {Introduction to Optical Music Recognition: Overview and Practical Challenges},
  year = {2015},
  editor = {Necasky, M. and Moravec, P. and Pokorny, J.},
  pages = {65--76},
  publisher = {CEUR-WS},
  abstract = {Music has always been an integral part of human culture. In our computer age, it is not surprising that there is a growing interest in storing music in digitized form. Optical music recognition (OMR) refers to a discipline that investigates music score recognition systems. This is similar to well-known optical character recognition systems, except that OMR systems try to automatically transform scanned sheet music into a computer-readable format. In such a digital format, semantic information is also stored (instrumentation, notes, pitches and duration, contextual information, etc.). This article introduces the OMR field and presents an overview of the relevant literature and basic techniques. Practical challenges and questions arising from the automatic recognition of music notation and its semantic interpretation are discussed, as well as the most important open issues.},
  affiliation = {Department of Software Engineering, Faculty of Mathematics and Physics, Charles University, Malostranské nám. 25, Prague, Czech Republic},
  author_keywords = {Document image analysis; Machine learning; Optical music recognition},
  file = {:pdfs/2015 - Introduction to Optical Music Recognition - Overview and Practical Challenges.pdf:PDF},
  issn = {1613-0073},
  keywords = {Artificial intelligence; Character recognition; Learning systems; Optical character recognition; Specifications, Automatic recognition; Contextual information; Document image analysis; Optical character recognition system; Optical music recognition; Recognition systems; Semantic information; Semantic interpretation, Semantics},
  url = {http://ceur-ws.org/Vol-1343/paper6.pdf}
}
@mastersthesis{NunezAlcover2019,
  author = {N{\'{u}}{\~{n}}ez Alcover, Alicia},
  school = {Universidad de Alicante},
  title = {Glyph and Position Classification of Music Symbols in Early Manuscripts},
  year = {2019},
  type = {mathesis},
  abstract = {In this research, we study how to classify handwritten music symbols in early music manuscripts written in white Mensural notation, a common notation system used from the fourteenth century until the Renaissance. The field of Optical Music Recognition researches how to automate the reading of musical scores in order to transcribe their content to a structured digital format such as MIDI. When dealing with music manuscripts, the traditional workflow establishes two separate stages of detection and classification of musical symbols. In the classification stage, most of the research focuses on detecting musical symbols, without taking into account that a musical note is defined by two components: its glyph and its position with respect to the staff. Our purpose consists of the design and implementation of Deep Learning architectures using Convolutional Neural Networks (CNNs), as well as their evaluation and comparison, to determine which model provides the best performance in terms of efficiency and precision for implementation in an interactive scenario.},
  file = {:pdfs/2019 - Glyph and Position Classification of Music Symbols in Early Manuscripts.pdf:PDF},
  url = {http://hdl.handle.net/10045/96451}
}
@inproceedings{Nunez-Alcover2019,
  author = {Nu{\~{n}}ez-Alcover, Alicia and de Le{\'o}n, Pedro J. Ponce and Calvo-Zaragoza, Jorge},
  booktitle = {Pattern Recognition and Image Analysis},
  title = {Glyph and Position Classification of Music Symbols in Early Music Manuscripts},
  year = {2019},
  address = {Cham},
  editor = {Morales, Aythami and Fierrez, Julian and S{\'a}nchez, Jos{\'e} Salvador and Ribeiro, Bernardete},
  pages = {159--168},
  publisher = {Springer International Publishing},
  abstract = {Optical Music Recognition is a field of research that automates the reading of musical scores so as to transcribe their content into a structured digital format. When dealing with music manuscripts, the traditional workflow establishes separate stages of detection and classification of musical symbols. In the latter, most of the research has focused on detecting musical glyphs, ignoring that the meaning of a musical symbol is defined by two components: its glyph and its position within the staff. In this paper we study how to perform both glyph and position classification of handwritten musical symbols in early music manuscripts written in white Mensural notation, a common notation system used during most of the XVI and XVII centuries. We use Convolutional Neural Networks as the classification method, and we test several alternatives, such as using independent models for each component, combining label spaces, or using both multi-input and multi-output models. Our results on early music manuscripts provide insights about the effectiveness and efficiency of each approach.},
  doi = {10.1007/978-3-030-31321-0_14},
  file = {:pdfs/2019 - Glyph and Position Classification of Music Symbols in Early Music Manuscripts.pdf:PDF},
  isbn = {978-3-030-31321-0}
}
@article{Oh2017,
  author = {Oh, Jiyong and Son, Sung Joon and Lee, Sangkuk and Kwon, Ji-Won and Kwak, Nojun},
  journal = {International Journal on Document Analysis and Recognition},
  title = {Online recognition of handwritten music symbols},
  year = {2017},
  number = {2},
  pages = {79--89},
  volume = {20},
  doi = {10.1007/s10032-017-0281-y},
  file = {:pdfs/2017 - Online Recognition of Handwritten Music Symbols.pdf:PDF},
  publisher = {Springer}
}
@misc{OmrBibliography,
  author = {Pacha, Alexander},
  howpublished = {\url{https://omr-research.github.io}},
  title = {The definitive bibliography for research on Optical Music Recognition},
  year = {2019},
  url = {https://omr-research.github.io}
}
@misc{OmrDatasetsProject,
  author = {Pacha, Alexander},
  howpublished = {\url{https://apacha.github.io/OMR-Datasets}},
  title = {The {OMR} Datasets Project},
  year = {2017},
  url = {https://apacha.github.io/OMR-Datasets}
}
@misc{OmrDatasetTools,
  author = {Pacha, Alexander},
  howpublished = {\url{https://omr-datasets.readthedocs.io/en/latest}},
  title = {Documentation of the {OMR} Dataset Tools Python package},
  year = {2018},
  url = {https://omr-datasets.readthedocs.io/en/latest}
}
@misc{OmrTutorialOnYoutube,
  author = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander and Fujinaga, Ichiro},
  howpublished = {\url{https://www.youtube.com/playlist?list=PL1jvwDVNwQke-04UxzlzY4FM33bo1CGS0}},
  title = {The recording of the {ISMIR} Tutorial "OMR for Dummies" on YouTube},
  year = {2018},
  url = {https://www.youtube.com/playlist?list=PL1jvwDVNwQke-04UxzlzY4FM33bo1CGS0}
}
@inproceedings{Pacha2017,
  author = {Pacha, Alexander and Eidenberger, Horst},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Towards a Universal Music Symbol Classifier},
  year = {2017},
  address = {Kyoto, Japan},
  organization = {IAPR TC10 (Technical Committee on Graphics Recognition)},
  pages = {35--36},
  publisher = {IEEE Computer Society},
  abstract = {Optical Music Recognition (OMR) aims to recognize and understand written
	music scores. With the help of Deep Learning, researchers were able
	to significantly improve the state-of-the-art in this research area.
	However, Deep Learning requires a substantial amount of annotated
	data for supervised training. Various datasets have been collected
	in the past, but without a common standard that defines data formats
	and terminology, combining them is a challenging task. In this paper
	we present our approach towards unifying multiple datasets into the
	largest currently available body of over 90000 musical symbols that
	belong to 79 classes, containing both handwritten and printed music
	symbols. A universal music symbol classifier, trained on such a dataset
	using Deep Learning, can achieve an accuracy that exceeds 98%.},
  doi = {10.1109/ICDAR.2017.265},
  file = {:pdfs/2017 - Towards A Universal Music Symbol Classifier.pdf:PDF},
  isbn = {978-1-5386-3586-5},
  issn = {2379-2140}
}
@inproceedings{Pacha2017a,
  author = {Pacha, Alexander and Eidenberger, Horst},
  booktitle = {16th International Conference on Machine Learning and Applications},
  title = {Towards Self-Learning Optical Music Recognition},
  year = {2017},
  pages = {795--800},
  abstract = {Optical Music Recognition (OMR) is a branch of
artificial intelligence that aims at automatically recognizing
and understanding the content of music scores in images.
Several approaches and systems have been proposed that try to
solve this problem by using expert knowledge and specialized
algorithms that tend to fail at generalization to a broader
set of scores, imperfect image scans or data of different
formatting. In this paper we propose a new approach to solve
OMR by investigating how humans read music scores and by
imitating that behavior with machine learning. To demonstrate
the power of this approach, we conduct two experiments
that teach a machine to distinguish entire music sheets from
arbitrary content through frame-by-frame classification and
distinguishing between 32 classes of handwritten music symbols
which can be a basis for object detection. Both tasks can
be performed at high rates of confidence (>98%) which is
comparable to the performance of humans on the same task.},
  doi = {10.1109/ICMLA.2017.00-60},
  file = {:pdfs/2017 - Towards Self-Learning Optical Music Recognition.pdf:PDF}
}
@inproceedings{Pacha2018,
  author = {Pacha, Alexander and Choi, Kwon-Young and Co{\"{u}}asnon, Bertrand and Ricquebourg, Yann and Zanibbi, Richard and Eidenberger, Horst},
  booktitle = {13th International Workshop on Document Analysis Systems},
  title = {Handwritten Music Object Detection: Open Issues and Baseline Results},
  year = {2018},
  pages = {163--168},
  abstract = {Optical Music Recognition (OMR) is the challenge of understanding the content of musical scores. Accurate detection of individual music objects is a critical step in processing musical documents because a failure at this stage corrupts any further processing. So far, all proposed methods were either limited to typeset music scores or were built to detect only a subset of the available classes of music symbols. In this work, we propose an end-to-end trainable object detector for music symbols that is capable of detecting almost the full vocabulary of modern music notation in handwritten music scores. By training deep convolutional neural networks on the recently released MUSCIMA++ dataset which has symbol-level annotations, we show that a machine learning approach can be used to accurately detect music objects with a mean average precision of over 80%.},
  doi = {10.1109/DAS.2018.51},
  file = {:pdfs/2018 - Handwritten Music Object Detection - Open Issues and Baseline Results.pdf:PDF},
  keywords = {Optical Music Recognition; Object Detection; Handwritten Scores; Deep Learning}
}
@inproceedings{Pacha2018a,
  author = {Pacha, Alexander},
  booktitle = {Vienna Young Scientists Symposium},
  title = {Self-learning Optical Music Recognition},
  year = {2018},
  editor = {Hans, Philipp and Artner, Gerald and Grames, Johanna and Krebs, Heinz and Khosravi, Hamid Reza Mansouri and Rouhi, Taraneh},
  organization = {TU Wien},
  pages = {34--35},
  publisher = {Book-of-Abstracts.com, Heinz A. Krebs},
  file = {:pdfs/2018 - Self-Learning Optical Music Recognition.pdf:PDF},
  isbn = {978-3-9504017-8-3},
  url = {http://vss.tuwien.ac.at/}
}
@inproceedings{Pacha2018b,
  author = {Pacha, Alexander and Calvo-Zaragoza, Jorge},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Optical Music Recognition in Mensural Notation with Region-Based Convolutional Neural Networks},
  year = {2018},
  address = {Paris, France},
  pages = {240--247},
  abstract = {In this work, we present an approach for the task of optical music recognition (OMR) using deep neural networks. Our intention is to simultaneously detect and categorize musical symbols in handwritten scores, written in mensural notation. We propose the use of region-based convolutional neural networks, which are trained in an end-to-end fashion for that purpose. Additionally, we make use of a convolutional neural network that predicts the relative position of a detected symbol within the staff, so that we cover the entire image-processing part of the OMR pipeline. This strategy is evaluated over a set of 60 ancient scores in mensural notation, with more than 15000 annotated symbols belonging to 32 different classes. The results reflect the feasibility and capability of this approach, with a weighted mean average precision of around 76% for symbol detection, and over 98% accuracy for predicting the position.},
  file = {:pdfs/2018 - Optical Music Recognition in Mensural Notation with Region Based Convolutional Neural Networks.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/32_Paper.pdf}
}
@article{Pacha2018c,
  author = {Pacha, Alexander and Haji{\v{c}} jr., Jan and Calvo-Zaragoza, Jorge},
  journal = {Applied Sciences},
  title = {A Baseline for General Music Object Detection with Deep Learning},
  year = {2018},
  issn = {2076-3417},
  number = {9},
  pages = {1488--1508},
  volume = {8},
  abstract = {Deep learning is bringing breakthroughs to many computer vision subfields including Optical Music Recognition (OMR), which has seen a series of improvements to musical symbol detection achieved by using generic deep learning models. However, so far, each such proposal has been based on a specific dataset and different evaluation criteria, which made it difficult to quantify the new deep learning-based state-of-the-art and assess the relative merits of these detection models on music scores. In this paper, a baseline for general detection of musical symbols with deep learning is presented. We consider three datasets of heterogeneous typology but with the same annotation format, three neural models of different nature, and establish their performance in terms of a common evaluation standard. The experimental results confirm that the direct music object detection with deep learning is indeed promising, but at the same time illustrates some of the domain-specific shortcomings of the general detectors. A qualitative comparison then suggests avenues for OMR improvement, based both on properties of the detection model and how the datasets are defined. To the best of our knowledge, this is the first time that competing music object detection systems from the machine learning paradigm are directly compared to each other. We hope that this work will serve as a reference to measure the progress of future developments of OMR in music object detection.},
  doi = {10.3390/app8091488},
  file = {:pdfs/2018 - A Baseline for General Music Object Detection with Deep Learning.pdf:PDF},
  keywords = {optical music recognition; deep learning; object detection; music scores},
  url = {http://www.mdpi.com/2076-3417/8/9/1488}
}
@inproceedings{Pacha2018d,
  author = {Pacha, Alexander},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Advancing OMR as a Community: Best Practices for Reproducible Research},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {19--20},
  file = {:pdfs/2018 - Advancing OMR As a Community - Best Practices for Reproducible Research.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@phdthesis{Pacha2019,
  author = {Pacha, Alexander},
  school = {TU Wien},
  title = {Self-Learning Optical Music Recognition},
  year = {2019},
  type = {phdthesis},
  abstract = {Music is an essential part of our culture and heritage. Throughout the centuries, millions of songs were composed and written down in documents using music notation. Optical Music Recognition (OMR) is the research field that investigates how the computer can learn to read those documents. Despite decades of research, OMR is still considered far from being solved. One reason is that traditional approaches rely heavily on heuristics and often do not generalize well. In this thesis, I propose a different approach to let the computer learn to read music notation documents mostly by itself using machine learning, especially deep learning.

In several experiments, I have demonstrated that the computer can learn to robustly solve many tasks involved in OMR by using supervised learning. These include the structural analysis of the document, the detection and classification of symbols in the scores as well as the construction of the music notation graph, which is an intermediate representation that can be exported into a format suitable for further processing. A trained deep convolutional neural network can reliably detect whether an image contains music or not, while another one is capable of finding and linking individual measures across multiple sources for easy navigation between them. Detecting symbols in typeset and handwritten scores can be learned, given a sufficient amount of annotated data, and classifying isolated symbols can be performed at even lower error rates than those of humans. For scores written in mensural notation the complete recognition can even be simplified into just three steps, two of which can be solved with machine learning.

Apart from publishing a number of scientific articles, I have gathered and documented the most extensive collection of datasets for OMR as well as the probably most comprehensive bibliography currently available. Both are available online. Moreover I was involved in the organization of the International Workshop on Reading Music Systems, in a joint tutorial at the International Society For Music Information Retrieval Conference on OMR as well as in another workshop at the Music Encoding Conference.

Many challenges of OMR can be solved efficiently with deep learning, such as the layout analysis or music object detection. As music notation is a configurational writing system where the relations and interplay between symbols determine the musical semantics, these relationships have to be recognized as well. A music notation graph is a suitable representation for storing this information. It allows one to clearly distinguish between the challenges involved in recovering information from the music score image and the encoding of the recovered information into a specific output format while complying with the rules of music notation. While the construction of such a graph can be learned as well, there are still many open issues that need future research. But I am confident that training the computer on a sufficiently large dataset under human supervision is a sustainable approach that will help to solve many applications of OMR in the future.},
  file = {:pdfs/2019 - Self Learning Optical Music Recognition.pdf:PDF},
  url = {https://alexanderpacha.files.wordpress.com/2019/07/dissertation-self-learning-optical-music-recognition-alexander-pacha.pdf}
}
@inproceedings{Pacha2019a,
  author = {Pacha, Alexander and Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan},
  booktitle = {20th International Society for Music Information Retrieval Conference},
  title = {Learning Notation Graph Construction for Full-Pipeline Optical Music Recognition},
  year = {2019},
  pages = {75--82},
  file = {:pdfs/2019 - Learning Notation Graph Construction for Full Pipeline Optical Music Recognition.pdf:PDF},
  url = {https://archives.ismir.net/ismir2019/paper/000006.pdf}
}
@inproceedings{Pacha2019b,
  author = {Pacha, Alexander},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {Incremental Supervised Staff Detection},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {16--20},
  file = {:pdfs/2019 - Incremental Supervised Staff Detection.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@inproceedings{Pacha2021,
  author = {Pacha, Alexander},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {The Challenge of Reconstructing Digits in Music Scores},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {4--7},
  file = {:pdfs/2021 - The Challenge of Reconstructing Digits in Music Scores.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{Padilla2014,
  author = {Padilla, Victor and Marsden, Alan and McLean, Alex and Ng, Kia},
  booktitle = {1st International Workshop on Digital Libraries for Musicology},
  title = {Improving OMR for Digital Music Libraries with Multiple Recognisers and Multiple Sources},
  year = {2014},
  address = {London, United Kingdom},
  pages = {1--8},
  publisher = {ACM},
  abstract = {Large quantities of scanned music are now available in public digital music libraries. However, the information in such sources is represented as pixel data in images rather than symbolic information about the notes of a piece of music, and it is therefore opaque to musically meaningful computational processes (e.g., searching for a particular melodic pattern). Optical Music Recognition (Optical Character Recognition for music) holds out the prospect of a solution to this issue, allowing access to very large quantities of musical information in digital libraries. Despite the efforts made by the different commercial OMR developers to improve the accuracy of their systems, mistakes in the output are currently too frequent to make OMR a practical tool for bulk processing.

One possibility for improving the accuracy of OMR is to use multiple recognisers and combine the results to achieve an output better than each of them individually. The general process presented here can be divided into three subtasks, S1, S2, and S3. S1 focuses on the correction of rhythmical errors at bar level: counting the errors of the different OMR outputs, establishing a ranking of the results, and making a pairwise alignment to select the best measures. S2 is based on the alignment and voting of individual symbols. For this task we have implemented a conversion of the most important symbols to a simple grammar. Finally, S3 improves the output of S2 by comparing and adding symbols from S1 and detecting gaps through the alignment of wrong measures.

The process described in this paper is part of our "Big Data Approach" where a large amount of data is available in music score libraries, such as the International Music Score Library Project (IMSLP), for the purpose of Music Information Retrieval (MIR).},
  acmid = {2660175},
  doi = {10.1145/2660168.2660175},
  file = {:pdfs/2014 - Improving OMR for Digital Music Libraries with Multiple Recognisers and Multiple Sources.pdf:PDF},
  groups = {evaluation, recognition, datasets},
  isbn = {978-1-4503-3002-2},
  keywords = {Big Data, Image processing, Library, Optical Music Recognition, Pattern Recognition}
}
@inproceedings{Paeaekkoenen2018,
  author = {P{\"{a}}{\"{a}}kk{\"{o}}nen, Tuula and Kervinen, Jukka and Kettunen, Kimmo},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Digitisation and Digital Library Presentation System -- Sheet Music to the Mix},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {21--22},
  file = {:pdfs/2018 - Digitisation and Digital Library Presentation System Sheet Music to the Mix.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@techreport{Panadero2019,
  author = {Panadero, Ivan Santos},
  institution = {Universitat Aut{\'{o}}noma de Barcelona},
  title = {Alignment of handwritten music scores},
  year = {2019},
  abstract = {There are musicologists who spend their time analyzing musical pieces from more than a century ago in order to link them to other pre-existing pieces by the same author but written by different hands. It is a tedious task, since many representations of a single piece have been made over time, and the writing variability among those representations can be extensive. The purpose would be to have a varied database of these old compositions for study, reproduction and diffusion. This work is divided into two phases. The first consists in detecting the primitive elements present in each of the measures of a score using the existing transcription of the piece, thus obtaining the desired guided alignment. The second seeks to analyze this alignment. The obtained results are encouraging.},
  file = {:pdfs/2019 - Alignment of Handwritten Music Scores.pdf:PDF},
  url = {https://ddd.uab.cat/pub/tfg/2019/tfg_151917/TFG_-_Informe_Final.pdf}
}
@inproceedings{Parada-Cabaleiro2017,
  author = {Parada-Cabaleiro, Emilia and Batliner, Anton and Baird, Alice and Schuller, Bj{\"{o}}rn},
  booktitle = {18th International Society for Music Information Retrieval Conference},
  title = {The SEILS Dataset: Symbolically Encoded Scores in Modern-Early Notation for Computational Musicology},
  year = {2017},
  address = {Suzhou, China},
  comment = {Poster: https://www.informatik.uni-augsburg.de/lehrstuehle/eihw/pdfs/ISMIR_poster_DEF.pdf},
  file = {:pdfs/2017 - The SEILS Dataset - Symbolically Encoded Scores In Modern-Early Notation for Computational Musicology.pdf:PDF},
  isbn = {978-981-11-5179-8},
  url = {https://ismir2017.smcnus.org/wp-content/uploads/2017/10/14_Paper.pdf}
}
@inproceedings{Parada-Cabaleiro2019,
  author = {Parada-Cabaleiro, Emilia and Batliner, Anton and Schuller, Bj{\"{o}}rn},
  booktitle = {20th International Society for Music Information Retrieval Conference},
  title = {A Diplomatic Edition of Il Lauro Secco: Ground Truth for OMR of White Mensural Notation},
  year = {2019},
  address = {Delft, The Netherlands},
  pages = {557--564},
  abstract = {Early musical sources in white mensural notation—the most common notation in European printed music during the Renaissance—are nowadays preserved by libraries worldwide through digitalisation. Still, the application of music information retrieval to this repertoire is restricted by the use of digitalisation techniques which produce an uncodified output. Optical Music Recognition (OMR) automatically generates a symbolic representation of image-based musical content, thus making this repertoire reachable from the computational point of view; yet, further improvements are often constricted by the limited ground truth available. We address this lacuna by presenting a symbolic representation in original notation of Il Lauro Secco, an anthology of Italian madrigals in white mensural notation. For musicological analytic purposes, we encoded the repertoire in **mens and MEI formats; for OMR ground truth, we automatically codified the repertoire in agnostic and semantic formats, via conversion from the **mens files.},
  file = {:pdfs/2019 - A Diplomatic Edition of Il Lauro Secco_ Ground Truth for OMR of White Mensural Notation.pdf:PDF},
  url = {http://archives.ismir.net/ismir2019/paper/000067.pdf}
}
@article{Pedersoli2016,
  author = {Pedersoli, Fabrizio and Tzanetakis, George},
  journal = {International Journal on Document Analysis and Recognition},
  title = {Document segmentation and classification into musical scores and text},
  year = {2016},
  issn = {1433-2825},
  number = {4},
  pages = {289--304},
  volume = {19},
  abstract = {A new algorithm for segmenting documents into regions containing musical
	scores and text is proposed. Such segmentation is a required step
	prior to applying optical character recognition and optical music
	recognition on scanned pages that contain both music notation and
	text. Our segmentation technique is based on the bag-of-visual-words
	representation followed by random block voting (RBV) in order to
	detect the bounding boxes containing the musical score and text within
	a document image. The RBV procedure consists of extracting a fixed
	number of blocks whose position and size are sampled from a discrete
	uniform distribution that ``over''-covers the input image. Each block
	is automatically classified as either coming from musical score or
	text and votes with a particular posterior probability of classification
	in its spatial domain. An initial coarse segmentation is obtained
	by summarizing all the votes in a single image. Subsequently, the
	final segmentation is obtained by subdividing the image in microblocks
	and classifying them using an N-nearest neighbor classifier which
	is trained using the coarse segmentation. We demonstrate the potential
	of the proposed method by experiments on two different datasets.
	One is on a challenging dataset of images collected and artificially
	combined and manipulated for this project. The other is a music dataset
	obtained by the scanning of two music books. The results are reported
	using precision/recall metrics of the overlapping area with respect
	to the ground truth. The proposed system achieves an overall averaged
	F-measure of 85 {\%}. The complete source code package and associated
	data are available at https://github.com/fpeder/mscr under the FreeBSD
	license to support reproducibility.},
  doi = {10.1007/s10032-016-0271-5},
  file = {:pdfs/2016 - Document segmentation and classification into musical scores and text.pdf:PDF}
}
@inproceedings{Penarrubia2022,
  author = {Penarrubia, Carlos and Garrido-Mu{\~{n}}oz, Carlos and Valero-Mas, Jose J. and Calvo-Zaragoza, Jorge},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Efficient Approaches for Notation Assembly in Optical Music Recognition},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {29--32},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Efficient Approaches for Notation Assembly in Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{Pham2015,
  author = {Pham, Viet-Khoi and Nguyen, Hai-Dang and Tran, Minh-Triet},
  booktitle = {International Conference on Learning and Collaboration Technologies},
  title = {Virtual Music Teacher for New Music Learners with Optical Music Recognition},
  year = {2015},
  organization = {Springer},
  pages = {415--426},
  doi = {10.1007/978-3-319-20609-7_39},
  file = {:pdfs/2015 - Virtual Music Teacher for new Music Learners with Optical Music Recognition.pdf:PDF},
  keywords = {others}
}
@article{Pham2015a,
  author = {Pham, Van Khien and Lee, Guee-Sang},
  journal = {International Journal of Multimedia and Ubiquitous Engineering},
  title = {Music Score Recognition Based on a Collaborative Model},
  year = {2015},
  issn = {1975-0080},
  number = {8},
  pages = {379--390},
  volume = {10},
  abstract = {Recognition of musical symbols is an essential part of any music score system, and results depend strongly on the methods chosen. Most existing approaches to OMR (optical music recognition) remove staff lines before symbols are detected, so the symbols can easily get damaged. Other methods recognize symbols without staff-line removal, but they suffer from low accuracy and long processing times. In this paper, methods both with and without staff-line removal are combined into new procedures that improve symbol recognition. Many symbols, such as vertical lines, note heads, pitch, beams and tails, are detected before the staff lines are deleted; the staff lines are then removed so that the remaining symbols can be identified using connected components. The proposed method is applied on a Samsung smartphone with an embedded high-resolution camera. Experimental results show that the recognition rate is higher than that of existing methods and that the computation time is reduced significantly.},
  affiliation = {Chonnam National University, Kwangju, South Korea},
  author_keywords = {Line detection; Music scores; Note head; Projection; Staff line detection; Staff line removal; Template matching},
  correspondence_address1 = {Lee, G.; Chonnam National University, South Korea},
  doi = {10.14257/ijmue.2015.10.8.37},
  file = {:pdfs/2015 - Music Score Recognition Based on a Collaborative Model.pdf:PDF},
  funding_details = {2014-024950, NRF, National Research Foundation of Korea; S2173771, SMBA, National Research Foundation of Korea},
  keywords = {Template matching, Line detection; Line removal; Music scores; Note head; Projection, Smartphones},
  publisher = {Science and Engineering Research Support Society}
}
@inproceedings{Pham2015b,
  author = {Pham, Viet-Khoi and Nguyen, Hai-Dang and Nguyen-Khac, Tung-Anh and Tran, Minh-Triet},
  booktitle = {7th International Conference on Machine Vision},
  title = {Apply lightweight recognition algorithms in optical music recognition},
  year = {2015},
  publisher = {SPIE},
  abstract = {The problems of digitalization and transformation of musical scores into machine-readable format need to be solved, since they help people to enjoy music, to learn music, to conserve music sheets, and even to assist music composers. However, the results of existing methods still require improvements for higher accuracy. Therefore, the authors propose lightweight algorithms for Optical Music Recognition to help people to recognize and automatically play musical scores. In our proposal, after removing staff lines and extracting symbols, each music symbol is represented as a grid of identical M × N cells, and the features are extracted and classified with multiple lightweight SVM classifiers. Through experiments, the authors find that the size of 10 × 12 cells yields the highest precision value. Experimental results on the dataset consisting of 4929 music symbols taken from 18 modern music sheets in the Synthetic Score Database show that our proposed method is able to classify printed musical scores with accuracy up to 99.56%.},
  affiliation = {Faculty of Information Technology, University of Science, VNU-HCM, Viet Nam},
  author_keywords = {lightweight algorithm; Optical Music Recognition; Stable Paths approach; Support Vector Machine},
  doi = {10.1117/12.2180715},
  isbn = {9781628415605},
  issn = {0277-786X},
  keywords = {Algorithms; Classification (of information); Support vector machines, Machine-readable format; Music composers; Musical score; Optical music recognition; Recognition algorithm; Stable Paths approach; SVM classifiers, Computer vision}
}
@misc{PhotoScore,
  author = {Neuratron},
  howpublished = {\url{http://www.neuratron.com/photoscore.htm}},
  title = {PhotoScore 2018},
  year = {2018},
  url = {http://www.neuratron.com/photoscore.htm}
}
@inproceedings{PinheiroPereira2016,
  author = {Pinheiro Pereira, Roberto M. and Matos, Caio E.F. and Braz, Geraldo Jr. and de Almeida, Jo{\~{a}}o D.S. and de Paiva, Anselmo C.},
  booktitle = {22nd Brazilian Symposium on Multimedia and the Web},
  title = {A Deep Approach for Handwritten Musical Symbols Recognition},
  year = {2016},
  address = {Teresina, Piau{\'{i}}, Brazil},
  pages = {191--194},
  publisher = {ACM},
  acmid = {2988171},
  doi = {10.1145/2976796.2988171},
  file = {:pdfs/2016 - A Deep Approach for Handwritten Musical Symbols Recognition.pdf:PDF},
  isbn = {978-1-4503-4512-5},
  keywords = {convolutional neural network, deep learning, document analyses, optical musical recognition}
}
@inproceedings{Pinto2000,
  author = {Pinto, Jo{\~a}o Caldas and Vieira, Pedro and Ramalho, M. and Mengucci, M. and Pina, P. and Muge, F.},
  booktitle = {Research and Advanced Technology for Digital Libraries},
  title = {Ancient Music Recovery for Digital Libraries},
  year = {2000},
  address = {Berlin, Heidelberg},
  editor = {Borbinha, Jos{\'e} and Baker, Thomas},
  pages = {24--34},
  publisher = {Springer Berlin Heidelberg},
  abstract = {The purpose of this paper is to present a description and current state of the ``ROMA'' (Reconhecimento {\'O}ptico de M{\'u}sica Antiga, or Ancient Music Optical Recognition) Project, which consists of building an application for the recognition and restoration of ancient music manuscripts (from the XVI to the XVIII century). Beyond the inventory of the musical funds of the Biblioteca Geral da Universidade de Coimbra, this project aims to develop algorithms for score restoration and musical symbol recognition in order to allow a suitable representation and restoration in digital format. Both objectives have an intrinsic research nature, one in the area of musicology and the other in digital libraries.},
  doi = {10.1007/3-540-45268-0_3},
  file = {:pdfs/2000 - Ancient Music Recovery for Digital Libraries.pdf:PDF},
  isbn = {978-3-540-45268-3}
}
@article{Pinto2003,
  author = {Pinto, Jo{\~a}o Caldas and Vieira, Pedro and Sousa, Jo{\~a}o M.},
  journal = {International Journal on Document Analysis and Recognition},
  title = {A new graph-like classification method applied to ancient handwritten musical symbols},
  year = {2003},
  issn = {1433-2825},
  number = {1},
  pages = {10--22},
  volume = {6},
  abstract = {Several algorithms have been proposed in the past to solve the problem of binary pattern recognition. The problem of finding features that clearly distinguish two or more different patterns is a key issue in the design of such algorithms. In this paper, a graph-like recognition process is proposed that combines a number of different classifiers to simplify the type of features and classifiers used in each classification step. The graph-like classification method is applied to ancient music optical recognition, and a high degree of accuracy has been achieved.},
  doi = {10.1007/s10032-003-0102-3},
  file = {:pdfs/2003 - A New Graph like Classification Method Applied to Ancient Handwritten Musical Symbols.pdf:PDF},
  url = {https://doi.org/10.1007/s10032-003-0102-3}
}
@techreport{Pinto2010,
  author = {Pinto, Telmo and Rebelo, Ana and Giraldi, Gilson and Cardoso, Jaime dos Santos},
  institution = {Universidade do Porto, Portugal},
  title = {Content Aware Music Score Binarization},
  year = {2010},
  file = {:pdfs/2010 - Content Aware Music Score Binarization.pdf:PDF},
  keywords = {binarization},
  publisher = {INESC Porto},
  url = {http://www.inescporto.pt/~jsc/publications/conferences/2010TPintoACCV.pdf}
}
@inproceedings{Pinto2011,
  author = {Pinto, Telmo and Rebelo, Ana and Giraldi, Gilson and Cardoso, Jaime dos Santos},
  booktitle = {Pattern Recognition and Image Analysis},
  title = {Music Score Binarization Based on Domain Knowledge},
  year = {2011},
  editor = {Vitri{\`a}, Jordi and Sanches, Jo{\~a}o Miguel and Hern{\'a}ndez, Mario},
  pages = {700--708},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Image binarization is a common operation in the pre-processing stage of most Optical Music Recognition (OMR) systems. The choice of an appropriate binarization method for handwritten music scores is a difficult problem. Several works have already evaluated the performance of existing binarization processes in diverse applications. However, no goal-directed studies for music sheet documents have been carried out. This paper presents a novel binarization method based on the content knowledge of the image. The method only needs the estimation of the staff-line thickness and the vertical distance between two staff lines. This information is extracted directly from the gray-level music score. The proposed binarization procedure is experimentally compared with several state-of-the-art methods.},
  doi = {10.1007/978-3-642-21257-4_87},
  file = {:pdfs/2011 - Music Score Binarization Based on Domain Knowledge.pdf:PDF},
  isbn = {978-3-642-21257-4}
}
@misc{PlayScore,
  author = {Organum},
  howpublished = {\url{http://www.playscore.co}},
  title = {PlayScore},
  year = {2016},
  url = {http://www.playscore.co}
}
@inproceedings{PoulaindAndecy1994,
  author = {Poulain d'Andecy, Vincent and Camillerapp, Jean and Leplumey, Ivan},
  booktitle = {12th International Conference on Pattern Recognition},
  title = {Kalman filtering for segment detection: application to music scores analysis},
  year = {1994},
  publisher = {IEEE Comput. Soc. Press},
  doi = {10.1109/icpr.1994.576283},
  file = {:pdfs/1994 - Kalman Filtering For Segment Detection - Application To Music Scores Analysis.pdf:PDF}
}
@inproceedings{PoulaindAndecy1994a,
  author = {Poulain d'Andecy, Vincent and Camillerapp, Jean and Leplumey, Ivan},
  booktitle = {Actes 9 {\`{e}}me Congr{\'{e}}s AFCET Reconnaissance des Formes et Intelligence Artificielle},
  title = {D{\'{e}}tecteur robuste de segments; Application {\`{a}} l'analyse de partitions musicales},
  year = {1994}
}
@article{PoulaindAndecy1995,
  author = {Poulain d'Andecy, Vincent and Camillerapp, Jean and Leplumey, Ivan},
  journal = {Traitement du Signal},
  title = {Analyse de Partitions Musicales},
  year = {1995},
  number = {6},
  pages = {653--661},
  volume = {12},
  file = {:pdfs/1995 - Analyse De Partitions Musicales.pdf:PDF},
  language = {French},
  url = {http://hdl.handle.net/2042/1939}
}
@phdthesis{Prerau1970,
  author = {Prerau, David S.},
  school = {Massachusetts Institute of Technology},
  title = {Computer pattern recognition of standard engraved music notation},
  year = {1970}
}
@inproceedings{Prerau1971,
  author = {Prerau, David S.},
  booktitle = {Fall Joint Computer Conference},
  title = {Computer pattern recognition of printed music},
  year = {1971},
  pages = {153--162},
  abstract = {The standard notation used to specify most instrumental and vocal music forms a conventionalized, two-dimensional, visual pattern class. This paper discusses computer recognition of the music information specified by a sample of this standard notation. A sample of printed music notation is scanned optically, and a digitized version of the music sample is fed into the computer. The digitized sample may be considered the data-set sensed by the computer. The computer performs the recognition and then produces an output in the Ford-Columbia music representation. Ford-Columbia is an alphanumeric language isomorphic to standard music notation. It is therefore capable of representing the music information specified by the original sample.},
  file = {:pdfs/1971 - Computer Pattern Recognition of Printed Music.pdf:PDF},
  keywords = {computer pattern recognition; printed music; Ford, to classify}
}
@phdthesis{Pruslin1966,
  author = {Pruslin, Dennis Howard},
  school = {Massachusetts Institute of Technology},
  title = {Automatic Recognition of Sheet Music},
  year = {1966},
  address = {Cambridge, Massachusetts, USA}
}
@techreport{Pugin2001,
  author = {Pugin, Laurent},
  institution = {Geneva University},
  title = {R{\'{e}}alisation d'un syst{\`{e}}me de superposition de partitions de musique anciennes},
  year = {2001},
  address = {Geneva, Switzerland},
  file = {:pdfs/2001 - Realisation dun Systeme De Superposition De Partitions De Musique Anciennes.pdf:PDF},
  url = {http://www.unige.ch/lettres/armus/music/devrech/aruspix/pdf/licence.pdf}
}
@inproceedings{Pugin2006,
  author = {Pugin, Laurent},
  booktitle = {7th International Conference on Music Information Retrieval},
  title = {Optical Music Recognition of Early Typographic Prints using Hidden {Markov} Models},
  year = {2006},
  address = {Victoria, Canada},
  pages = {53--56},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/Pugin06},
  file = {:pdfs/2006 - Optical Music Recognition of Early Typographic Prints using Hidden Markov Models.pdf:PDF},
  url = {http://ismir2006.ismir.net/PAPERS/ISMIR06152_Paper.pdf}
}
@article{Pugin2006a,
  author = {Pugin, Laurent},
  journal = {Computing in Musicology},
  title = {Aruspix: an Automatic Source-Comparison System},
  year = {2006},
  issn = {1057-9478},
  pages = {49--59},
  volume = {14},
  location = {Cambridge, MA},
  url = {https://dialnet.unirioja.es/servlet/articulo?codigo=3476563}
}
@phdthesis{Pugin2006b,
  author = {Pugin, Laurent},
  school = {Geneva University},
  title = {Lecture et traitement informatique de typographies musicales anciennes: un logiciel de reconnaissance de partitions par mod{\`{e}}les de Markov cach{\'{e}}s},
  year = {2006},
  address = {Geneva, Switzerland},
  doi = {10.13097/archive-ouverte/unige:30024},
  file = {:pdfs/2006 - Lecture Et Traitement Informatique De Typographies Musicales Anciennes_ Un Logiciel De Reconnaissance De Partitions Par Modeles De Markov Caches.pdf:PDF}
}
@inproceedings{Pugin2007,
  author = {Pugin, Laurent and Burgoyne, John Ashley and Fujinaga, Ichiro},
  booktitle = {7th ACM/IEEE-CS Joint Conference on Digital Libraries},
  title = {Goal-directed Evaluation for the Improvement of Optical Music Recognition on Early Music Prints},
  year = {2007},
  address = {Vancouver, Canada},
  pages = {303--304},
  publisher = {ACM},
  acmid = {1255233},
  doi = {10.1145/1255175.1255233},
  file = {:pdfs/2007 - Goal-Directed Evaluation for the Improvement of Optical Music Recognition on Early Music Prints.pdf:PDF},
  isbn = {978-1-59593-644-8},
  keywords = {adaptive binarization, early music, goal-directed evaluation, optical music recognition, test-driven development}
}
@inproceedings{Pugin2007a,
  author = {Pugin, Laurent and Burgoyne, John Ashley and Fujinaga, Ichiro},
  booktitle = {8th International Conference on Music Information Retrieval},
  title = {MAP Adaptation to Improve Optical Music Recognition of Early Music Documents Using Hidden Markov Models},
  year = {2007},
  pages = {513--516},
  file = {:pdfs/2007 - Map Adaptation to Improve Optical Music Recognition of Early Music Documents using Hidden Markov Models.pdf:PDF},
  url = {http://ismir2007.ismir.net/proceedings/ISMIR2007_p513_pugin.pdf}
}
@inproceedings{Pugin2007b,
  author = {Pugin, Laurent and Burgoyne, John Ashley and Fujinaga, Ichiro},
  booktitle = {Research and Advanced Technology for Digital Libraries},
  title = {Reducing Costs for Digitising Early Music with Dynamic Adaptation},
  year = {2007},
  address = {Berlin, Heidelberg},
  editor = {Kov{\'a}cs, L{\'a}szl{\'o} and Fuhr, Norbert and Meghini, Carlo},
  pages = {471--474},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Optical music recognition (OMR) enables librarians to digitise early music sources on a large scale. The cost of expert human labour to correct automatic recognition errors dominates the cost of such projects. To reduce the number of recognition errors in the OMR process, we present an innovative approach to adapt the system dynamically, taking advantage of the human editing work that is part of any digitisation project. The corrected data are used to perform MAP adaptation, a machine-learning technique used previously in speech recognition and optical character recognition (OCR). Our experiments show that this technique can reduce editing costs by more than half.},
  file = {:pdfs/2007 - Reducing Costs for Digitising Early Music with Dynamic Adaptation.pdf:PDF},
  isbn = {978-3-540-74851-9}
}
@techreport{Pugin2007c,
  author = {Pugin, Laurent and Burgoyne, John Ashley and Eck, Douglas and Fujinaga, Ichiro},
  institution = {McGill University},
  title = {Book-Adaptive and Book-Dependent Models to Accelerate Digitization of Early Music},
  year = {2007},
  address = {Whistler, BC},
  file = {:pdfs/2007 - Book Adaptive and Book Dependent Models to Accelerate Digitization of Early Music.pdf:PDF},
  pages = {1--8},
  url = {https://www.researchgate.net/publication/255604238_Book-Adaptive_and_Book-Dependent_Models_to_Accelerate_Digitization_of_Early_Music}
}
@inproceedings{Pugin2008,
  author = {Pugin, Laurent and Hockman, Jason and Burgoyne, John Ashley and Fujinaga, Ichiro},
  booktitle = {9th International Conference on Music Information Retrieval},
  title = {Gamera versus Aruspix -- Two Optical Music Recognition Approaches},
  year = {2008},
  file = {:pdfs/2008 - Gamera versus Aruspix - Two Optical Music Recognition Approaches.pdf:PDF},
  url = {http://ismir2008.ismir.net/papers/ISMIR2008_247.pdf}
}
@inproceedings{Pugin2013,
  author = {Pugin, Laurent and Crawford, Tim},
  booktitle = {14th International Society for Music Information Retrieval Conference},
  title = {Evaluating {OMR} on the Early Music Online Collection},
  year = {2013},
  address = {Curitiba, Brazil},
  editor = {Alceu de Souza Britto Jr. and Fabien Gouyon and Simon Dixon},
  pages = {439--444},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/PuginC13},
  file = {:pdfs/2013 - Evaluating OMR on the Early Music Online Collection.pdf:PDF},
  url = {http://ismir2013.ismir.net/wp-content/uploads/2013/09/65_Paper.pdf}
}
@article{Ramirez2014,
  author = {Ramirez, Carolina and Ohya, Jun},
  journal = {Journal of New Music Research},
  title = {Automatic Recognition of Square Notation Symbols in Western Plainchant Manuscripts},
  year = {2014},
  issn = {0929-8215},
  number = {4},
  pages = {390--399},
  volume = {43},
  abstract = {While the Optical Music Recognition (OMR) of printed and
	handwritten music scores in modern standard notation has been broadly
	studied, this is not the case for early music manuscripts. This is
	mainly due to the high variability in the sources introduced by their
	severe physical degradation, the lack of notation standards and,
	in the case of the scanned versions, by non-homogenous image-acquisition
	protocols. The volume of early musical manuscripts available is considerable,
	and therefore we believe that computational methods can be extremely
	useful in helping to preserve, share and analyse this information.
	This paper presents an approach to recognizing handwritten square
	musical notation in degraded western plainchant manuscripts from
	the XIVth to XVIth centuries. We propose the use of image processing
	techniques that behave robustly under high data variability and which
	do not require strong hypotheses regarding the condition of the sources.
	The main differences from traditional OMR approaches are our avoidance
	of the staff line removal stage and the use of grey-level images
	to perform primitive segmentation and feature extraction. We used
	136 images from the Digital Scriptorium repository (DS, 2007), from
	which we were able to extract over 90% of the staves and over 88%
	of all symbols present. For symbol classification, we used gradient-based
	features and SVM classifiers, obtaining over 90% precision and recall
	over eight basic symbol classes.},
  affiliation = {Waseda University, Japan},
  author_keywords = {musical manuscript analysis; OMR (Optical Music Recognition); plainchant; SVM},
  correspondence_address1 = {Ramirez, C.; Waseda University, Japan},
  doi = {10.1080/09298215.2014.931438},
  publisher = {Taylor and Francis Ltd.}
}
@inproceedings{Randriamahefa1993,
  author = {Randriamahefa, R. and Cocquerez, J. P. and Fluhr, C. and Pepin, F. and Philipp, S.},
  booktitle = {2nd International Conference on Document Analysis and Recognition},
  title = {Printed music recognition},
  year = {1993},
  pages = {898--901},
  abstract = {The different steps to recognize printed music are described. The first step is to detect and eliminate the staff lines. A robust method is used, based on finding regions that contain only staff lines and linking the staff-line pieces across these regions. After staff-line elimination, symbols are isolated and a representation called an attributed graph is constructed for each symbol. Thinning, polygonalization, spurious-segment cleaning, and segment fusion are performed. A first classification, separating all notes with black heads from the others, is performed. To recognize notes with black heads (beamed groups or quarter notes), a straightforward structural approach using this representation is sufficient and efficient in most cases. In the ambiguous cases (a chord, or a black head linked to two stems), an ellipse-matching method is used. To recognize half notes and bar lines, a structural method using the graph is used.},
  doi = {10.1109/ICDAR.1993.395592},
  file = {:pdfs/1993 - Printed Music Recognition.pdf:PDF},
  keywords = {image recognition;music;printed music recognition;staff lines;robust method;symbols;attributed graph;polygonalization;spurious segments cleaning;segment fusion;beamed group;quarter notes;ellipse matching method;half notes;bar lines;graph;Multiple signal classification;Joining processes;Image resolution;Robustness;Cleaning;System testing;Speech recognition;Automatic speech recognition;Head;Image segmentation}
}
@techreport{Raphael2011,
  author = {Raphael, Christopher},
  institution = {Indiana University, Bloomington},
  title = {Optical Music Recognition on the {IMSLP}},
  year = {2011},
  file = {:pdfs/2011 - Optical Music Recognition on the IMSLP.pdf:PDF}
}
@inproceedings{Raphael2011a,
  author = {Raphael, Christopher and Wang, Jingya},
  booktitle = {12th International Society for Music Information Retrieval Conference},
  title = {New Approaches to Optical Music Recognition},
  year = {2011},
  address = {Miami, Florida},
  editor = {Anssi Klapuri and Colby Leider},
  pages = {305--310},
  publisher = {University of Miami},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/RaphaelW11},
  file = {:pdfs/2011 - New Approaches to Optical Music Recognition.pdf:PDF},
  url = {http://ismir2011.ismir.net/papers/OS3-3.pdf}
}
@inproceedings{Raphael2013,
  author = {Raphael, Christopher and Jin, Rong},
  booktitle = {IS\&T/SPIE Electronic Imaging},
  title = {Optical Music Recognition on the {International Music Score Library Project}},
  year = {2013},
  organization = {International Society for Optics and Photonics},
  doi = {10.1117/12.2040247},
  file = {:pdfs/2014 - Optical Music Recognition on the International Music Score Library Project.pdf:PDF}
}
@inproceedings{Rebelo2007,
  author = {Rebelo, Ana and Capela, Artur and Pinto da Costa, Joaquim F. and Guedes, Carlos and Carrapatoso, Eurico and Cardoso, Jaime dos Santos},
  booktitle = {3rd International Conference on Automated Production of Cross Media Content for Multi-Channel Distribution},
  title = {A Shortest Path Approach for Staff Line Detection},
  year = {2007},
  pages = {79--85},
  abstract = {Many music works produced in the past still exist only as original manuscripts or as photocopies. Preserving them entails their digitalization and consequent accessibility in an easy-to-manage digital format. The manual process to carry out this task is very time consuming and error prone. Optical music recognition (OMR) is a form of structured document image analysis where music symbols are isolated and identified so that the music can be conveniently processed. While OMR systems perform well on printed scores, current methods for reading handwritten musical scores by computers remain far from ideal. One of the fundamental stages of this process is the staff line detection. In this paper a new method for the automatic detection of music stave lines based on a shortest path approach is presented. Lines with some curvature, discontinuities, and inclination are robustly detected. The proposed algorithm behaves favourably when compared experimentally with well-established algorithms.},
  doi = {10.1109/AXMEDIS.2007.16},
  file = {:pdfs/2007 - A Shortest Path Approach for Staff Line Detection.pdf:PDF},
  keywords = {document image processing;graph theory;music;optical character recognition;automatic detection;music stave lines;music symbol;optical music recognition;shortest path approach;staff line detection;structured document image analysis;Computer errors;Cultural differences;Handwriting recognition;Image analysis;Image recognition;Music information retrieval;Ordinary magnetoresistance;Production;Robustness;Text analysis}
}
@mastersthesis{Rebelo2008,
  author = {Rebelo, Ana},
  school = {Universidade do Porto},
  title = {New Methodologies Towards an Automatic Optical Recognition of Handwritten Musical Scores},
  year = {2008},
  file = {:pdfs/2008 - MScThesisAnaRebelo - New methodologies towards an automatic optical recognition of handwritten musical scores.pdf:PDF},
  url = {http://www.inescporto.pt/~arebelo/education/2008MScThesisDefenseAnaRebelo.pdf}
}
@article{Rebelo2010,
  author = {Rebelo, Ana and Capela, G. and Cardoso, Jaime dos Santos},
  journal = {International Journal on Document Analysis and Recognition},
  title = {Optical recognition of music symbols},
  year = {2010},
  issn = {1433-2825},
  number = {1},
  pages = {19--31},
  volume = {13},
  abstract = {Many musical works produced in the past are still currently available only as original manuscripts or as photocopies. The preservation of these works requires their digitalization and transformation into a machine-readable format. However, and despite the many research activities on optical music recognition (OMR), the results for handwritten musical scores are far from ideal. Each of the proposed methods lays the emphasis on different properties and therefore makes it difficult to evaluate the efficiency of a proposed method. We present in this article a comparative study of several recognition algorithms of music symbols. After a review of the most common procedures used in this context, their respective performances are compared using both real and synthetic scores. The database of scores was augmented with replicas of the existing patterns, transformed according to an elastic deformation technique. Such transformations aim to introduce invariances in the prediction with respect to the known variability in the symbols, particularly relevant on handwritten works. The following study and the adopted databases can constitute a reference scheme for any researcher who wants to compare a new OMR algorithm against well-known ones.},
  doi = {10.1007/s10032-009-0100-1},
  file = {:pdfs/2010 - Optical Recognition of Music Symbols.pdf:PDF},
  keywords = {Document image processing, Music, Off-line recognition, Performance evaluation, Symbol recognition, to classify}
}
@inproceedings{Rebelo2011,
  author = {Rebelo, Ana and Tkaczuk, Jakub and Sousa, Ricardo and Cardoso, Jaime dos Santos},
  booktitle = {10th International Conference on Machine Learning and Applications and Workshops},
  title = {Metric Learning for Music Symbol Recognition},
  year = {2011},
  pages = {106--111},
  abstract = {Although Optical Music Recognition (OMR) has been the focus of much research for decades, the processing of handwritten musical scores is not yet satisfactory. The efforts made to find robust symbol representations and learning methodologies have not found a similar quality in the learning of the dissimilarity concept. Simple Euclidean distances are often used to measure dissimilarity between different examples. However, such distances do not necessarily yield the best performance. In this paper, we propose to learn the best distance for the k-nearest neighbor (k-NN) classifier. The distance concept will be tuned both for the application domain and the adopted representation for the music symbols. The performance of the method is compared with the support vector machine (SVM) classifier using both real and synthetic music scores. The synthetic database includes four types of deformations inducing variability in the printed musical symbols which exist in handwritten music sheets. The work presented here can open new research paths towards a novel automatic musical symbols recognition module for handwritten scores.},
  doi = {10.1109/ICMLA.2011.94},
  file = {:pdfs/2011 - Metric Learning for Music Symbol Recognition.pdf:PDF},
  keywords = {database management systems;learning (artificial intelligence);music;support vector machines;metric learning;optical music recognition;robust symbol representations;dissimilarity concept;k-nearest neighbor classifier;support vector machine;SVM classifier;real music scores;synthetic music scores;synthetic database;printed musical symbols;handwritten music sheets;automatic musical symbols recognition module;Measurement;Support vector machines;Training;Feature extraction;Machine learning;Kernel;Vectors}
}
@inproceedings{Rebelo2011a,
  author = {Rebelo, Ana and Paszkiewicz, Filipe and Guedes, Carlos and Marcal, Andre R. S. and Cardoso, Jaime dos Santos},
  booktitle = {Bridges 2011: Mathematics, Music, Art, Architecture, Culture},
  title = {A Method for Music Symbols Extraction based on Musical Rules},
  year = {2011},
  pages = {81--88},
  file = {:pdfs/2011 - A Method for Music Symbols Extraction Based on Musical Rules.pdf:PDF},
  isbn = {098460426X},
  url = {http://www.inescporto.pt/~jsc/publications/conferences/2011ARebeloBRIDGES.pdf}
}
@article{Rebelo2012,
  author = {Rebelo, Ana and Fujinaga, Ichiro and Paszkiewicz, Filipe and Marcal, Andre R.S. and Guedes, Carlos and Cardoso, Jaime dos Santos},
  journal = {International Journal of Multimedia Information Retrieval},
  title = {Optical music recognition: state-of-the-art and open issues},
  year = {2012},
  number = {3},
  pages = {173--190},
  volume = {1},
  doi = {10.1007/s13735-012-0004-6},
  file = {:pdfs/2012 - Optical Music Recognition - State of the Art and Open Issues.pdf:PDF},
  publisher = {Springer}
}
@phdthesis{Rebelo2012a,
  author = {Rebelo, Ana},
  school = {University of Porto},
  title = {Robust Optical Recognition of Handwritten Musical Scores based on Domain Knowledge},
  year = {2012},
  file = {:pdfs/2012 - Robust Optical Recognition of Handwritten Musical Scores based on Domain Knowledge - Ana Rebelo Phd Thesis.pdf:PDF},
  institution = {INESC Porto},
  url = {http://www.inescporto.pt/~arebelo/arebeloThesis.pdf}
}
@inproceedings{Rebelo2013,
  author = {Rebelo, Ana and Mar\c{c}al, Andr{\'{e}} and Cardoso, Jaime dos Santos},
  booktitle = {International Conference on Image Analysis and Recognition},
  title = {Global constraints for syntactic consistency in {OMR}: an ongoing approach},
  year = {2013},
  file = {:pdfs/2013 - Global Constraints for Syntactic Consistency in OMR - an Ongoing Approach.pdf:PDF},
  groups = {recognition},
  url = {http://www.inescporto.pt/~jsc/publications/conferences/2013ARebeloICIAR.pdf}
}
@inproceedings{Rebelo2013a,
  author = {Rebelo, Ana and Cardoso, Jaime dos Santos},
  booktitle = {12th International Conference on Document Analysis and Recognition},
  title = {Staff Line Detection and Removal in the Grayscale Domain},
  year = {2013},
  pages = {57--61},
  abstract = {The detection of staff lines is the first step of most Optical Music Recognition (OMR) systems. Its great significance derives from the ease with which we can then proceed with the extraction of musical symbols. All OMR tasks are usually carried out on binary images obtained by setting local or global thresholds. These techniques, however, may remove relevant information from the music sheet and introduce artifacts which degrade results in the later stages of the process. There is therefore a need for a method that reduces the loss of information due to binarization. The baseline for the methodology proposed in this paper follows the shortest path algorithm proposed in [CardosoTPAMI08]. The concept of strong staff pixels (SSPs), a set of pixels with a high probability of belonging to a staff line, is proposed to guide the cost function. The SSPs make it possible to go beyond binary-based detection and to generalize the binary framework to grayscale music scores. The proposed methodology achieves good results.},
  doi = {10.1109/ICDAR.2013.20},
  file = {:pdfs/2013 - Staff Line Detection and Removal in the Grayscale Domain.pdf:PDF},
  issn = {1520-5363},
  keywords = {feature extraction;graph theory;music;object detection;optical character recognition;grayscale music scores;binary framework;binary based detection;cost function;SSP;strong staff pixels;shortest path algorithm;global threshold;local threshold;binary images;musical symbol extraction;OMR systems;optical music recognition system;grayscale domain;staff line removal;staff line detection;Gray-scale;Cost function;Noise;Optical imaging;Robustness;Image segmentation;Standards}
}
@inproceedings{Reed1996,
  author = {Reed, K. Todd and Parker, J. R.},
  booktitle = {13th International Conference on Pattern Recognition},
  title = {Automatic Computer Recognition of Printed Music},
  year = {1996},
  pages = {803--807},
  abstract = {This paper provides an overview of the implementation of Lemon, a complete optical music recognition system. Among the techniques employed by the implementation are: template matching, the Hough transform, line adjacency graphs, character profiles, and graph grammars. Experimental results, including comparisons with commercial systems, are provided.},
  doi = {10.1109/ICPR.1996.547279},
  file = {:pdfs/1996 - Automatic Computer Recognition of Printed Music.pdf:PDF},
  isbn = {081867282X},
  issn = {1051-4651},
  keywords = {Optical Music Recognition, to classify}
}
@inproceedings{Regimbal2019,
  author = {Regimbal, Juliette and McLennan, Zo{\'{e}} and Vigliensoni, Gabriel and Tran, Andrew and Fujinaga, Ichiro},
  booktitle = {Music Encoding Conference 2019},
  title = {Neon2: A Verovio-based square-notation editor},
  year = {2019},
  address = {Vienna, Austria},
  file = {:pdfs/2019 - Neon2_ a Verovio Based Square Notation Editor.pdf:PDF},
  url = {https://music-encoding.org/conference/2019/abstracts_mec2019/Neon2.pdf}
}
@inproceedings{Repolusk2023,
  author = {Repolusk, Tristan and Veas, Eduardo},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {The Suzipu Musical Annotation Tool for the Creation of Machine-Readable Datasets of Ancient Chinese Music},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {7--11},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - The Suzipu Musical Annotation Tool for the Creation of Machine Readable Datasets of Ancient Chinese Music.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@inproceedings{Reuse2019,
  author = {de Reuse, Timothy and Fujinaga, Ichiro},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {Robust Transcript Alignment on Medieval Chant Manuscripts},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Jorge Calvo-Zaragoza and Alexander Pacha},
  pages = {21--26},
  file = {:pdfs/2019 - Robust Transcript Alignment on Medieval Chant Manuscripts.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@incollection{Rhodes2016,
  author = {Rhodes, Christophe and Crawford, Tim and d'Inverno, Mark},
  booktitle = {Analysis of Large and Complex Data},
  publisher = {Springer International Publishing},
  title = {Duplicate Detection in Facsimile Scans of Early Printed Music},
  year = {2016},
  address = {Cham},
  isbn = {978-3-319-25226-1},
  pages = {449--459},
  abstract = {There is a growing number of collections of readily available scanned
	musical documents, whether generated and managed by libraries, research
	projects, or volunteer efforts. They are typically digital images;
	for computational musicology we also need the musical data in machine-readable
	form. Optical Music Recognition (OMR) can be used on printed music,
	but is prone to error, depending on document condition and the quality
	of intermediate stages in the digitization process such as archival
	photographs. This work addresses the detection of one such error---duplication
	of images---and the discovery of other relationships between images
	in the process.},
  doi = {10.1007/978-3-319-25226-1_38},
  file = {:pdfs/2016 - Duplicate detection in facsimile scans of early printed music.pdf:PDF}
}
@inproceedings{Riba2017,
  author = {Riba, Pau and Forn{\'{e}}s, Alicia and Llad{\'{o}}s, Josep},
  booktitle = {Graphic Recognition. Current Trends and Challenges},
  title = {Towards the Alignment of Handwritten Music Scores},
  year = {2017},
  editor = {Lamiroy, Bart and Lins, Rafael Dueire},
  pages = {103--116},
  publisher = {Springer Verlag},
  series = {Lecture Notes in Computer Science},
  abstract = {It is very common to find different versions of the same music work in archives of Opera Theaters. These differences correspond to modifications and annotations from the musicians. From the musicologist point of view, these variations are very interesting and deserve study. This paper explores the alignment of music scores as a tool for automatically detecting the passages that contain such differences. Given the difficulties in the recognition of handwritten music scores, our goal is to align the music scores and at the same time, avoid the recognition of music elements as much as possible. After removing the staff lines, braces and ties, the bar lines are detected. Then, the bar units are described as a whole using the Blurred Shape Model. The bar units alignment is performed by using Dynamic Time Warping. The analysis of the alignment path is used to detect the variations in the music scores. The method has been evaluated on a subset of the CVC-MUSCIMA dataset, showing encouraging results.},
  affiliation = {Computer Vision Center - Computer Science Department, Universitat Autònoma de Barcelona, Bellaterra, Catalonia, Spain},
  author_keywords = {Dynamic time warping alignment; Handwritten music scores; Optical music recognition},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/grec/RibaFL15},
  correspondence_address1 = {Fornés, A.; Computer Vision Center - Computer Science Department, Universitat Autònoma de Barcelona, Spain; email: afornes@cvc.uab.es},
  doi = {10.1007/978-3-319-52159-6_8},
  file = {:pdfs/2017 - Towards the Alignment of Handwritten Music Scores.pdf:PDF},
  isbn = {9783319521589},
  issn = {0302-9743},
  keywords = {Pattern recognition, Bar units; Dynamic time warping; Music scores; Optical music recognition; Shape model, Character recognition}
}
@inproceedings{RicoBlanes2017,
  author = {Rico Blanes, Adri{\`{a}} and Forn{\'{e}}s Bisquerra, Alicia},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Camera-Based Optical Music Recognition Using a Convolutional Neural Network},
  year = {2017},
  address = {Kyoto, Japan},
  organization = {IEEE},
  pages = {27--28},
  doi = {10.1109/ICDAR.2017.261},
  file = {:pdfs/2017 - Camera-based Optical Music Recognition using a Convolutional Neural Network.pdf:PDF}
}
@article{Riley2003,
  author = {Riley, Jenn and Fujinaga, Ichiro},
  journal = {OCLC Systems {\&} Services},
  title = {Recommended best practices for digital image capture of musical scores},
  year = {2003},
  issn = {1065-075X},
  number = {2},
  pages = {62--69},
  volume = {19},
  doi = {10.1108/10650750310481784},
  file = {:pdfs/2003 - Recommended Best Practices for Digital Image Capture of Musical Scores.pdf:PDF},
  keywords = {digital documents, file structures, imaging, music, to classify}
}
@inproceedings{Ringwalt2015,
  author = {Ringwalt, Dan and Dannenberg, Roger and Russell, Andrew},
  booktitle = {International Conference on New Interfaces for Musical Expression},
  title = {Optical Music Recognition for Interactive Score Display},
  year = {2015},
  address = {Baton Rouge, Louisiana, USA},
  editor = {Edgar Berdahl and Jesse T. Allison},
  pages = {95--98},
  publisher = {The School of Music and the Center for Computation and Technology (CCT), Louisiana State University},
  acmid = {2993805},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/nime/RingwaltDR15},
  file = {:pdfs/2015 - Optical Music Recognition for Interactive Score Display.pdf:PDF},
  isbn = {978-0-692-49547-6},
  url = {http://dl.acm.org/citation.cfm?id=2993778.2993805}
}
@inproceedings{Ringwalt2015a,
  author = {Ringwalt, Dan and Dannenberg, Roger B.},
  booktitle = {16th International Society for Music Information Retrieval Conference},
  title = {Image Quality Estimation for Multi-Score {OMR}},
  year = {2015},
  pages = {17--23},
  file = {:pdfs/2015 - Image Quality Estimation for Multi-Score OMR.pdf:PDF},
  isbn = {978-84-606-8853-2},
  url = {http://ismir2015.uma.es/articles/37_Paper.pdf}
}
@inproceedings{Rios-Vila2019,
  author = {R{\'{i}}os-Vila, Antonio and Calvo-Zaragoza, Jorge and Rizo, David and I{\~{n}}esta, Jos{\'{e}} M.},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {ReadSco: An Open-Source Web-Based Optical Music Recognition Tool},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Jorge Calvo-Zaragoza and Alexander Pacha},
  pages = {27--30},
  file = {:pdfs/2019 - ReadSco_ an Open Source Web Based Optical Music Recognition Tool.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@inproceedings{RiosVila2021,
  author = {R{\'i}os-Vila, Antonio and Rizo, David and Calvo-Zaragoza, Jorge and I{\~{n}}esta, Jos{\'e} Manuel},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Completing Optical Music Recognition with Agnostic Transcription and Machine Translation},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {28--32},
  file = {:pdfs/2021 - Completing Optical Music Recognition with Agnostic Transcription and Machine Translation.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{RiosVila2022,
  author = {R{\'{i}}os-Vila, Antonio and I{\~{n}}esta, Jose M. and Calvo-Zaragoza, Jorge},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {End-To-End Full-Page Optical Music Recognition of Monophonic Documents via Score Unfolding},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {20--24},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - End to End Full Page Optical Music Recognition of Monophonic Documents Via Score Unfolding.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{RiosVila2023,
  author = {R\'{i}os-Vila, Antonio},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Rotations Are All You Need: A Generic Method For End-To-End Optical Music Recognition},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {34--38},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Rotations Are All You Need_ a Generic Method for End to End Optical Music Recognition.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@misc{RISM,
  author = {Eitner, Robert},
  howpublished = {\url{http://www.rism.info}},
  title = {R{\'{e}}pertoire International des Sources Musicales},
  year = {1952},
  url = {http://www.rism.info}
}
@phdthesis{Rizo2010,
  author = {Rizo, David},
  school = {Universidad de Alicante},
  title = {Symbolic music comparison with tree data structures},
  year = {2010},
  file = {:pdfs/2010 - Symbolic Music Comparison with Tree Data Structures.pdf:PDF},
  url = {http://rua.ua.es/dspace/bitstream/10045/18331/1/Tesis_Rizo.pdf}
}
@inproceedings{Rizo2018,
  author = {Rizo, David and Calvo-Zaragoza, Jorge and I\~{n}esta, Jos{\'e} M.},
  booktitle = {5th International Conference on Digital Libraries for Musicology},
  title = {MuRET: A Music Recognition, Encoding, and Transcription Tool},
  year = {2018},
  address = {Paris, France},
  pages = {52--56},
  publisher = {ACM},
  acmid = {3273029},
  doi = {10.1145/3273024.3273029},
  file = {:pdfs/2018 - MuRET - a Music Recognition, Encoding, and Transcription Tool.pdf:PDF},
  isbn = {978-1-4503-6522-2},
  keywords = {encoding, notation transcription, optical music recognition},
  url = {http://doi.acm.org/10.1145/3273024.3273029}
}
@article{Roach1988,
  author = {Roach, J. W. and Tatem, J. E.},
  journal = {Pattern Recognition},
  title = {Using domain knowledge in low-level visual processing to interpret handwritten music: an experiment},
  year = {1988},
  issn = {0031-3203},
  number = {1},
  pages = {33--44},
  volume = {21},
  abstract = {Turning handwritten scores into engraved scores consumes a significant portion of music publishing companies' budgets. Pattern recognition is the major bottleneck holding up automation of this process. Human beings who know music can easily read a handwritten score, but without musical knowledge even people cannot correctly perceive the markings in a handwritten score. This paper reports an experiment in which knowledge of music, a highly structured domain, is applied to extract primitive musical features. This experiment shows that if the domain of image processing is well defined, significant improvements in low-level segmentations can be achieved.},
  doi = {10.1016/0031-3203(88)90069-6},
  file = {:pdfs/1988 - Using Domain Knowledge in Low-Level Visual Processing To Interpret Handwritten Music - An Experiment.pdf:PDF},
  keywords = {character recognition, handwritten music recognition, to classify},
  url = {http://www.sciencedirect.com/science/article/pii/0031320388900696}
}
@article{Roads1986,
  author = {Roads, Curtis},
  journal = {Computer Music Journal},
  title = {The Tsukuba Musical Robot},
  year = {1986},
  issn = {0148-9267, 1531-5169},
  number = {2},
  pages = {39--43},
  volume = {10},
  publisher = {The MIT Press},
  url = {http://www.jstor.org/stable/3679483}
}
@inproceedings{Roggenkemper2018,
  author = {Roggenkemper, Heinz and Roggenkemper, Ryan},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {How can Machine Learning make Optical Music Recognition more relevant for practicing musicians?},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {25--26},
  file = {:pdfs/2018 - How Can Machine Learning Make Optical Music Recognition More Relevant for Practicing Musicians.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Roland2002,
  author = {Roland, Perry},
  booktitle = {1st International Conference on Musical Applications Using XML},
  title = {The music encoding initiative ({MEI})},
  year = {2002},
  pages = {55--59},
  file = {:pdfs/2002 - The Music Encoding Initiative (MEI).pdf:PDF},
  url = {https://pdfs.semanticscholar.org/7fc4/16754b0508837dde8b505b3fd4dc517c7292.pdf}
}
@inproceedings{Rossant2001,
  author = {Rossant, Florence and Bloch, Isabelle},
  booktitle = {GRETSI},
  title = {Reconnaissance de Partitions Musicales par Mod{\'{e}}lisation Floue et Int{\'{e}}gration de R{\`{e}}gles Musicales},
  year = {2001},
  address = {Toulouse, France},
  file = {:pdfs/2001 - Reconnaissance De Partitions Musicales Par Modelisation Floue Et Integration De Regles Musicales (in French).pdf:PDF},
  language = {French},
  url = {http://hdl.handle.net/2042/13310}
}
@article{Rossant2002,
  author = {Rossant, Florence},
  journal = {Pattern Recognition Letters},
  title = {A global method for music symbol recognition in typeset music sheets},
  year = {2002},
  issn = {0167-8655},
  number = {10},
  pages = {1129--1141},
  volume = {23},
  abstract = {This paper presents an optical music recognition (OMR) system that can automatically recognize the main musical symbols of a scanned paper-based music score. Two major stages are distinguished: the first one, using low-level pre-processing, detects the isolated objects and outputs some hypotheses about them; the second one has to take the final correct decision, through high-level processing including contextual information and music writing rules. This article presents both stages of the method: after explaining the first one, the symbol analysis process, in detail, it shows through initial experiments that its outputs can efficiently be used as inputs for a high-level decision process.},
  doi = {10.1016/S0167-8655(02)00036-3},
  file = {:pdfs/2002 - A global method for music symbol recognition in typeset music sheets.pdf:PDF},
  keywords = {Optical music recognition}
}
@article{Rossant2004,
  author = {Rossant, Florence and Bloch, Isabelle},
  journal = {Fuzzy Sets and Systems},
  title = {A fuzzy model for optical recognition of musical scores},
  year = {2004},
  issn = {0165-0114},
  number = {2},
  pages = {165--201},
  volume = {141},
  abstract = {Optical music recognition aims at automatically reading scanned scores in order to convert them into an electronic format, such as a MIDI file. We only consider here classical monophonic music: we exclude any music written on several staves, but also any music that contains chords. In order to overcome recognition failures due to the lack of methods dealing with structural information, non-local rules and corrections, we propose a recognition approach integrating structural information in the form of relationships between symbols and of musical rules. Another contribution of this paper is to solve ambiguities by accounting for sources of imprecision and uncertainty, within the fuzzy set and possibility theory framework. We add to a single symbol analysis several rules for checking the consistency of hypotheses: graphical consistency (compatibility between accidental and note, between grace note and note, between note and augmentation dot, etc.), and syntactic consistency (accidentals, tonality, metric). All these rules are combined in order to lead to better decisions. Experimental results on 65 music sheets show that our approach leads to very good results, and is able to correct errors made by other approaches, such as the one of SmartScore.},
  doi = {10.1016/S0165-0114(03)00094-0},
  file = {:pdfs/2004 - A Fuzzy Model for Optical Recognition of Musical Scores.pdf:PDF},
  keywords = {Optical music recognition, Image processing, Pattern recognition, Fuzzy sets and possibility theory, Flexible rules, Fusion, Structural music information},
  url = {http://www.sciencedirect.com/science/article/pii/S0165011403000940}
}
@inproceedings{Rossant2005,
  author = {Rossant, Florence and Bloch, Isabelle},
  booktitle = {IEEE International Conference on Image Processing 2005},
  title = {Optical music recognition based on a fuzzy modeling of symbol classes and music writing rules},
  year = {2005},
  pages = {II--538},
  abstract = {We propose an OMR method based on fuzzy modeling of the information extracted from the scanned score and of musical rules. The aim is to disambiguate the recognition hypotheses output by the individual symbol analysis process. Fuzzy modeling makes it possible to account for imprecision in symbol detection, for typewriting variations, and for the flexibility of rules. Tests conducted on a hundred music sheets result in a global recognition rate of 98.55% and show good performance compared to SmartScore.},
  doi = {10.1109/ICIP.2005.1530111},
  file = {:pdfs/2005 - Optical Music Recognition Based on a Fuzzy Modeling of Symbol Classes and Music Writing Rules.pdf:PDF},
  issn = {1522-4880},
  keywords = {music;image recognition;object detection;fuzzy set theory;optical information processing;music writing rules;symbol classes;fuzzy modeling;optical music recognition;information extraction;scanned score;recognition hypotheses;individual symbol analysis process;symbol detection;typewriting variations;rules flexibility;SmartScore;Multiple signal classification;Writing;Pattern recognition;Pattern analysis;Image recognition;Ordinary magnetoresistance;Data mining;Testing;Performance evaluation;Printing}
}
@article{Rossant2006,
  author = {Rossant, Florence and Bloch, Isabelle},
  journal = {EURASIP Journal on Advances in Signal Processing},
  title = {Robust and Adaptive {OMR} System Including Fuzzy Modeling, Fusion of Musical Rules, and Possible Error Detection},
  year = {2006},
  issn = {1687-6180},
  number = {1},
  pages = {081541},
  volume = {2007},
  abstract = {This paper describes a system for optical music recognition (OMR) in the case of monophonic typeset scores. After clarifying the difficulties specific to this domain, we propose appropriate solutions at both the image analysis and high-level interpretation levels. Thus, a recognition and segmentation method is designed that allows dealing with common printing defects and numerous symbol interconnections. Then, musical rules are modeled and integrated, in order to make a consistent decision. This high-level interpretation step relies on the fuzzy sets and possibility framework, since it allows dealing with symbol variability, flexibility, and imprecision of music rules, and merging all these heterogeneous pieces of information. Other innovative features are the indication of potential errors and the possibility of applying learning procedures, in order to gain robustness. Experiments conducted on a large database show that the proposed method constitutes an interesting contribution to OMR.},
  doi = {10.1155/2007/81541},
  file = {:pdfs/2006 - Robust and Adaptive OMR System Including Fuzzy Modeling, Fusion of Musical Rules, and Possible Error Detection.pdf:PDF}
}
@techreport{Roth1994,
  author = {Roth, Martin},
  institution = {Swiss Federal Institute of Technology},
  title = {An approach to recognition of printed music},
  year = {1994},
  doi = {10.3929/ethz-a-000930574},
  file = {:pdfs/1994 - An Approach to Recognition of Printed Music.pdf:PDF}
}
@article{Roy2017,
  author = {Roy, Partha Pratim and Bhunia, Ayan Kumar and Pal, Umapada},
  journal = {Expert Systems with Applications},
  title = {{HMM}-based writer identification in music score documents without staff-line removal},
  year = {2017},
  issn = {0957-4174},
  pages = {222--240},
  volume = {89},
  abstract = {Writer identification from musical score documents is a challenging task due to its inherent problem of overlapping of musical symbols with staff-lines. Most of the existing works in the literature of writer identification in musical score documents were performed after a pre-processing stage of staff-lines removal. In this paper we propose a novel writer identification framework in musical score documents without removing staff-lines from the documents. In our approach, Hidden Markov Model (HMM) has been used to model the writing style of the writers without removing staff-lines. The sliding window features are extracted from musical score-lines and they are used to build writer specific HMM models. Given a query musical sheet, writer specific confidence for each musical line is returned by each writer specific model using a log-likelihood score. Next, a log-likelihood score in page level is computed by weighted combination of these scores from the corresponding line images of the page. A novel Factor Analysis-based feature selection technique is applied in sliding window features to reduce the noise appearing from staff-lines which proves efficiency in writer identification performance. In our framework we have also proposed a novel score-line detection approach in musical sheet using HMM. The experiment has been performed in CVC-MUSCIMA data set and the results obtained show that the proposed approach is efficient for score-line detection and writer identification without removing staff-lines. To get the idea of computation time of our method, detail analysis of execution time is also provided.},
  doi = {10.1016/j.eswa.2017.07.031},
  file = {:pdfs/2017 - HMM Based Writer Identification in Music Score Documents without Staff Line Removal.pdf:PDF},
  keywords = {Music score documents, Writer identification, Hidden Markov model, Factor analysis},
  url = {http://www.sciencedirect.com/science/article/pii/S0957417417305080}
}
@mastersthesis{Ruttenberg1991,
  author = {Ruttenberg, Alan},
  school = {Massachusetts Institute of Technology},
  title = {Optical Reading of Typeset Music},
  year = {1991},
  address = {Cambridge, MA},
  file = {:pdfs/1991 - Optical Reading of Typeset Music.pdf:PDF},
  url = {https://dspace.mit.edu/bitstream/handle/1721.1/69715/24680744-MIT.pdf}
}
@inproceedings{Saitis2014,
  author = {Saitis, Charalampos and Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {1st International Workshop on Digital Libraries for Musicology},
  title = {Correcting Large-Scale {OMR} Data with Crowdsourcing},
  year = {2014},
  organization = {ACM},
  pages = {1--3},
  doi = {10.1145/2660168.2660186},
  file = {:pdfs/2014 - Correcting Large-Scale OMR Data with Crowdsourcing.pdf:PDF}
}
@inproceedings{Saleh2017,
  author = {Saleh, Zeyad and Zhang, Ke and Calvo-Zaragoza, Jorge and Vigliensoni, Gabriel and Fujinaga, Ichiro},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Pixel.js: Web-Based Pixel Classification Correction Platform for Ground Truth Creation},
  year = {2017},
  address = {Kyoto, Japan},
  pages = {39--40},
  doi = {10.1109/ICDAR.2017.267},
  file = {:pdfs/2017 - Pixel.js - Web-based Pixel Classification Correction Platform for Ground Truth Creation.pdf:PDF},
  keywords = {Internet;image classification;image segmentation;Pixel.js;Web-based pixel classification correction platform;aforementioned ground truth data;document recognition;inaccurate heuristic trained image segmentation algorithms;misclassified pixels;open-source;pixel-level classification correction platform;segmentation algorithm output;Algorithm design and analysis;Graphics;Image segmentation;Labeling;Mice;Text analysis;Tools;classification;correction;document analysis;graphics;ground truth;image segmentation;labeling;layers;pixel;pixel-level;platform;user interface;web-based}
}
@inproceedings{Samiotis2021,
  author = {Samiotis, Ioannis Petros and Lofi, Christoph and Bozzon, Alessandro},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Hybrid Annotation Systems for Music Transcription},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {23--27},
  file = {:pdfs/2021 - Hybrid Annotation Systems for Music Transcription.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@misc{Sapp2013,
  author = {Sapp, Craig},
  howpublished = {\url{https://ccrma.stanford.edu/~craig/mro-compare-beethoven}},
  title = {{OMR} Comparison of SmartScore and SharpEye},
  year = {2013},
  file = {:pdfs/2013 - OMR Comparison of SmartScore and SharpEye.pdf:PDF},
  url = {https://ccrma.stanford.edu/~craig/mro-compare-beethoven}
}
@inproceedings{Seales1995,
  author = {Seales, W. Brent and Rajasekar, Arcot},
  booktitle = {Image Analysis Applications and Computer Graphics},
  title = {Interpreting music manuscripts: A logic-based, object-oriented approach},
  year = {1995},
  address = {Berlin, Heidelberg},
  editor = {Chin, Roland T. and Ip, Horace H. S. and Naiman, Avi C. and Pong, Ting-Chuen},
  pages = {181--188},
  publisher = {Springer Berlin Heidelberg},
  abstract = {This paper presents a complete framework for recognizing classes of machine-printed musical manuscripts. Our framework is designed around the decomposition of a manuscript into objects such as staves and bars which are processed with a knowledge base module that encodes rules in Prolog. Object decomposition focuses the recognition problem, and the rule base provides a powerful and flexible way to encode the rules of a particular manuscript class. Our rule-base registers notes and stems, eliminates false-positives and correctly labels notes according to their position on the staff. We present results that show 99{\%} accuracy at detecting note-heads and 95{\%} accuracy in finding stems.},
  doi = {10.1007/3-540-60697-1_101},
  file = {:pdfs/1995 - Interpreting Music Manuscripts_ a Logic Based, Object Oriented Approach.pdf:PDF},
  isbn = {978-3-540-49298-6}
}
@inproceedings{Sebastien2012,
  author = {S{\'{e}}bastien, V{\'{e}}ronique and Ralambondrainy, Henri and S{\'{e}}bastien, Olivier and Conruyt, No{\"{e}}l},
  booktitle = {13th International Society for Music Information Retrieval Conference},
  title = {Score Analyzer: Automatically Determining Scores Difficulty Level for Instrumental e-Learning},
  year = {2012},
  address = {Porto, Portugal},
  editor = {Fabien Gouyon and Perfecto Herrera and Luis Gustavo Martins and Meinard M{\"{u}}ller},
  pages = {571--576},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/SebastienRSC12},
  file = {:pdfs/2012 - Score Analyzer - Automatically Determining Scores Difficulty Level for Instrumental E-Learning.pdf:PDF},
  url = {http://ismir2012.ismir.net/event/papers/571-ismir-2012.pdf}
}
@inproceedings{Sharif2009,
  author = {Sharif, Muhammad and Arshad, Quratul-Ain and Raza, Mudassar and Khan, Wazir Zada},
  booktitle = {7th International Conference on Frontiers of Information Technology},
  title = {{COMSCAN}: An Optical Music Recognition System},
  year = {2009},
  organization = {ACM},
  pages = {34},
  doi = {10.1145/1838002.1838040},
  file = {:pdfs/2009 - COMSCAN An optical Music Recognition System.pdf:PDF}
}
@inproceedings{Shatri2021,
  author = {Shatri, Elona and Fazekas, Gy{\"o}rgy},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {DoReMi: First glance at a universal {OMR} dataset},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {43--49},
  file = {:pdfs/2021 - DoReMi_ First Glance at a Universal OMR Dataset.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@inproceedings{Sheridan2004,
  author = {Sheridan, Scott and George, Susan E.},
  booktitle = {2nd Australian Undergraduate Students' Computing Conference},
  title = {Defacing Music Scores for Improved Recognition},
  year = {2004},
  pages = {142--148},
  abstract = {The area of Optical Music Recognition (OMR) has long been plagued by an inability to provide a definitive method for locating and identifying musical objects superimposed on musical stave lines. The first step in the process of recognising musical symbols in OMR has previously been to either remove the stave lines, or ignore them. Removing stave lines leads to many problems of fragmented and deformed musical symbols, or in the case of ignoring them, a lowered chance of recognition. Most OMR systems attempt to correct these deficiencies later on in the process through many varied approaches including bounding box analysis, k-nearest-neighbour (k-NN) and neural network (ANN) classification schemes. All of these have a level of success, but none have provided nearly the desired level of accuracy.

This paper aims to show that this removal of the stave lines before symbol recognition is not the only first step and may not be the best. Instead of removing stave lines, more should be added! This process is called ‘defacing’ since it adds stave lines to the score at a 1/2 stave line width, and actually overwrites the score - apparently complicating the recognition procedure. However, the addition of signal to the image means that subsequent symbol recognition is ‘normalised’ and a musical symbol will look the same whether it was above, below or on a stave line. As a result of this, a classification system trained with double stave lines should provide a higher level of accuracy than the traditional approaches of removing/ignoring the stave lines.},
  file = {:pdfs/2004 - Defacing Music Scores for Improved Recognition.pdf:PDF},
  url = {https://sites.google.com/site/theauscc/auscc04/papers/sheridan-auscc04.pdf}
}
@article{Shi2017,
  author = {Shi, Baoguang and Bai, Xiang and Yao, Cong},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  title = {An End-to-End Trainable Neural Network for Image-Based Sequence Recognition and Its Application to Scene Text Recognition},
  year = {2017},
  issn = {0162-8828},
  number = {11},
  pages = {2298--2304},
  volume = {39},
  abstract = {Image-based sequence recognition has been a long-standing research topic in computer vision. In this paper, we investigate the problem of scene text recognition, which is among the most important and challenging tasks in image-based sequence recognition. A novel neural network architecture, which integrates feature extraction, sequence modeling and transcription into a unified framework, is proposed. Compared with previous systems for scene text recognition, the proposed architecture possesses four distinctive properties: (1) It is end-to-end trainable, in contrast to most of the existing algorithms whose components are separately trained and tuned. (2) It naturally handles sequences in arbitrary lengths, involving no character segmentation or horizontal scale normalization. (3) It is not confined to any predefined lexicon and achieves remarkable performances in both lexicon-free and lexicon-based scene text recognition tasks. (4) It generates an effective yet much smaller model, which is more practical for real-world application scenarios. The experiments on standard benchmarks, including the IIIT-5K, Street View Text and ICDAR datasets, demonstrate the superiority of the proposed algorithm over the prior arts. Moreover, the proposed algorithm performs well in the task of image-based music score recognition, which evidently verifies the generality of it.},
  doi = {10.1109/TPAMI.2016.2646371},
  file = {:pdfs/2017 - An End to End Trainable Neural Network for Image Based Sequence Recognition and Its Application to Scene Text Recognition.pdf:PDF},
  keywords = {computer vision;feature extraction;image recognition;image segmentation;learning (artificial intelligence);music;neural nets;text detection;sequence modeling;transcription;end-to-end trainable neural network;neural network architecture;image-based sequence recognition;computer vision;feature extraction;lexicon-free scene text recognition tasks;lexicon-based scene text recognition tasks;IIIT-5K datasets;street view text datasets;image-based music score recognition;ICDAR datasets;Feature extraction;Text recognition;Neural networks;Image recognition;Logic gates;Convolutional codes;Context;Sequence recognition;scene text recognition;neural network;convolutional neural network;long-short term memory;optical music recognition}
}
@inproceedings{Sicard1992,
  author = {Sicard, Etienne},
  booktitle = {11th International Conference on Pattern Recognition},
  title = {An efficient method for the recognition of printed music},
  year = {1992},
  pages = {573--576},
  abstract = {Deals with the recognition mechanisms of printed music scores. The techniques for extracting linear features, keys, noteheads and other musical figures from a digitized image are presented. Experimental results are given to show the effectiveness of the proposed methodology with a discussion of its performances and limits. Applications to full automated music score extraction, printed or handwritten are also discussed.},
  doi = {10.1109/ICPR.1992.202052},
  file = {:pdfs/1992 - An Efficient Method for the Recognition of Printed Music.pdf:PDF},
  keywords = {feature extraction;music;pattern recognition;pattern recognition;feature extraction;handwritten music;printed music scores;linear features;keys;noteheads;digitized image;Multiple signal classification;Feature extraction;Image converters;Pattern recognition;Histograms;Handwriting recognition;Real time systems;Machine vision;Robot vision systems;Robotics and automation}
}
@mastersthesis{Silva2013,
  author = {Silva, Rui Miguel Filipe da},
  school = {Universidade do Porto},
  title = {Mobile framework for recognition of musical characters},
  year = {2013},
  file = {:pdfs/2013 - Mobile framework for recognition of musical characters - Master Thesis.pdf:PDF;:2013 - Mobile Framework for Recognition of Musical Characters - Poster.pdf:PDF},
  url = {https://repositorio-aberto.up.pt/bitstream/10216/68500/2/26777.pdf}
}
@misc{SmartScore,
  author = {Musitek},
  howpublished = {\url{http://www.musitek.com/smartscore-pro.html}},
  title = {SmartScore X2},
  year = {2017},
  url = {http://www.musitek.com/smartscore-pro.html}
}
@inproceedings{Smiatacz2008,
  author = {Smiatacz, Maciej and Malina, Witold},
  booktitle = {1st International Conference on Information Technology},
  title = {Matrix-based classifiers applied to recognition of musical notation symbols},
  year = {2008},
  pages = {1--4},
  abstract = {The paper presents the application of matrix-based classifiers to the problem of automatic recognition of musical notation symbols. The idea of classification algorithms operating on matrices instead of feature vectors is briefly introduced together with a short description of methods that we have recently proposed. The experiments that we report show that the matrix-based approach can be used to improve the effectiveness and usefulness of the OMR system developed in our department as a part of the digital library of musical documents.},
  doi = {10.1109/INFTECH.2008.4621678},
  file = {:pdfs/2008 - Matrix Based Classifiers Applied to Recognition of Musical Notation Symbols.pdf:PDF},
  keywords = {image classification;matrix algebra;music;optical character recognition;matrix-based classifier;musical notation symbol recognition;optical music recognition;musical document;Classification algorithms;Training;Covariance matrix;Support vector machine classification;Entropy;Feature extraction;Pixel}
}
@inproceedings{Soak2002,
  author = {Soak, Sang Moon and Chang, Seok Cheol and Shin, Taehwan and Ahn, Byung-Ha},
  booktitle = {AeroSense 2002},
  title = {Music recognition system using {ART}-1 and {GA}},
  year = {2002},
  abstract = {Previously, most optical music recognition (OMR) systems have used the neural network, and used mainly back-propagation training method. One of the disadvantages of BP is that much time is required to train data sets. For example, when new data sets are added, all data sets have to be trained. Another disadvantage is that weighting values cannot be guaranteed as global optima after training them. It means that weighting values can fall down to local optimum solution. In this paper, we propose the new OMR method which combines the adaptive resonance theory (ART-1) with the genetic algorithms (GA). For reducing the training time, we use ART-1 which classifies several music symbols. It has another advantage to reduce the number of datasets, because classified symbols through ART-1 are used as input vectors of BP. And for guaranteeing the global optima in training data set, we use GA which is known as one of the best method for finding optimal solutions at complex problems.},
  doi = {10.1117/12.458413}
}
@inproceedings{Sober-Mira2017,
  author = {Sober-Mira, Javier and Calvo-Zaragoza, Jorge and Rizo, David and I{\~{n}}esta, Jos{\'{e}} Manuel},
  booktitle = {14th International Conference on Document Analysis and Recognition},
  title = {Pen-Based Music Document Transcription},
  year = {2017},
  address = {Kyoto, Japan},
  organization = {IEEE},
  pages = {21--22},
  doi = {10.1109/ICDAR.2017.258},
  file = {:pdfs/2017 - Pen-based Music Document Transcription.pdf:PDF}
}
@inproceedings{Sober-Mira2017a,
  author = {Sober-Mira, Javier and Calvo-Zaragoza, Jorge and Rizo, David and I{\~{n}}esta, Jos{\'{e}} Manuel},
  booktitle = {10th International Workshop on Machine Learning and Music},
  title = {Multimodal Recognition for Music Document Transcription},
  year = {2017},
  address = {Barcelona, Spain},
  file = {:pdfs/2017 - Multimodal Recognition for Music Document Transcription.pdf:PDF},
  url = {https://grfia.dlsi.ua.es/repositori/grfia/pubs/380/mml17proceedings-67.pdf}
}
@article{Sotoodeh2017,
  author = {Sotoodeh, Mahmood and Tajeripour, Farshad and Teimori, Sadegh and Jorgensen, Kirk},
  journal = {Multimedia Tools and Applications},
  title = {A music symbols recognition method using pattern matching along with integrated projection and morphological operation techniques},
  year = {2018},
  issn = {1573-7721},
  number = {13},
  pages = {16833--16866},
  volume = {77},
  abstract = {Optical Music Recognition (OMR) can be divided into three main phases: (i) staff line detection and removal. The goal of this phase is to detect and to remove staff lines from sheet music images. (ii) music symbol detection and segmentation. The purpose of this phase is to detect the remaining musical symbols such as single symbols and group symbols, then segment the group symbols to single or primitive symbols after removing staff lines. (iii) musical symbols recognition. In this phase, recognition of musical symbols is the main objective. The method presented in this paper covers all three phases. One advantage of the first phase of the proposed method is that it is robust to staff lines rotation and staff lines which have curvature in sheet music images. Moreover, the staff lines are removed accurately and quickly and also fewer details of the musical symbols are omitted. The proposed method in the first phase focuses on the hand-written documents databases which have been introduced in the CVC-MUSCIMA and ICDAR 2013. It has the lowest error rate among well-known methods and outperforms the state of the art in CVC-MUSCIMA database. In ICDAR 2013, the specificity measure of this method is 99.71{\%} which is the highest specificity among available methods. Also, in terms of accuracy, recall rate and f-measure it is only slightly less than the best method. Therefore our method compares favorably to the existing methods. In the second phase, the symbols are divided into two categories, single and group. In the recognition phase, we use a pattern matching method to identify single symbols. For recognizing group symbols, a hierarchical method is proposed. The proposed method in the third phase has several advantages over the previous methods. It is quite robust to skewness of musical group symbols. Furthermore, it provides high accuracy in recognition of the symbols.},
  doi = {10.1007/s11042-017-5256-y},
  file = {:pdfs/2017 - A music symbols recognition method using pattern matching along with integrated projection and morphological operation techniques.pdf:PDF}
}
@misc{StaffPad,
  author = {{StaffPad Ltd.}},
  howpublished = {\url{http://www.staffpad.net}},
  title = {StaffPad},
  year = {2017},
  url = {http://www.staffpad.net}
}
@article{Stevens1992,
  author = {Stevens, Catherine and Latimer, Cyril},
  journal = {Minds and Machines},
  title = {A comparison of connectionist models of music recognition and human performance},
  year = {1992},
  issn = {1572-8641},
  number = {4},
  pages = {379--400},
  volume = {2},
  abstract = {Current artificial neural network or connectionist models of music cognition embody feature-extraction and feature-weighting principles. This paper reports two experiments which seek evidence for similar processes mediating recognition of short musical compositions by musically trained and untrained listeners. The experiments are cast within a pattern recognition framework based on the vision-audition analogue wherein music is considered an auditory pattern consisting of local and global features. Local features such as inter-note interval, and global features such as melodic contour, are derived from a two-dimensional matrix in which music is represented as a series of frequencies plotted over time.},
  doi = {10.1007/BF00419420},
  url = {https://doi.org/10.1007/BF00419420}
}
@techreport{Stramer2014,
  author = {Stramer, Tal},
  institution = {Stanford University},
  title = {Digitizing sheet music},
  year = {2014},
  file = {:pdfs/2014 - Digitizing sheet music.pdf:PDF},
  url = {https://web.stanford.edu/class/ee368/Project_Spring_1415/Reports/Stramer.pdf}
}
@inproceedings{Su2001,
  author = {Su, Mu-Chun and Tew, Chee-Yuen and Chen, Hsin-Hua},
  booktitle = {Joint 9th IFSA World Congress and 20th NAFIPS International Conference},
  title = {Musical symbol recognition using SOM-based fuzzy systems},
  year = {2001},
  pages = {2150--2153 vol.4},
  abstract = {A large number of research activities have been undertaken to investigate optical music recognition (OMR). OMR involves identifying musical symbols on a scanned sheet of music and transforming them into a computer readable format. We propose an efficient method based on SOM-based fuzzy systems to recognize musical symbols. A database consisting of 9 kinds of musical symbols were used to test the performance of the SOM-based fuzzy systems.},
  doi = {10.1109/NAFIPS.2001.944402},
  file = {:pdfs/2001 - Musical Symbol Recognition Using SOM Based Fuzzy Systems.pdf:PDF},
  keywords = {music;optical character recognition;document image processing;self-organising feature maps;fuzzy neural nets;visual databases;performance evaluation;musical symbol recognition;neurofuzzy systems;pattern recognition;optical music recognition;computer readable format;fuzzy SOM systems;fuzzy self organising feature maps;optical character recognition;performance evaluation;database;Fuzzy systems;Ordinary magnetoresistance;Optical character recognition software;Fuzzy neural networks;Pattern recognition;Neural networks;Optical computing;Computational efficiency;Vector quantization;Computer science}
}
@inproceedings{Su2012,
  author = {Su, Bolan and Lu, Shijian and Pal, Umapada and Tan, Chew Lim},
  booktitle = {10th International Workshop on Document Analysis Systems},
  title = {An effective staff detection and removal technique for musical documents},
  year = {2012},
  organization = {IEEE},
  pages = {160--164},
  abstract = {Musical staff line detection and removal techniques detect the staff positions in musical documents and segment musical score from musical documents by removing those staff lines. It is an important preprocessing step for ensuing the Optical Music Recognition ...},
  doi = {10.1109/DAS.2012.16},
  file = {:pdfs/2012 - An Effective Staff Detection and Removal Technique for Musical Documents.pdf:PDF},
  isbn = {9780769546612},
  keywords = {Musical Staff, Optical Music Recognition, Staff Line Removal, Staff Line Segmentation, Staff Line Shape Modeling, to classify}
}
@inproceedings{Sugano1987,
  author = {Sugano, Shigeki and Kato, Ichiro},
  booktitle = {IEEE International Conference on Robotics and Automation},
  title = {{WABOT}-2: Autonomous robot with dexterous finger-arm--Finger-arm coordination control in keyboard performance},
  year = {1987},
  pages = {90--97},
  abstract = {Advanced robots will have to not only have 'hard' functions but also have 'soft' functions. Therefore, the purpose of this study is to realize 'soft' functions of robots such as dexterity, speediness and intelligence by the development of an anthropomorphic intelligent robot playing keyboard instrument. This paper describes the development of keyboard playing robot WABOT-2(WAseda roBOT-2) with a focus on the mechanisms of arm-and-hand which has 21 degrees of freedom in total, their hierarchically structured control computer system, the information processing method at the high level computer and finger-arm coordination control which realizes the autonomous movement of WABOT-2.},
  doi = {10.1109/ROBOT.1987.1088025},
  file = {:pdfs/1987 - WABOT 2 - Autonomous Robot with Dexterous Finger Arm Finger Arm Coordination Control in Keyboard Performance.pdf:PDF},
  keywords = {Robot kinematics;Keyboards;Fingers;Humans;Intelligent robots;Information processing;Anthropomorphism;Service robots;Control systems;Humanoid robots}
}
@inproceedings{Szwoch2005,
  author = {Szwoch, Mariusz},
  booktitle = {Computer Analysis of Images and Patterns},
  title = {A Robust Detector for Distorted Music Staves},
  year = {2005},
  address = {Berlin, Heidelberg},
  editor = {Gagalowicz, Andr{\'e} and Philips, Wilfried},
  pages = {701--708},
  publisher = {Springer Berlin Heidelberg},
  abstract = {In this paper an algorithm for music staves detection is presented. The algorithm bases on horizontal projections in local windows of a score image and farther processing of resulting histograms and their connections. Experiments carried out, proved high efficiency of presented algorithm and its robustness in case of non-ideal staff lines: skew and with barrel and pincushion distortions. The algorithm allows for usage of acquisition devices alternative to scanner such as digital cameras.},
  doi = {10.1007/11556121_86},
  file = {:pdfs/2005 - A Robust Detector for Distorted Music Staves.pdf:PDF},
  isbn = {978-3-540-32011-1}
}
@inproceedings{Szwoch2007,
  author = {Szwoch, Mariusz},
  booktitle = {9th International Conference on Document Analysis and Recognition},
  title = {Guido: A Musical Score Recognition System},
  year = {2007},
  pages = {809--813},
  abstract = {This paper presents an optical music recognition system Guido that can automatically recognize the main musical symbols of music scores that were scanned or taken by a digital camera. The application is based on object model of musical notation and uses linguistic approach for symbol interpretation and error correction. The system offers musical editor with a partially automatic error correction.},
  doi = {10.1109/ICDAR.2007.4377027},
  file = {:pdfs/2007 - Guido - a Musical Score Recognition System.pdf:PDF},
  issn = {1520-5363},
  keywords = {error correction;image recognition;linguistics;music;Guido;musical score recognition system;optical music recognition system;musical symbols;digital camera;linguistic;symbol interpretation;automatic error correction;Error correction;Ordinary magnetoresistance;Nonlinear optics;Digital cameras;Image segmentation;Books;Optical devices;Optical distortion;Speech analysis;Performance analysis}
}
@inproceedings{Szwoch2008,
  author = {Szwoch, Mariusz},
  booktitle = {International Conference on Theory and Application of Diagrams},
  title = {Using {MusicXML} to Evaluate Accuracy of {OMR} Systems},
  year = {2008},
  address = {Herrsching, Germany},
  organization = {Springer},
  pages = {419--422},
  publisher = {Springer-Verlag},
  abstract = {In this paper a methodology for automatic accuracy evaluation in optical music recognition (OMR) applications is proposed. Presented approach assumes using ground truth images together with digital music scores describing their content. The automatic evaluation algorithm measures differences between the tested score and the reference one, both stored in MusicXML format. Some preliminary test results of this approach are presented based on the algorithm’s implementation in OMR Guido application.},
  acmid = {1432584},
  doi = {10.1007/978-3-540-87730-1_53},
  file = {:pdfs/2008 - Using MusicXML to Evaluate Accuracy of OMR Systems.pdf:PDF},
  groups = {evaluation},
  isbn = {978-3-540-87729-5},
  keywords = {MusicXML, Optical Music Recognition, Performance of Systems}
}
@inproceedings{Taele2015,
  author = {Taele, Paul and Barreto, Laura and Hammond, Tracy},
  booktitle = {27th Conference on Innovative Applications of Artificial Intelligence},
  title = {Maestoso: An Intelligent Educational Sketching Tool for Learning Music Theory},
  year = {2015},
  address = {Austin, Texas},
  pages = {3999--4005},
  publisher = {AAAI Press},
  abstract = {Learning music theory not only has practical benefits for musicians
	to write, perform, understand, and express music better, but also
	for both non-musicians to improve critical thinking, math analytical
	skills, and music appreciation. However, current external tools applicable
	for learning music theory through writing when human instruction
	is unavailable are either limited in feedback, lacking a written
	modality, or assuming already strong familiarity of music theory
	concepts. In this paper, we describe Maestoso, an educational tool
	for novice learners to learn music theory through sketching practice
	of quizzed music structures. Maestoso first automatically recognizes
	students' sketched input of quizzed concepts, then relies on existing
	sketch and gesture recognition techniques to automatically recognize
	the input, and finally generates instructor-emulated feedback. From
	our evaluations, we demonstrate that Maestoso performs reasonably
	well on recognizing music structure elements and that novice students
	can comfortably grasp introductory music theory in a single session.},
  acmid = {2888271},
  file = {:pdfs/2015 - Maestoso - An intelligent educational sketching tool for learning music theory.pdf:PDF},
  isbn = {0-262-51129-0},
  url = {http://dl.acm.org/citation.cfm?id=2888116.2888271}
}
@inproceedings{Tambouratzis2011,
  author = {Tambouratzis, Tatiana},
  booktitle = {International Joint Conference on Neural Networks},
  title = {Identification of key music symbols for optical music recognition and on-screen presentation},
  year = {2011},
  pages = {1935--1942},
  abstract = {A novel optical music recognition (OMR) system is put forward, where the custom-made on-screen presentation of the music score (MS) is promoted via the recognition of key music symbols only. The proposed system does not require perfect manuscript alignment or noise removal. Following the segmentation of each MS page into systems and, subsequently, into staves, staff lines, measures and candidate music symbols (CMS's), music symbol recognition is limited to the identification of the clefs, accidentals and time signatures. Such an implementation entails significantly less computational effort than that required by classic OMR systems, without an observable compromise in the quality of the on-screen presentation of the MS. The identification of the music symbols of interest is performed via probabilistic neural networks (PNN's), which are trained on a small set of exemplars from the MS itself. The initial results are promising in terms of efficiency, identification accuracy and quality of viewing.},
  doi = {10.1109/IJCNN.2011.6033461},
  file = {:pdfs/2011 - Identification of Key Music Symbols for Optical Music Recognition and on Screen Presentation.pdf:PDF},
  issn = {2161-4407},
  keywords = {image recognition;image segmentation;music;neural nets;key music symbol identification;optical music recognition;on-screen music score presentation;MS segmentation;probabilistic neural networks;Image segmentation;Training;Accuracy;Noise;Educational institutions;Multiple signal classification;Time measurement}
}
@article{Tambouratzis2013,
  author = {Tambouratzis, Tatiana},
  journal = {International Journal of Intelligent Systems},
  title = {The Digital Music Stand as a Minimal Processing Custom-Made Optical Music Recognition System, Part 1: Key Music Symbol Recognition},
  year = {2013},
  issn = {0884-8173},
  number = {5},
  pages = {474--504},
  volume = {28},
  abstract = {The digital music stand is proposed as a minimal-processing optical music recognition implementation, where music score ({MS}) presentation is realized without prior alignment, noise, or staff line removal. After each {MS} page is segmented into systems, staves, measures, and candidate music symbols, music symbol recognition is accomplished via probabilistic neural networks: Only the key music symbols (namely clefs, global accidentals, time signatures) of the {MS} are identified, while the remaining music symbols are generally classified. Subsequently, satisfactory quality of on-screen {MS} viewing is accomplished via the concatenation and/or substitution of appropriately selected parts and isolated music symbols of the original {MS}. In this piece of research, the processing stages leading to on-screen {MS} presentation are detailed. © 2013 Wiley Periodicals, Inc.},
  doi = {10.1002/int.21586},
  file = {:pdfs/2013 - The Digital Music Stand As a Minimal Processing Custom Made Optical Music Recognition System.pdf:PDF}
}
@article{Tardon2009,
  author = {Tard{\'o}n, Lorenzo J. and Sammartino, Simone and Barbancho, Isabel and G{\'o}mez, Ver{\'o}nica and Oliver, Antonio},
  journal = {EURASIP Journal on Image and Video Processing},
  title = {Optical Music Recognition for Scores Written in White Mensural Notation},
  year = {2009},
  issn = {1687-5281},
  number = {1},
  pages = {843401},
  volume = {2009},
  abstract = {An Optical Music Recognition (OMR) system especially adapted for handwritten musical scores of the XVII-th and the early XVIII-th centuries written in white mensural notation is presented. The system performs a complete sequence of analysis stages: the input is the RGB image of the score to be analyzed and, after a preprocessing that returns a black and white image with corrected rotation, the staves are processed to return a score without staff lines; then, a music symbol processing stage isolates the music symbols contained in the score and, finally, the classification process starts to obtain the transcription in a suitable electronic format so that it can be stored or played. This work will help to preserve our cultural heritage keeping the musical information of the scores in a digital format that also gives the possibility to perform and distribute the original music contained in those scores.},
  doi = {10.1155/2009/843401},
  file = {:pdfs/2009 - Optical Music Recognition for Scores Written in White Mensural Notation.pdf:PDF},
  isbn = {1687-5176}
}
@article{Tardon2020,
  author = {Tard{\'{o}}n, Lorenzo J. and Barbancho, Isabel and Barbancho, Ana M. and Fujinaga, Ichiro},
  journal = {Applied Sciences},
  title = {Automatic Staff Reconstruction within SIMSSA Project},
  year = {2020},
  number = {7},
  pages = {2468--2484},
  volume = {10},
  doi = {10.3390/app10072468},
  file = {:pdfs/2020 - Automatic Staff Reconstruction within SIMSSA Project.pdf:PDF},
  url = {https://www.mdpi.com/2076-3417/10/7/2468}
}
@techreport{Taubman2005,
  author = {Taubman, Gabriel},
  institution = {Brown University},
  title = {MusicHand: A Handwritten Music Recognition System},
  year = {2005},
  file = {:pdfs/2005 - MusicHand - a Handwritten Music Recognition System.pdf:PDF},
  url = {http://static.cs.brown.edu/research/pubs/theses/ugrad/2005/gtaubman.pdf}
}
@inproceedings{Thomae2019,
  author = {Thomae, Martha E. and Cumming, Julie E. and Fujinaga, Ichiro},
  booktitle = {6th International Conference on Digital Libraries for Musicology},
  title = {The Mensural Scoring-up Tool},
  year = {2019},
  address = {New York, NY, USA},
  pages = {9--19},
  publisher = {Association for Computing Machinery},
  series = {DLfM ’19},
  doi = {10.1145/3358664.3358668},
  file = {:pdfs/2019 - The Mensural Scoring up Tool.pdf:PDF},
  isbn = {9781450372398},
  keywords = {Mensural MEI, mensural notation, parts to score transformation, encoding, automatic transcription},
  location = {The Hague, Netherlands},
  numpages = {11},
  url = {https://doi.org/10.1145/3358664.3358668}
}
@inproceedings{Thompson2011,
  author = {Thompson, Jessica and Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {12th International Society for Music Information Retrieval Conference},
  title = {Searching the Liber Usualis: Using {CouchDB} and ElasticSearch to Query Graphical Music Documents},
  year = {2011},
  file = {:pdfs/2011 - Searching the Liber Usualis - Using CouchDB and ElasticSearch to Query Graphical Music Documents.pdf:PDF},
  url = {http://ismir2011.ismir.net/latebreaking/LB-10.pdf}
}
@inproceedings{Timofte2013,
  author = {Timofte, Radu and Van Gool, Luc},
  booktitle = {Computer Vision -- ACCV 2012},
  title = {Automatic Stave Discovery for Musical Facsimiles},
  year = {2013},
  address = {Berlin, Heidelberg},
  editor = {Lee, Kyoung Mu and Matsushita, Yasuyuki and Rehg, James M. and Hu, Zhanyi},
  pages = {510--523},
  publisher = {Springer Berlin Heidelberg},
  abstract = {Lately, there is an increased interest in the analysis of music score facsimiles, aiming at automatic digitization and recognition. Noise, corruption, variations in handwriting, non-standard page layouts and notations are common problems affecting especially the centuries-old manuscripts.},
  doi = {10.1007/978-3-642-37447-0_39},
  file = {:pdfs/2013 - Automatic Stave Discovery for Musical Facsimiles.pdf:PDF},
  isbn = {978-3-642-37447-0}
}
@inproceedings{Torras2022,
  author = {Torras, Pau and Bar{\'{o}}, Arnau and Kang, Lei and Forn{\'{e}}s, Alicia},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {Improving Handwritten Music Recognition through Language Model Integration},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - Improving Handwritten Music Recognition through Language Model Integration.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{Toyama2006,
  author = {Toyama, Fubito and Shoji, Kenji and Miyamichi, Juichi},
  booktitle = {18th International Conference on Pattern Recognition},
  title = {Symbol Recognition of Printed Piano Scores with Touching Symbols},
  year = {2006},
  pages = {480--483},
  abstract = {To build a music database efficiently, an automatic score recognition system is a critical component. Many previous methods are applicable only to some simple music scores. In case of complex music scores it becomes difficult to detect symbols correctly because of noise and connection between symbols included in the scores. In this paper, we propose a score recognition method which is applicable to the complex music scores. Symbol candidates are detected by template matching. From these candidates correct symbols are selected by considering their relative positions and mutual connections. Under the presence of noise and connected symbols, the proposed method outperformed "Score Maker" which is an optical music score recognition software},
  doi = {10.1109/ICPR.2006.1099},
  file = {:pdfs/2006 - Symbol Recognition of Printed Piano Scores with Touching Symbols.pdf:PDF},
  issn = {1051-4651},
  keywords = {image matching;music;symbol recognition;printed piano scores;touching symbols;music database;automatic score recognition system;music scores;template matching;Head;Ordinary magnetoresistance;Optical noise;Databases;Optical character recognition software;Noise shaping;Image recognition;Data engineering;Software performance;Character recognition}
}
@article{Tsai2020,
  author = {Tsai, Timothy J. and Yang, Daniel and Shan, Mengyi and Tanprasert, Thitaree and Jenrungrot, Teerapat},
  journal = {IEEE Transactions on Multimedia},
  title = {Using Cell Phone Pictures of Sheet Music To Retrieve MIDI Passages},
  year = {2020},
  pages = {1--13},
  doi = {10.1109/TMM.2020.2973831},
  file = {:pdfs/2020 - Using Cell Phone Pictures of Sheet Music to Retrieve MIDI Passages.pdf:PDF},
  url = {https://arxiv.org/abs/2004.11724}
}
@inproceedings{Tsandilas2012,
  author = {Tsandilas, Theophanis},
  booktitle = {25th Annual ACM Symposium on User Interface Software and Technology},
  title = {Interpreting Strokes on Paper with a Mobile Assistant},
  year = {2012},
  address = {Cambridge, Massachusetts, USA},
  pages = {299--308},
  publisher = {ACM},
  acmid = {2380155},
  doi = {10.1145/2380116.2380155},
  file = {:pdfs/2012 - Interpreting Strokes on Paper with a Mobile Assistant.pdf:PDF},
  isbn = {978-1-4503-1580-7},
  keywords = {bimanual interaction, interactive recognition, mobile, music interfaces, paper interfaces, pen + touch}
}
@inproceedings{Tuggener2018,
  author = {Tuggener, Lukas and Elezi, Ismail and Schmidhuber, J{\"{u}}rgen and Pelillo, Marcello and Stadelmann, Thilo},
  booktitle = {24th International Conference on Pattern Recognition},
  title = {DeepScores - A Dataset for Segmentation, Detection and Classification of Tiny Objects},
  year = {2018},
  address = {Beijing, China},
  abstract = {We present the DeepScores dataset with the goal of advancing the state-of-the-art in small objects recognition, and by placing the question of object recognition in the context of scene understanding. DeepScores contains high quality images of musical scores, partitioned into 300,000 sheets of written music that contain symbols of different shapes and sizes. With close to a hundred millions of small objects, this makes our dataset not only unique, but also the largest public dataset. DeepScores comes with ground truth for object classification, detection and semantic segmentation. DeepScores thus poses a relevant challenge for computer vision in general, beyond the scope of optical music recognition (OMR) research. We present a detailed statistical analysis of the dataset, comparing it with other computer vision datasets like Caltech101/256, PASCAL VOC, SUN, SVHN, ImageNet, MS-COCO, smaller computer vision datasets, as well as with other OMR datasets. Finally, we provide baseline performances for object classification and give pointers to future research based on this dataset.},
  doi = {10.21256/zhaw-4255},
  file = {:pdfs/2018 - DeepScores a Dataset for Segmentation, Detection and Classification of Tiny Objects.pdf:PDF},
  school = {ZHAW},
  url = {https://arxiv.org/abs/1804.00525}
}
@inproceedings{Tuggener2018a,
  author = {Tuggener, Lukas and Elezi, Ismail and Schmidhuber, J{\"{u}}rgen and Stadelmann, Thilo},
  booktitle = {19th International Society for Music Information Retrieval Conference},
  title = {Deep Watershed Detector for Music Object Recognition},
  year = {2018},
  address = {Paris, France},
  pages = {271--278},
  file = {:pdfs/2018 - Deep Watershed Detector for Music Object Recognition.pdf:PDF},
  isbn = {978-2-9540351-2-3},
  url = {http://ismir2018.ircam.fr/doc/pdfs/225_Paper.pdf}
}
@inproceedings{Tuggener2020,
  author = {Tuggener, Lukas and Satyawan, Yvan Putra and Pacha, Alexander and Schmidhuber, J{\"{u}}rgen and Stadelmann, Thilo},
  booktitle = {Proceedings of the 25th International Conference on Pattern Recognition},
  title = {The DeepScoresV2 Dataset and Benchmark for Music Object Detection},
  year = {2020},
  address = {Milan, Italy},
  abstract = {In this paper, we present DeepScoresV2, an extended version of the DeepScores dataset for optical music recognition (OMR). We improve upon the original DeepScores dataset by providing much more detailed annotations, namely (a) annotations for 135 classes including fundamental symbols of non-fixed size and shape, increasing the number of annotated symbols by 23%; (b) oriented bounding boxes; (c) higher-level rhythm and pitch information (onset beat for all symbols and line position for noteheads); and (d) a compatibility mode for easy use in conjunction with the MUSCIMA++ dataset for OMR on handwritten documents. These additions open up the potential for future advancement in OMR research. Additionally, we release two state-of-the-art baselines for DeepScoresV2 based on Faster R-CNN and the Deep Watershed Detector. An analysis of the baselines shows that regular orthogonal bounding boxes are unsuitable for objects which are long, small, and potentially rotated, such as ties and beams, which demonstrates the need for detection algorithms that naturally incorporate object angles.},
  doi = {10.21256/zhaw-20647}
}
@techreport{Vidal2012,
  author = {Vidal, Vitor Hugo Couto},
  institution = {Universidade do Porto},
  title = {Optical Music Recognition in the grey-scale domain},
  year = {2012},
  file = {:pdfs/2012 - Optical Music Recognition in the Grey Scale Domain.pdf:PDF},
  url = {https://paginas.fe.up.pt/~ee03270/downloads/FinalReportPDI.pdf}
}
@inproceedings{Vieira2001,
  author = {Vieira, Pedro and Pinto, Jo{\~a}o Caldas},
  booktitle = {International Conference on Image Processing},
  title = {Recognition of musical symbols in ancient manuscripts},
  year = {2001},
  pages = {38--41 vol.3},
  abstract = {This paper presents a system for the automatic retrieval of music from ancient music collections (XVI-XVIII century), creating digital documents of music from images of music sheets. This is an optical music recognition system that uses image processing and pattern recognition techniques. Finally, we obtain a document that contains the music semantics: description of the notes, in time and pitches, as well as other relevant information.},
  doi = {10.1109/ICIP.2001.958045},
  file = {:pdfs/2001 - Recognition of Musical Symbols in Ancient Manuscripts.pdf:PDF},
  keywords = {music;optical character recognition;image segmentation;feature extraction;image classification;pattern clustering;Bayes methods;automatic music retrieval;ancient music collections;digital documents;optical music recognition;pattern recognition;image processing;music semantics;image segmentation;Bayes classifier;feature extraction;best class cluster search;Image segmentation;Optical character recognition software;Image recognition;Ordinary magnetoresistance;Pattern recognition;Cleaning;Image reconstruction;Bars;Labeling;Software libraries}
}
@inproceedings{Vigliensoni2011,
  author = {Vigliensoni, Gabriel and Burgoyne, John Ashley and Hankinson, Andrew and Fujinaga, Ichiro},
  booktitle = {12th International Society for Music Information Retrieval Conference},
  title = {Automatic Pitch Detection in Printed Square Notation},
  year = {2011},
  address = {Miami, Florida},
  editor = {Anssi Klapuri and Colby Leider},
  pages = {423--428},
  publisher = {University of Miami},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/ismir/VigliensoniBHF11},
  file = {:pdfs/2011 - Automatic Pitch Detection in Printed Square Notation.pdf:PDF},
  url = {http://ismir2011.ismir.net/papers/PS3-12.pdf}
}
@inproceedings{Vigliensoni2013,
  author = {Vigliensoni, Gabriel and Burlet, Gregory and Fujinaga, Ichiro},
  booktitle = {14th International Society for Music Information Retrieval Conference},
  title = {Optical measure recognition in common music notation},
  year = {2013},
  address = {Curitiba, Brazil},
  file = {:pdfs/2013 - Optical Measure Recognition in Common Music Notation.pdf:PDF},
  url = {http://ismir2013.ismir.net/wp-content/uploads/2013/09/207_Paper.pdf}
}
@inproceedings{Vigliensoni2018,
  author = {Vigliensoni, Gabriel and Calvo-Zaragoza, Jorge and Fujinaga, Ichiro},
  booktitle = {1st International Workshop on Reading Music Systems},
  title = {Developing an environment for teaching computers to read music},
  year = {2018},
  address = {Paris, France},
  editor = {Calvo-Zaragoza, Jorge and Haji{\v{c}} jr., Jan and Pacha, Alexander},
  pages = {27--28},
  file = {:pdfs/2018 - Developing an Environment for Teaching Computers to Read Music.pdf:PDF},
  url = {https://sites.google.com/view/worms2018/proceedings}
}
@inproceedings{Vigliensoni2019,
  author = {Vigliensoni, Gabriel and Daigle, Alex and Liu, Eric and Calvo-Zaragoza, Jorge and Regimbal, Juliette and Nguyen, Minh Anh and Baxter, Noah and McLennan, Zo{\'{e}} and Fujinaga, Ichiro},
  booktitle = {Music Encoding Conference},
  title = {From image to encoding: Full optical music recognition of Medieval and Renaissance music},
  year = {2019},
  file = {:pdfs/2019 - From Image to Encoding_ Full Optical Music Recognition of Medieval and Renaissance Music.pdf:PDF},
  url = {https://music-encoding.org/conference/2019/abstracts_mec2019/vigliensoni19from%20camera%20ready.pdf}
}
@inproceedings{Viro2011,
  author = {Viro, Vladimir},
  booktitle = {12th International Society for Music Information Retrieval Conference},
  title = {Peachnote: Music Score Search and Analysis Platform},
  year = {2011},
  address = {Miami, FL},
  pages = {359--362},
  abstract = {Our system takes the scores in PDF format, runs optical music recognition (OMR) software over them, indexes the data and makes them accessible for querying and data mining. The search engine is built upon Hadoop and HBase and runs on a cluster.},
  file = {:pdfs/2011 - Peachnote_ Music Score Search and Analysis Platform.pdf:PDF},
  url = {http://ismir2011.ismir.net/papers/PS3-1.pdf}
}
@inproceedings{Visaniy2013,
  author = {Visani, Muriel and Kieu, V.C. and Forn{\'{e}}s, Alicia and Journet, Nicholas},
  booktitle = {12th International Conference on Document Analysis and Recognition},
  title = {The {ICDAR} 2013 Music Scores Competition: Staff Removal},
  year = {2013},
  pages = {1407--1411},
  abstract = {The first competition on music scores that was organized at ICDAR
	in 2011 awoke the interest of researchers, who participated both
	at staff removal and writer identification tasks. In this second
	edition, we focus on the staff removal task and simulate a real case
	scenario: old music scores. For this purpose, we have generated a
	new set of images using two kinds of degradations: local noise and
	3D distortions. This paper describes the dataset, distortion methods,
	evaluation metrics, the participant's methods and the obtained results.},
  doi = {10.1109/ICDAR.2013.284},
  file = {:pdfs/2013 - The ICDAR 2013 Music Scores Competition - Staff Removal.pdf:PDF},
  issn = {1520-5363},
  keywords = {image recognition;music;3D distortions;ICDAR 2013 music scores competition;distortion methods;evaluation metrics;local noise;old music;staff removal;writer identification;Databases;Degradation;Educational institutions;Noise;Solid modeling;Three-dimensional displays;Training;Competition;Music Scores;Staff Removal}
}
@inproceedings{Vo2014,
  author = {Vo, Quang Nhat and Nguyen, Tam and Kim, Soo-Hyung and Yang, Hyung-Jeong and Lee, Guee-Sang},
  booktitle = {22nd International Conference on Pattern Recognition},
  title = {Distorted music score recognition without Staffline removal},
  year = {2014},
  pages = {2956--2960},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {This paper proposes a new approach for recognizing the primitive musical symbols in distorted music scores without the staff line removal. We try to overcome two main issues. The first problem is the difficult and unreliable removal of staff lines required as a pre-processing step for most of recognition systems. The second problem is the non-linear distortion of the music score images captured by digital cameras. At the beginning, we detect the locations of bar-lines on each staff and segment it into sub-areas which can be rectified into undistorted shapes by biquadratic transformation. Then, musical rules, template matching, run length coding and projection methods are employed to extract the musical note information without the application of staff removal. The proposed method is implemented on smart phones and shows promising results. © 2014 IEEE.},
  affiliation = {Department of Electronics and Computer Engineering, Chonnam National University, Gwangju, South Korea},
  author_keywords = {Computer music; Music score recognition; Optical music recognition; Smart phone application},
  doi = {10.1109/ICPR.2014.510},
  file = {:pdfs/2014 - Distorted Music Score Recognition without Staffline Removal.pdf:PDF},
  isbn = {9781479952083},
  issn = {1051-4651},
  keywords = {Computer music; Image matching; Pattern recognition; Telephone sets; Template matching, Biquadratic transformations; Music scores; Optical music recognition; Pre-processing step; Projection method; Recognition systems; Run-length coding; Smart-phone applications, Smartphones},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6977222}
}
@article{Vo2016,
  author = {Vo, Quang Nhat and Kim, Soo Hyung and Yang, Hyung Jeong and Lee, Gueesang},
  journal = {Pattern Recognition Letters},
  title = {An MRF model for binarization of music scores with complex background},
  year = {2016},
  issn = {0167-8655},
  pages = {88--95},
  volume = {69},
  abstract = {We present a Gaussian Mixture Markov Random Field (GMMRF) model that
	is effective for the binarization of music score images with complex
	backgrounds. The binarization of music score documents containing
	noises with arbitrary shapes and/or non-uniform colors in the background
	area is a very challenging problem. In order to extract the content
	knowledge of music score documents, the staff lines are extracted
	by first applying a stroke width transform. With the color and spatial
	information of the detected staff lines, we can accurately model
	the foreground and background color distribution, in which a GMMRF
	framework is used to make the binarization robust to variations in
	colors. Then, the staff line information is employed for guiding
	the GMMRF labeling process. In the experiment, the music score images
	captured by camera show promising results compared to existing methods.},
  doi = {10.1016/j.patrec.2015.10.017},
  file = {:pdfs/2016 - An MRF model for binarization of music scores with complex background.pdf:PDF},
  keywords = {Music score binarization}
}
@article{Vo2017,
  author = {Vo, Quang Nhat and Lee, Guee Sang and Kim, Soo Hyung and Yang, Hyung Jeong},
  journal = {Multimedia Tools and Applications},
  title = {Recognition of Music Scores with Non-Linear Distortions in Mobile Devices},
  year = {2018},
  issn = {1573-7721},
  number = {12},
  pages = {15951--15969},
  volume = {77},
  abstract = {Optical music recognition (OMR), when the input music score is captured
	by a handheld or a mobile phone camera, suffers from severe degradation
	in the image quality and distortions caused by non-planar document
	curvature and perspective projection. Hence the binarization of the
	input often fails to preserve the details of the original music score,
	leading to a poor performance in recognition of music symbols. This
	paper addresses the issue of staff line detection, which is the most
	important step in OMR, in the presence of nonlinear distortions and
	describes how to cope with severe degradations in recognition of
	music symbols. First, a RANSAC-based detection of curved staff lines
	is presented and staves are segmented into sub-areas for the rectification
	with bi-quadratic transformation. Then, run length coding is used
	to recognize music symbols such as stem, note head, flag, and beam.
	The proposed system is implemented on smart phones, and it shows
	promising results with music score images captured in the mobile
	environment.},
  doi = {10.1007/s11042-017-5169-9},
  file = {:pdfs/2017 - Recognition of Music Scores with Non-Linear Distortions in Mobile Devices.pdf:PDF}
}
@misc{Vrist2009,
  author = {Vrist, S{\o}ren Bjerregaard},
  title = {Optical Music Recognition for structural information from high-quality scanned music},
  year = {2009},
  file = {:pdfs/2009 - Optical Music Recognition for structural information from high-quality scanned music.pdf:PDF}
}
@techreport{VuilleumierStueckelberg1997,
  author = {Vuilleumier St{\"{u}}ckelberg, Marc and Pellegrini, Christian and Hilario, M{\'{e}}lanie},
  institution = {University of Geneva},
  title = {A preview of an architecture for musical score recognition},
  year = {1997},
  file = {:pdfs/1997 - A Preview of an Architecture for Musical Score Recognition.pdf:PDF},
  location = {Geneva, Switzerland},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.52.3172&rep=rep1&type=pdf}
}
@inproceedings{VuilleumierStueckelberg1997a,
  author = {Vuilleumier St{\"{u}}ckelberg, Marc and Pellegrini, Christian and Hilario, M{\'{e}}lanie},
  booktitle = {4th International Conference on Document Analysis and Recognition},
  title = {An architecture for musical score recognition using high-level domain knowledge},
  year = {1997},
  pages = {813--818 vol.2},
  abstract = {Proposes an original approach to musical score recognition, a particular case of high-level document analysis. In order to overcome the limitations of existing systems, we propose an architecture which allows for a continuous and bidirectional interaction between high-level knowledge and low-level data, and which is able to improve itself over time by learning. This architecture is made of three cooperating layers, one made of parameterized feature detectors, another working as an object-oriented knowledge repository and the other as a supervising Bayesian metaprocessor. Although the implementation is still in progress, we show how this architecture is adequate for modeling and processing knowledge.},
  doi = {10.1109/ICDAR.1997.620624},
  file = {:pdfs/1997 - An Architecture for Musical Score Recognition Using High Level Domain Knowledge.pdf:PDF},
  keywords = {music;document image processing;feature extraction;knowledge based systems;learning (artificial intelligence);object-oriented databases;deductive databases;Bayes methods;image recognition;musical score recognition architecture;high-level domain knowledge;high-level document analysis;continuous bidirectional interaction;low-level data;learning;cooperating layers;parameterized feature detectors;object-oriented knowledge repository;supervising Bayesian metaprocessor;knowledge modelling;knowledge processing;Computer vision;Pattern recognition;Computer architecture;Detectors;Text analysis;Image analysis;Bayesian methods;Object oriented modeling;Artificial intelligence;Image segmentation}
}
@inproceedings{VuilleumierStueckelberg1999,
  author = {Vuilleumier St{\"{u}}ckelberg, Marc and Doermann, David},
  booktitle = {5th International Conference on Document Analysis and Recognition},
  title = {On musical score recognition using probabilistic reasoning},
  year = {1999},
  pages = {115--118},
  abstract = {We present a probabilistic framework for document analysis and recognition and illustrate it on the problem of musical score recognition. Our system uses an explicit descriptive model of the document class to find the most likely interpretation of a scanned document image. In contrast to the traditional pipeline architecture, we carry out all stages of the analysis with a single inference engine, allowing for an end-to-end propagation of the uncertainty. The global modeling structure is similar to a stochastic attribute grammar, and local parameters are estimated using hidden Markov models.},
  doi = {10.1109/ICDAR.1999.791738},
  file = {:pdfs/1999 - On Musical Score Recognition using Probabilistic Reasoning.pdf:PDF},
  isbn = {0-7695-0318-7},
  keywords = {musical score recognition, probabilistic reasoning, to classify},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=791738}
}
@mastersthesis{Wallner2014,
  author = {Wallner, Matthias},
  school = {TU Wien},
  title = {A System for Optical Music Recognition and Audio Synthesis},
  year = {2014},
  file = {:pdfs/2014 - A System for Optical Music Recognition and Audio Synthesis.pdf:PDF},
  url = {https://www.ims.tuwien.ac.at/topics/331/downloads/masterarbeit-wallner.pdf}
}
@inproceedings{Waloschek2019,
  author = {Waloschek, Simon and Hadjakos, Aristotelis and Pacha, Alexander},
  booktitle = {20th International Society for Music Information Retrieval Conference},
  title = {Identification and Cross-Document Alignment of Measures in Music Score Images},
  year = {2019},
  pages = {137--143},
  file = {:pdfs/2019 - Identification and Cross Document Alignment of Measures in Music Score Images.pdf:PDF},
  url = {https://archives.ismir.net/ismir2019/paper/000014.pdf}
}
@inproceedings{Walwadkar2022,
  author = {Walwadkar, Dnyanesh and Shatri, Elona and Timms, Benjamin and Fazekas, Gy{\"o}rgy},
  booktitle = {Proceedings of the 4th International Workshop on Reading Music Systems},
  title = {CompIdNet: Sheet Music Composer Identification using Deep Neural Network},
  year = {2022},
  address = {Online},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {9--14},
  doi = {10.48550/arXiv.2211.13285},
  file = {:pdfs/2022 - CompIdNet_ Sheet Music Composer Identification Using Deep Neural Network.pdf:PDF},
  url = {https://sites.google.com/view/worms2022/proceedings}
}
@inproceedings{Wei2008,
  author = {Wei, Lee Ling and Salih, Qussay A. and Hock, Ho Sooi},
  booktitle = {International Conference on Audio, Language and Image Processing},
  title = {Optical Tablature Recognition ({OTR}) system: Using Fourier Descriptors as a recognition tool},
  year = {2008},
  pages = {1532--1539},
  abstract = {This paper presents an optical recognition system for the guitar tablature. Images of guitar tablature are fed as input to the system whereby each image undergoes four main stages of processing to produce a music output in MIDI format. Algorithms both existing and self-devised were used. Each input image was first cropped to the desired region, followed by a process for removal of the string lines and detection of the numbers. Recognition of the numbers was carried out using Fourier descriptors based on 8 selected feature points. Once completed, the numbers were matched to their corresponding chords and then rearranged and played. The algorithms and methods used within the system are presented here with a justification on the selection of Fourier descriptors as the recognition tool.},
  doi = {10.1109/ICALIP.2008.4590235},
  file = {:pdfs/2008 - Optical Tablature Recognition (OTR) System_ Using Fourier Descriptors As a Recognition Tool.pdf:PDF},
  keywords = {Fourier transforms;image recognition;music;musical instruments;optical tablature recognition;Fourier descriptor;recognition tool;guitar tablature;image recognition;music;MIDI format;Pixel;Shape;Equations;Feature extraction;Mathematical model;Image edge detection;Music}
}
@inproceedings{Wel2017,
  author = {van der Wel, Eelco and Ullrich, Karen},
  booktitle = {18th International Society for Music Information Retrieval Conference},
  title = {Optical Music Recognition with Convolutional Sequence-to-Sequence Models},
  year = {2017},
  address = {Suzhou, China},
  archiveprefix = {arXiv},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  biburl = {http://dblp.org/rec/bib/journals/corr/WelU17},
  file = {:pdfs/2017 - Optical Music Recognition with Convolutional Sequence-To-Sequence Models.pdf:PDF},
  isbn = {978-981-11-5179-8},
  url = {https://archives.ismir.net/ismir2017/paper/000069.pdf}
}
@inproceedings{Wen2014,
  author = {Wen, Cuihong and Rebelo, Ana and Zhang, Jing and Cardoso, Jaime dos Santos},
  booktitle = {International Conference on Mechatronics and Control},
  title = {Classification of optical music symbols based on combined neural network},
  year = {2014},
  pages = {419--423},
  abstract = {In this paper, a new method for music symbol classification named
	Combined Neural Network (CNN) is proposed. Tests are conducted on
	more than 9000 music symbols from both real and scanned music sheets,
	which show that the proposed technique offers superior classification
	capability. At the same time, the performance of the new network
	is compared with the single Neural Network (NN) classifier using
	the same music scores. The average classification accuracy increased
	more than ten percent, reaching 98.82%.},
  doi = {10.1109/ICMC.2014.7231590},
  file = {:pdfs/2014 - Classification of Optical Music Symbols based on Combined Neural Networks.pdf:PDF},
  keywords = {image classification;music;neural nets;optical character recognition;CNN;NN classifier;classification accuracy;combined neural network;music scores;music sheets;optical music symbols classification;Accuracy;Artificial neural networks;Biological neural networks;Databases;Hidden Markov models;Integrated optics}
}
@article{Wen2015,
  author = {Wen, Cuihong and Rebelo, Ana and Zhang, Jing and Cardoso, Jaime dos Santos},
  journal = {Pattern Recognition Letters},
  title = {A new optical music recognition system based on combined neural network},
  year = {2015},
  issn = {0167-8655},
  pages = {1--7},
  volume = {58},
  abstract = {Optical music recognition (OMR) is an important tool to recognize a scanned page of music sheet automatically, which has been applied to preserving music scores. In this paper, we propose a new OMR system to recognize the music symbols without segmentation. We present a new classifier named combined neural network (CNN) that offers superior classification capability. We conduct tests on fifteen pages of music sheets, which are real and scanned images. The tests show that the proposed method constitutes an interesting contribution to OMR.},
  doi = {10.1016/j.patrec.2015.02.002},
  file = {:pdfs/2015 - A new optical music recognition system based on combined neural network.pdf:PDF},
  groups = {recognition},
  keywords = {Neural network},
  url = {http://www.sciencedirect.com/science/article/pii/S0167865515000392}
}
@article{Wen2016,
  author = {Wen, Cuihong and Zhang, Jing and Rebelo, Ana and Cheng, Fanyong},
  journal = {PLoS ONE},
  title = {A Directed Acyclic Graph-Large Margin Distribution Machine Model for Music Symbol Classification},
  year = {2016},
  number = {3},
  pages = {1--11},
  volume = {11},
  abstract = {Optical Music Recognition (OMR) has received increasing attention in recent years. In this paper, we propose a classifier based on a new method named Directed Acyclic Graph-Large margin Distribution Machine (DAG-LDM). The DAG-LDM is an improvement of the Large margin Distribution Machine (LDM), which is a binary classifier that optimizes the margin distribution by maximizing the margin mean and minimizing the margin variance simultaneously. We modify the LDM to the DAG-LDM to solve the multi-class music symbol classification problem. Tests are conducted on more than 10000 music symbol images, obtained from handwritten and printed images of music scores. The proposed method provides superior classification capability and achieves much higher classification accuracy than the state-of-the-art algorithms such as Support Vector Machines (SVMs) and Neural Networks (NNs).},
  doi = {10.1371/journal.pone.0149688},
  editor = {Ebrahimi, Mansour},
  file = {:pdfs/2016 - A Directed Acyclic Graph-Large Margin Distribution Machine Model for Music Symbol Classification.PDF:PDF},
  publisher = {Public Library of Science}
}
@inproceedings{Wenzlitschke2021,
  author = {Wenzlitschke, Nils},
  booktitle = {Proceedings of the 3rd International Workshop on Reading Music Systems},
  title = {Implementation and evaluation of a neural network for the recognition of handwritten melodies},
  year = {2021},
  address = {Alicante, Spain},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {38--42},
  file = {:pdfs/2021 - Implementation and Evaluation of a Neural Network for the Recognition of Handwritten Melodies.pdf:PDF},
  url = {https://sites.google.com/view/worms2021/proceedings}
}
@unpublished{Wick2019,
  author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank},
  title = {Staff, Symbol, and Melody Detection of Medieval Manuscripts Written in Square Notation Using Deep Fully Convolutional Networks},
  month = may,
  year = {2019},
  doi = {10.20944/preprints201905.0231.v1},
  url = {https://www.researchgate.net/publication/333212343_Staff_Symbol_and_Melody_Detection_of_Medieval_Manuscripts_Written_in_Square_Notation_Using_Deep_Fully_Convolutional_Networks}
}
@article{Wick2019a,
  author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank},
  journal = {Applied Sciences},
  title = {Staff, Symbol and Melody Detection of Medieval Manuscripts Written in Square Notation Using Deep Fully Convolutional Networks},
  year = {2019},
  issn = {2076-3417},
  number = {13},
  pages = {2646--2673},
  volume = {9},
  abstract = {Even today, the automatic digitisation of scanned documents in general, but especially the automatic optical music recognition (OMR) of historical manuscripts, still remains an enormous challenge, since both handwritten musical symbols and text have to be identified. This paper focuses on the Medieval so-called square notation developed in the 11th–12th century, which is already composed of staff lines, staves, clefs, accidentals, and neumes that are roughly spoken connected single notes. The aim is to develop an algorithm that captures both the neumes, and in particular its melody, which can be used to reconstruct the original writing. Our pipeline is similar to the standard OMR approach and comprises a novel staff line and symbol detection algorithm based on deep Fully Convolutional Networks (FCN), which perform pixel-based predictions for either staff lines or symbols and their respective types. Then, the staff line detection combines the extracted lines to staves and yields an F1-score of over 99% for both detecting lines and complete staves. For the music symbol detection, we choose a novel approach that skips the step to identify neumes and instead directly predicts note components (NCs) and their respective affiliation to a neume. Furthermore, the algorithm detects clefs and accidentals. Our algorithm predicts the symbol sequence of a staff with a diplomatic symbol accuracy rate (dSAR) of about 87%, which includes symbol type and location. If only the NCs without their respective connection to a neume, all clefs and accidentals are of interest, the algorithm reaches an harmonic symbol accuracy rate (hSAR) of approximately 90%. In general, the algorithm recognises a symbol in the manuscript with an F1-score of over 96%.},
  doi = {10.3390/app9132646},
  file = {:pdfs/2019 - Staff, Symbol and Melody Detection of Medieval Manuscripts Written in Square Notation Using Deel Fully Convolutional Networks.pdf:PDF},
  keywords = {optical music recognition; historical document analysis; medieval manuscripts; neume notation; fully convolutional neural networks},
  url = {https://www.mdpi.com/2076-3417/9/13/2646}
}
@inproceedings{Wick2019b,
  author = {Wick, Christoph and Puppe, Frank},
  booktitle = {2nd International Workshop on Reading Music Systems},
  title = {OMMR4all --- a Semiautomatic Online Editor for Medieval Music Notations},
  year = {2019},
  address = {Delft, The Netherlands},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander},
  pages = {31--34},
  file = {:pdfs/2019 - OMMR4all a Semiautomatic Online Editor for Medieval Music Notations.pdf:PDF},
  url = {https://sites.google.com/view/worms2019/proceedings}
}
@techreport{Wick2020,
  author = {Wick, Christoph and Puppe, Frank},
  institution = {University of Würzburg},
  title = {Automatic Neume Transcription of Medieval Music Manuscripts using CNN/LSTM-Networks and the segmentation-free CTC-Algorithm},
  year = {2020},
  doi = {10.20944/preprints202001.0149.v1},
  file = {:pdfs/2020 - Automatic Neume Transcription of Medieval Music Manuscripts Using CNN_LSTM Networks and the Segmentation Free CTC Algorithm.pdf:PDF}
}
@inproceedings{Wijaya1999,
  author = {Wijaya, K. and Bainbridge, David},
  booktitle = {7th International Conference on Image Processing and its Applications},
  title = {Staff line restoration},
  year = {1999},
  pages = {760--764},
  publisher = {Institution of Engineering and Technology},
  abstract = {Optical music recognition (OMR), the conversion of scanned pages of music into a musical database, has reached an exciting level of maturity. Like optical character recognition, it has now reached the point where the returns in accuracy from increasingly sophisticated pattern recognition algorithms appears saturated and more significant gains are being made from the application of structured a priori knowledge. This paper describes one such technique for improved staff line processing: the detection and subsequent correction of bowing in the staff lines, which is an important category given the significant source of music in book form. Two versions of the algorithm are tested: the first, based on mathematical morphology, has the added benefit of automatically fusing small breaks in staff lines, common for example in older works; the second, based on a flood-fill algorithm, requires a minor modification if fragmented staff lines are to be repaired. The correct detection and processing of staff lines is fundamental to OMR. Without adequate knowledge of staff line location, notation superimposed on the staves cannot be correctly separated, classified and processed.},
  affiliation = {Waikato Univ., Hamilton},
  doi = {10.1049/cp:19990426},
  file = {:pdfs/1999 - Staff Line Restoration.pdf:PDF},
  keywords = {music;flood-fill algorithm;machine readable notation;staff line location;structured a priori knowledge;optical character recognition;bowing correction;staff line processing;scanned pages conversion;bowing detection;fragmented staff lines repair;musical database;pattern recognition algorithms;staff line restoration;optical music recognition;mathematical morphology;}
}
@misc{Witt2013,
  author = {Witt, Carl},
  title = {Optical Music Recognition Symbol Detection using Contour Traces},
  year = {2013},
  abstract = {A novel approach to symbol detection in optical music recognition is presented.
The binarized image of a scanned score is transformed into an intermediate representation by computing its contours and assigning additional visual features to them.
The resulting contour points are accessed via a high dimensional spatial index that
aids a heuristic search to detect a given symbol as described by a template image.
An automatic and a manual method for generating ground truth data are presented,
amongst other web-based tools to evaluate and supervise the recognition process.},
  file = {:pdfs/2013 - Optical Music Recognition Symbol Detection Using Contour Traces.pdf:PDF},
  institution = {Institut für Informatik},
  school = {Freie Universität Berlin},
  type = {Bachelor Thesis}
}
@inproceedings{Wolman1992,
  author = {Wolman, Amnon and Choi, James and Asgharzadeh, Shahab and Kahana, Jason},
  booktitle = {International Computer Music Conference},
  title = {Recognition of Handwritten Music Notation},
  year = {1992},
  file = {:pdfs/1992 - Recognition of Handwritten Music Notation.pdf:PDF}
}
@inproceedings{Wu2016,
  author = {Wu, Fu-Hai Frank},
  booktitle = {International Symposium on Multimedia},
  title = {An Evaluation Framework of Optical Music Recognition in Numbered Music Notation},
  year = {2016},
  pages = {626--631},
  abstract = {In this study, we refine the ecosystem for optical music recognition
	(OMR) of numbered music notation with better accuracy. The ecosystem
	includes users, OMR system, dataset of music scores, groundtruth
	building, symbolic representation of sheet music, checking by musicological
	rules and performance evaluation. Especially, the evaluation metric
	includes exact and approximate approach to count accuracy automatically.
	The hands-on dataset comprises of 110 music score manuscripts in
	a songbook for singing reference. The experimental results justify
	the value of evaluation framework and show the necessity of checks
	complying with musicological properties.},
  doi = {10.1109/ISM.2016.0134},
  file = {:pdfs/2016 - An evaluation Framework of Optical Music Recognition in Numbered Music Notation.pdf:PDF},
  keywords = {acoustic signal processing;music;OMR;groundtruth building;music score dataset;music score manuscripts;musicological rules;numbered music notation;optical music recognition;sheet music symbolic representation;singing reference;Adaptive optics;Bars;Music;Optical imaging;Performance evaluation;Semantics;Optical music recognition;dataset;evaluation;groundtruth;numbered music notation}
}
@article{Wu2017,
  author = {Wu, Fu-Hai Frank},
  journal = {International Journal of Multimedia Data Engineering and Management},
  publisher = {IGI Global},
  title = {Applying Machine Learning in Optical Music Recognition of Numbered Music Notation},
  year = {2017},
  pages = {21},
  abstract = {Although research of optical music recognition (OMR) has existed for
	few decades, most of efforts were put in step of image processing
	to approach upmost accuracy and evaluations were not in common ground.
	And major music notations explored were the conventional western
	music notations with staff. On contrary, the authors explore the
	challenges of numbered music notation, which is popular in Asia and
	used in daily life for sight reading. The authors use different way
	to improve recognition accuracy by applying elementary image processing
	with rough tuning and supplementing with methods of machine learning.
	The major contributions of this work are the architecture of machine
	learning specified for this task, the dataset, and the evaluation
	metrics, which indicate the performance of OMR system, provide objective
	function for machine learning and highlight the challenges of the
	scores of music with the specified notation.},
  doi = {10.4018/IJMDEM.2017070102}
}
@article{Xiao2019,
  author = {Xiao, Zhe and Chen, Xin and Zhou, Li},
  journal = {Journal of Advanced Computational Intelligence and Intelligent Informatics},
  title = {Real-Time Optical Music Recognition System for Dulcimer Musical Robot},
  year = {2019},
  number = {4},
  pages = {782--790},
  volume = {23},
  abstract = {Traditional optical music recognition (OMR) is an important technology that automatically recognizes scanned paper music sheets. In this study, traditional OMR is combined with robotics, and a real-time OMR system for a dulcimer musical robot is proposed. This system gives the musical robot a stronger ability to perceive and understand music. The proposed OMR system can read music scores, and the recognized information is converted into a standard electronic music file for the dulcimer musical robot, thus achieving real-time performance. During the recognition steps, we treat note groups and isolated notes separately. Specially structured note groups are identified by primitive decomposition and structural analysis. The note groups are decomposed into three fundamental elements: note stem, note head, and note beams. Isolated music symbols are recognized based on shape model descriptors. We conduct tests on real pictures taken live by a camera. The tests show that the proposed method has a higher recognition rate.},
  doi = {10.20965/jaciii.2019.p0782},
  file = {:pdfs/2019 - Real Time Optical Music Recognition System for Dulcimer Musical Robot.pdf:PDF}
}
@article{Yadid-Pecht1996,
  author = {Yadid-Pecht, Orly and Gerner, Moty and Dvir, Lior and Brutman, Eliyahu and Shimony, Uri},
  journal = {Machine Vision and Applications},
  title = {Recognition of handwritten musical notes by a modified Neocognitron},
  year = {1996},
  issn = {1432-1769},
  number = {2},
  pages = {65--72},
  volume = {9},
  abstract = {A neural network for recognition of handwritten musical notes, based on the well-known Neocognitron model, is described. The Neocognitron has been used for the ``what'' pathway (symbol recognition), while contextual knowledge has been applied for the ``where'' (symbol placement). This way, we benefit from dividing the process for dealing with this complicated recognition task. Also, different degrees of intrusiveness in ``learning'' have been incorporated in the same network: More intrusive supervised learning has been implemented in the lower neuron layers and less intrusive in the upper one. This way, the network adapts itself to the handwriting of the user. The network consists of a 13x49 input layer and three pairs of ``simple'' and ``complex'' neuron layers. It has been trained to recognize 20 symbols of unconnected notes on a musical staff and was tested with a set of unlearned input notes. Its recognition rate for the individual unseen notes was up to 93{\%}, averaging 80{\%} for all categories. These preliminary results indicate that a modified Neocognitron could be a good candidate for identification of handwritten musical notes.},
  doi = {10.1007/BF01214361},
  url = {https://doi.org/10.1007/BF01214361}
}
@inproceedings{Yin2018,
  author = {Yin, Yu and Huang, Zhenya and Chen, Enhong and Liu, Qi and Zhang, Fuzheng and Xie, Xing and Hu, Guoping},
  booktitle = {24th International Conference on Knowledge Discovery \& Data Mining},
  title = {Transcribing Content from Structural Images with Spotlight Mechanism},
  year = {2018},
  address = {London, United Kingdom},
  pages = {2643--2652},
  publisher = {ACM},
  acmid = {3219962},
  doi = {10.1145/3219819.3219962},
  file = {:pdfs/2018 - Transcribing Content from Structural Images with Spotlight Mechanism.pdf:PDF},
  isbn = {978-1-4503-5552-0},
  keywords = {reinforcement learning, spotlight transcribing network, structural image},
  url = {http://doi.acm.org/10.1145/3219819.3219962}
}
@inproceedings{Yin-xian2012,
  author = {Yin-xian, Yang and Ding-li, Yang},
  booktitle = {4th International Conference on Computer Modeling and Simulation},
  title = {Staff Line Removal Algorithm Based on Trajectory Tracking and Topological Structure of Score},
  year = {2012},
  abstract = {Staff line removal plays a vital role in {OMR} technology and is the precondition of the succeeding segmentation \& recognition of music sheets. For the phenomena of over-deletion or mistaken deletion and under-deletion, which often appear in the removal process of staff lines, a novel staff line removal algorithm based on trajectory tracking and the topological structure of music symbols is put forward to solve the deletion faults of partial notations. Experimental results show the presented algorithm can remove staff lines fast and effectively.},
  file = {:pdfs/2012 - Staff Line Removal Algorithm Based on Trajectory tracking and Topological Structure of Score.pdf:PDF},
  keywords = {OMR, Staff Line Removal, Subsection Projection, Correlation Computation}
}
@inproceedings{Yoda1995,
  author = {Yoda, Ikushi and Yamamoto, Kazuhiko and Yamada, Hiromitsu},
  booktitle = {Document Analysis Systems},
  title = {Automatic Construction of Recognition Procedures for Musical Notes by Genetic Algorithm},
  year = {1995},
  editor = {A. Lawrence Spitz and Andreas Dengel},
  doi = {10.1142/9789812797933},
  file = {:pdfs/1995 - Automatic Construction of Recognition Procedures for Musical Notes by Genetic Algorithm.pdf:PDF}
}
@inproceedings{Yoo2008,
  author = {Yoo, JaeMyeong and Toan, Nguyen Dinh and Choi, DeokJai and Park, HyukRo and Lee, Gueesang},
  booktitle = {8th International Conference on Computer and Information Technology Workshops},
  title = {Advanced Binarization Method for Music Score Recognition Using Local Thresholds},
  year = {2008},
  pages = {417--420},
  abstract = {Application technology of mobile phone has been developing for the delivery of various contents over a simple voice channel. Music score recognition is one of such application services provided by mobile phone manufacturers which transform a music score taken by the phone camera into a midi file. For the successful recognition of the music score, the input image should be properly binarized to be fed into the recognition process. In this paper, Adaptive binary algorithm is proposed which exploits local thresholds with several levels to deal with illumination changes over the entire image. Experimental results shown advanced performance of music score recognition.},
  doi = {10.1109/CIT.2008.Workshops.101},
  file = {:pdfs/2008 - Advanced Binarization Method for Music Score Recognition Using Local Thresholds.pdf:PDF},
  keywords = {mobile computing;music;music score recognition;mobile phone;adaptive binary algorithm;local threshold;Histograms;Mobile handsets;Brightness;Image recognition;Application software;Frequency;Information technology;Conferences;Speech recognition;Computer science;Binarization;music score recognition;local threshold}
}
@inproceedings{Zalkow2019,
  author = {Zalkow, Frank and Corrales, Angel Villar and Tsai, TJ and Arifi-M{\"{u}}ller, Vlora and M{\"{u}}ller, Meinard},
  booktitle = {Late Breaking/Demo at the 20th International Society for Music Information Retrieval Conference},
  title = {Tools For Semi-Automatic Bounding Box Annotation Of Musical Measures In Sheet Music},
  year = {2019},
  address = {Delft, The Netherlands}
}
@mastersthesis{Zhang2017a,
  author = {Zhang, Emily H.},
  school = {Massachusetts Institute of Technology},
  title = {An Efficient Score Alignment Algorithm and its Applications},
  year = {2017},
  abstract = {String alignment and comparison in Computer Science is a well-explored space with
classic problems such as Longest Common Subsequence that have practical application
in bioinformatic genomic sequencing and data comparison in revision control systems.
In the field of musicology, score alignment and comparison is a problem with many
similarities to string comparison and alignment but also vast differences. In particular
we can use ideas in string alignment and comparison to compare a music score in the
MIDI format with a music score generated from Optical Musical Recognition (OMR),
both of which have incomplete or wrong information, and correct errors that were
introduced in the OMR process to create an improved third score. This thesis creates
a set of algorithms that align and compare MIDI and OMR music scores to produce a
corrected version of the OMR score that borrows ideas from classic computer science
string comparison and alignment algorithm but also incorporates optimizations and
heuristics from music theory.},
  file = {:pdfs/2017 - An Efficient Score Alignment Algorithm and its Applications.pdf:PDF},
  url = {http://hdl.handle.net/1721.1/113457}
}
@inproceedings{Zhang2023,
  author = {Zhang, Zihui and Shatri, Elona and Fazekas, Gy\"{o}rgy},
  booktitle = {Proceedings of the 5th International Workshop on Reading Music Systems},
  title = {Improving Sheet Music Recognition using Data Augmentation and Image Enhancement},
  year = {2023},
  address = {Milan, Italy},
  editor = {Calvo-Zaragoza, Jorge and Pacha, Alexander and Shatri, Elona},
  pages = {31--33},
  doi = {10.48550/arXiv.2311.04091},
  file = {:pdfs/2023 - Improving Sheet Music Recognition Using Data Augmentation and Image Enhancement.pdf:PDF},
  url = {https://sites.google.com/view/worms2023/proceedings}
}
@comment{{jabref-meta: databaseType:bibtex;}}
@comment{{jabref-meta: saveActions:enabled;
all-text-fields[identity]
author[unicode_to_latex]
booktitle[unicode_to_latex]
date[normalize_date]
editor[unicode_to_latex]
journal[unicode_to_latex]
pages[normalize_page_numbers]
title[html_to_latex,unicode_to_latex]
;}}
@comment{{jabref-meta: saveOrderConfig:specified;citationkey;false;}}
