@inproceedings{5062,
  title     = {What can we learn from massive music archives?},
  booktitle = {Dagstuhl Seminar 13451: Computational Audio Analysis},
  year      = {2013},
  month     = nov,
  address   = {Wadern, Germany},
  abstract  = {As a scientific community we are slowly but steadily progressing towards the availability of massive amounts of music and music-related data for research purposes. The Million Song Dataset, Peachnote, the Yahoo! Music Dataset, the Last.fm API, Musicbrainz, or Wikipedia are just but some examples. Certainly, such big data availability will shift the perspective in which we approach many (if not all) of the traditional music information retrieval tasks. From genre or mood classification to audio or cover song identification, practically all tasks will experiment a change of paradigm that will frame them under more realistic, large-scale scenarios. However, I am perhaps more interested in the new avenues for research that are awaiting for us. In particular, I am excited about the knowledge that we can distill from such massive amounts of data. Not only knowledge about music itself (rules, patterns, anti-patterns, and their evolution), but also knowledge about ourselves, as music listeners, users, or creators. Music is an extremely powerful means of communication that shapes our brain in intricate ways, unique to mankind, and transversal to all societies. Thus, we would expect to gain relevant knowledge from mining massive amounts of music archives. But what can we learn, exactly?},
  author    = {Serr{\`a}, Joan},
  editor    = {M{\"u}ller, Meinard and Narayanan, Shrikanth and Schuller, Bj{\"o}rn},
}