
Commit ffa02eb

Author: faradox
Commit message: update
1 parent 5d18e21 commit ffa02eb

File tree: 4 files changed, +113 -96 lines changed


README.md

Lines changed: 2 additions & 3 deletions
@@ -1,14 +1,13 @@
 # Polymath
 
-Polymath uses machine learning to convert any music library (*e.g from Hard-Drive or YouTube*) into a music production sample-library. The tool automatically separates tracks into stems (*beats, bass, etc.*), quantizes them to the same tempo and beat-grid (*e.g. 120bpm*), analyzes musical structure (*e.g. verse, chorus, etc.*), key (*e.g C4, E3, etc.*) and other infos (*timbre, loudness, etc.*), and converts audio to midi. The result is a searchable sample library that streamlines the workflow for music producers, DJs, and ML audio developers.
+Polymath uses machine learning to convert any music library (*e.g. from hard drive or YouTube*) into a music production sample library. The tool automatically separates tracks into stems (_drums, bass, etc._), quantizes them to the same tempo and beat-grid (*e.g. 120 bpm*), analyzes tempo, key (_e.g. C4, E3, etc._), and other info (*timbre, loudness, etc.*), and cuts loops out of them. The result is a searchable sample library that streamlines the workflow for music producers, DJs, and ML audio developers.
 
 Try it in colab:
 <a target="_blank" href="https://colab.research.google.com/drive/1TjRVFdh1BPdQ_5_PL5EsfS278-EUYt90?usp=sharing">
 <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
 </a>
 
-
-<p align="center"><img alt="Polymath" src="https://samim.io/static/upload/illustration3.688a510b-bocuz8wh.png" /></p>
+![Polymath](docs/images/polymath.png)
 
 
 ## Use-cases

docs/images/polymath.png

881 KB

polymath.py

Lines changed: 105 additions & 87 deletions
@@ -12,8 +12,8 @@
 import string
 import tempfile
 
-from nendo import Nendo, NendoConfig
-from pathlib import Path
+import librosa
+from nendo import Nendo, NendoConfig, NendoTrack
 from yt_dlp import YoutubeDL
 
 
@@ -69,22 +69,11 @@ def input_tracks(nendo, input_string):
     input_items = input_string.split(",")
     for item in input_items:
         if os.path.isdir(item):
-            imported_tracks = nendo.add_tracks(path=item)
-            for imported_track in imported_tracks.tracks():
-                original_filename = imported_track.resource.meta["original_filename"]
-                imported_track.resource.meta.update(
-                    {"original_info": Path(original_filename).stem},
-                )
-                imported_track.save()
+            nendo.add_tracks(path=item)
         elif os.path.isfile(item):
-            imported_track = nendo.add_track(file_path=item)
-            original_filename = imported_track.resource.meta["original_filename"]
-            imported_track.resource.meta.update(
-                {"original_info": Path(original_filename).stem},
-            )
-            imported_track.save()
+            nendo.add_track(file_path=item)
         else:
-            imported_track = import_youtube(nendo, item)
+            import_youtube(nendo, item)
         print(f"Added {item}")
 
 def process_tracks(
@@ -112,93 +101,123 @@ def process_tracks(
         beats_per_loop (int): Beats per loop to extract.
     """
     n = 0
-    processed_tracks = []
-    analysis_data = []
     for track in tracks:
-        original_info = track.resource.meta["original_info"]
-        print(f"Processing track {1}/{len(tracks)}: {original_info}")
-        if (analyze is True and
+        original_title = track.meta["title"]
+        print(f"Processing track {1}/{len(tracks)}: {original_title}")
+        duration = round(librosa.get_duration(y=track.signal, sr=track.sr), 3)
+        if (analyze is True and (
            len(
                track.get_plugin_data(
                    plugin_name="nendo_plugin_classify_core",
                ),
-            ) == 0
+            ) == 0 or nendo.config.replace_plugin_data is True)
         ):
             print("Analyzing...")
             track.process("nendo_plugin_classify_core")
-            analysis_data = track.get_plugin_data(
-                plugin_name="nendo_plugin_classify_core",
-            )
+            # analysis_data = track.get_plugin_data(
+            #     plugin_name="nendo_plugin_classify_core",
+            # )
+        stems = track
         if (stemify is True and
            track.track_type != "stem" and
            "has_stems" not in track.resource.meta):
             print("Stemifying...")
             stems = track.process("nendo_plugin_stemify_demucs")
-            track.resource.meta.update({"has_stems": True })
-            track.save()
-            for t in stems.tracks():
-                print(t.track_type)
-            processed_tracks = processed_tracks + stems.tracks()
+            track.set_meta({"has_stems": True })
+            for stem in stems:
+                stem_type = stem.get_meta("stem_type")
+                stem.meta = dict(track.meta)
+                stem.set_meta(
+                    {
+                        "title": f"{original_title} - {stem_type} stem",
+                        "stem_type": stem_type,
+                        "duration": duration,
+                    },
+                )
+        quantized = stems
         if quantize is True:
             print("Quantizing...")
-            quantized_tracks = []
-            for pt in processed_tracks:
-                # don't re-quantize tracks
-                if pt.track_type != "quantized":
-                    quantized_track = pt.process(
-                        "nendo_plugin_quantize_core",
-                        bpm=quantize_to_bpm,
+            quantized = stems.process(
+                "nendo_plugin_quantize_core",
+                bpm=quantize_to_bpm,
+            )
+            if type(quantized) == NendoTrack: # is a single track
+                if not quantized.has_related_track(track_id=track.id, direction="from"):
+                    quantized.relate_to_track(
+                        track_id=track.id,
+                        relationship_type="quantized",
                     )
-                    if pt.has_meta("stem_type"):
-                        quantized_track.set_meta(
-                            {"stem_type": pt.get_meta("stem_type")},
+                quantized.meta = dict(track.meta)
+                duration = round(librosa.get_duration(y=quantized.signal, sr=quantized.sr), 3)
+                quantized.set_meta(
+                    {
+                        "title": f"{original_title} - ({quantize_to_bpm} bpm)",
+                        "duration": duration,
+                    },
+                )
+            else: # is a collection
+                for j, qt in enumerate(quantized):
+                    if not qt.has_related_track(track_id=track.id, direction="from"):
+                        qt.relate_to_track(
+                            track_id=track.id,
+                            relationship_type="quantized",
                         )
-                    quantized_tracks.append(quantized_track)
-            processed_tracks = processed_tracks + quantized_tracks
+                    qt.meta = dict(track.meta)
+                    duration = round(librosa.get_duration(y=qt.signal, sr=qt.sr), 3)
+                    if stems[j].track_type == "stem":
+                        qt.set_meta(
+                            {
+                                "title": (
+                                    f"{original_title} - "
+                                    f"{stems[j].meta['stem_type']} "
+                                    f"({quantize_to_bpm} bpm)"
+                                ),
+                                "stem_type": stems[j].meta["stem_type"],
+                                "duration": duration,
+                            },
+                        )
+                    else:
+                        qt.set_meta(
+                            {
+                                "title": f"{original_title} ({quantize_to_bpm} bpm)",
+                                "duration": duration,
+                            },
+                        )
+        loopified = quantized
         if loopify is True:
             print("Loopifying...")
-            loopified_tracks = []
-            for pt in processed_tracks:
-                # don't loop loops again
-                if track.track_type != "loop":
-                    loops_col = pt.process(
-                        "nendo_plugin_loopify",
-                        n_loops=n_loops,
-                        beats_per_loop=beats_per_loop,
-                    )
-                    for loop in loops_col.tracks():
-                        if pt.has_meta("stem_type"):
-                            loop.set_meta({"stem_type": pt.get_meta("stem_type")})
-                        quantized_bpm = pt.get_plugin_data(
-                            plugin_name="nendo_plugin_quantize_core",
-                            key="tempo",
+            loopified = []
+            if type(quantized) == NendoTrack:
+                quantized = [quantized]
+            for qt in quantized:
+                qt_loops = qt.process(
+                    "nendo_plugin_loopify",
+                    n_loops=n_loops,
+                    beats_per_loop=beats_per_loop,
+                )
+                loopified += qt_loops
+                num_loop = 1
+                for lp in qt_loops:
+                    if not lp.has_related_track(track_id=track.id, direction="from"):
+                        lp.relate_to_track(
+                            track_id=track.id,
+                            relationship_type="loop",
                         )
-                        if len(quantized_bpm) == 0:
-                            loop.add_plugin_data(
-                                plugin_name="nendo_plugin_quantize_core",
-                                key="tempo",
-                                value=quantize_to_bpm,
-                            )
-                        loopified_tracks.append(loop)
-            processed_tracks = processed_tracks + loopified_tracks
-        # propagate information down derivative tracks
-        for processed_track in processed_tracks:
-            processed_track.resource.meta.update({"original_info": original_info})
-            processed_track.save()
-            # transfer plugin data to derived tracks
-            quantized_bpm = processed_track.get_plugin_data(
-                plugin_name="nendo_plugin_quantize_core",
-                key="tempo",
-            )
-            # only transfer plugin data if track has not been quantized
-            if len(quantized_bpm) == 0:
-                for pd in analysis_data:
-                    processed_track.add_plugin_data(
-                        plugin_name=pd.plugin_name,
-                        plugin_version=pd.plugin_version,
-                        key=pd.key,
-                        value=pd.value,
+                    stem_type = qt.meta["stem_type"] if qt.has_meta("stem_type") else ""
+                    qt_info = (
+                        f" ({quantize_to_bpm} bpm)"
+                        if qt.track_type == "quantized"
+                        else ""
+                    )
+                    lp.meta = dict(track.meta)
+                    duration = round(librosa.get_duration(y=lp.signal, sr=lp.sr), 3)
+                    lp.set_meta(
+                        {
+                            "title": f"{original_title} - {stem_type} loop {num_loop} {qt_info}",
+                            "duration": duration,
+                        },
                     )
+                    num_loop += 1
         n = n+1
         print(f"Track {n}/{len(tracks)} Done.\n")
     print("Processing completed. "
@@ -417,14 +436,13 @@ def main(): # noqa: D103
     # apply search
     tracks = []
     if args.find is None:
-        tracks = nendo.get_tracks()
+        tracks = nendo.filter_tracks(track_type="track")
     else:
         for search_value in args.find.split(","):
-            tracks = tracks + nendo.find_tracks(
-                value=search_value,
+            tracks = tracks + nendo.filter_tracks(
+                search_meta={"": search_value},
+                track_type="track",
             )
-    # only start with "original tracks", not with stems and the likes
-    tracks = [t for t in tracks if t.track_type == "track"]
 
     if args.process:
         process_tracks(
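The switch from get_tracks/find_tracks to filter_tracks in main() is also what makes the processed library queryable by metadata and track type. A minimal, hypothetical query against an already-processed library (the search value and the "loop" track type are assumptions for illustration, not taken from this commit):

from nendo import Nendo, NendoConfig

nendo = Nendo(config=NendoConfig())  # assumes the same library the pipeline wrote into

# Hypothetical: find loop tracks whose metadata mentions "drums".
loops = nendo.filter_tracks(
    search_meta={"": "drums"},  # the empty key mirrors the search_meta usage in main() above
    track_type="loop",          # assumption: loops are stored with track_type "loop"
)
for lp in loops:
    print(lp.get_meta("title"), lp.get_meta("duration"))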

requirements.txt

Lines changed: 6 additions & 6 deletions
@@ -1,8 +1,8 @@
-nendo>=0.1.2
-nendo_plugin_classify_core>=0.1.1
-nendo_plugin_quantize_core>=0.1.2
-nendo_plugin_stemify_demucs>=0.1.0
-nendo_plugin_loopify>=0.1.1
+nendo==0.2.0
+nendo_plugin_classify_core==0.2.6
+nendo_plugin_quantize_core==0.2.6
+nendo_plugin_stemify_demucs==0.1.2
+nendo_plugin_loopify==0.1.6
 tensorflow
 git+https://github.com/CPJKU/madmom.git@0551aa8
-yt_dlp>=2023.11.16
+yt_dlp>=2023.12.30
