Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
MorsGames
GitHub Repository: MorsGames/sm64plus
Path: blob/master/tools/assemble_sound.py
7854 views
1
#!/usr/bin/env python3
2
from collections import namedtuple, OrderedDict
3
from json import JSONDecoder
4
import os
5
import re
6
import struct
7
import subprocess
8
import sys
9
10
TYPE_CTL = 1
11
TYPE_TBL = 2
12
TYPE_SEQ = 3
13
14
STACK_TRACES = False
15
DUMP_INDIVIDUAL_BINS = False
16
ENDIAN_MARKER = ">"
17
WORD_BYTES = 4
18
19
orderedJsonDecoder = JSONDecoder(object_pairs_hook=OrderedDict)
20
21
22
class Aifc:
    """A parsed .aifc sample: raw ADPCM data plus codebook/loop metadata."""

    def __init__(self, name, fname, data, sample_rate, book, loop):
        (
            self.name,
            self.fname,
            self.data,
            self.sample_rate,
            self.book,
            self.loop,
        ) = (name, fname, data, sample_rate, book, loop)
        # Filled in later: 'used' when a sound bank references this sample,
        # 'offset' when the sample data is laid out in the .tbl file.
        self.used = False
        self.offset = None
32
33
34
class SampleBank:
    """A named collection of Aifc entries with by-name lookup."""

    def __init__(self, name, entries):
        self.name = name
        self.uses = []  # sound banks that draw from this sample bank
        self.index = None  # assigned after sorting in main()
        self.entries = entries
        self.name_to_entry = {entry.name: entry for entry in entries}
43
44
45
Book = namedtuple("Book", ["order", "npredictors", "table"])
46
Loop = namedtuple("Loop", ["start", "end", "count", "state"])
47
Bank = namedtuple("Bank", ["name", "sample_bank", "json"])
48
49
50
def align(val, al):
    """Round val up to the next multiple of al (al must be a power of two)."""
    return (val + al - 1) & ~(al - 1)
52
53
54
def fail(msg):
    """Report a fatal error on stderr and exit (or raise with --stack-trace)."""
    sys.stderr.write(msg + "\n")
    if STACK_TRACES:
        raise Exception("re-raising exception")
    sys.exit(1)
59
60
61
def validate(cond, msg, forstr=""):
    """Raise Exception(msg [+ " for " + forstr]) when cond is falsy."""
    if cond:
        return
    full_msg = msg + " for " + forstr if forstr else msg
    raise Exception(full_msg)
66
67
68
def strip_comments(string):
    """Remove /* ... */ block comments and // line comments from JSON text.

    Note: a // comment on the last line without a trailing newline is left
    in place, since the pattern requires the terminating "\n".
    """
    # Raw strings: "/\*" in a non-raw string is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning on modern Python).
    string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", string)
    return re.sub(re.compile(r"//.*?\n"), "", string)
71
72
73
def pack(fmt, *args):
    """struct.pack with two extra codes: P = pointer-sized unsigned int,
    X = padding to keep pointer alignment on 64-bit targets."""
    if WORD_BYTES == 4:
        substitutions = {"P": "I", "X": ""}
    else:
        substitutions = {"P": "Q", "X": "xxxx"}
    for code, replacement in substitutions.items():
        fmt = fmt.replace(code, replacement)
    return struct.pack(ENDIAN_MARKER + fmt, *args)
79
80
81
def to_bcd(num):
    """Encode a non-negative integer as binary-coded decimal (one decimal
    digit per nibble)."""
    assert num >= 0
    encoded = 0
    shift = 0
    while num > 0:
        num, digit = divmod(num, 10)
        encoded |= digit << shift
        shift += 4
    return encoded
90
91
92
def parse_f80(data):
    """Decode a 10-byte IEEE 754 extended-precision (80-bit) float, as used
    for the sample rate in AIFC COMM chunks."""
    exp_bits, mantissa_bits = struct.unpack(">HQ", data)
    sign_bit = exp_bits & (1 << 15)
    exp_bits ^= sign_bit
    sign = -1 if sign_bit else 1
    if exp_bits == 0 and mantissa_bits == 0:
        return sign * 0.0
    validate(exp_bits != 0, "sample rate is a denormal")
    validate(exp_bits != 0x7FFF, "sample rate is infinity/nan")
    # The mantissa has an explicit leading bit: value = mantissa/2^63 * 2^(e-bias).
    mantissa = float(mantissa_bits) / 2 ** 63
    return sign * mantissa * 2.0 ** (exp_bits - 0x3FFF)
103
104
105
def parse_aifc_loop(data):
    """Parse a VADPCMLOOPS chunk into a Loop tuple (single loop only)."""
    validate(len(data) == 48, "loop chunk size should be 48")
    version, nloops, start, end, count = struct.unpack(">HHIIi", data[:16])
    validate(version == 1, "loop version doesn't match")
    validate(nloops == 1, "only one loop is supported")
    # The remaining 32 bytes are 16 big-endian signed shorts of decoder state.
    state = [
        struct.unpack(">h", data[i : i + 2])[0]
        for i in range(16, len(data), 2)
    ]
    return Loop(start, end, count, state)
114
115
116
def parse_aifc_book(data):
    """Parse a VADPCMCODES chunk into a Book tuple."""
    version, order, npredictors = struct.unpack(">hhh", data[:6])
    validate(version == 1, "codebook version doesn't match")
    # Each predictor contributes 8 shorts per order (16 bytes per order*pred).
    validate(
        len(data) == 6 + 16 * order * npredictors,
        "predictor book chunk size doesn't match",
    )
    table = [
        struct.unpack(">h", data[i : i + 2])[0]
        for i in range(6, len(data), 2)
    ]
    return Book(order, npredictors, table)
127
128
129
def parse_aifc(data, name, fname):
    # Parse a whole .aifc file (raw bytes) into an Aifc object.
    # name: sample name; fname: file path (kept for diagnostics).
    # Raises (via validate) on malformed input.
    validate(data[:4] == b"FORM", "must start with FORM")
    validate(data[8:12] == b"AIFC", "format must be AIFC")
    # Walk the FORM body into (chunk-type, chunk-data) sections; each chunk
    # is a 4-byte tag, a 4-byte big-endian length, then 2-byte-aligned data.
    i = 12
    sections = []
    while i < len(data):
        tp = data[i : i + 4]
        (le,) = struct.unpack(">I", data[i + 4 : i + 8])
        i += 8
        sections.append((tp, data[i : i + le]))
        i = align(i + le, 2)

    audio_data = None
    vadpcm_codes = None
    vadpcm_loops = None
    sample_rate = None

    for (tp, data) in sections:
        if tp == b"APPL" and data[:4] == b"stoc":
            # Application-specific chunk: a length-prefixed (pascal-style)
            # sub-type string follows the "stoc" marker.
            plen = data[4]
            tp = data[5 : 5 + plen]
            data = data[align(5 + plen, 2) :]
            if tp == b"VADPCMCODES":
                vadpcm_codes = data
            elif tp == b"VADPCMLOOPS":
                vadpcm_loops = data
        elif tp == b"SSND":
            # Sound data chunk: skip the 8-byte offset/blockSize header.
            audio_data = data[8:]
        elif tp == b"COMM":
            # Common chunk: sample rate is an 80-bit float at offset 8.
            sample_rate = parse_f80(data[8:18])

    validate(sample_rate is not None, "no COMM section")
    validate(audio_data is not None, "no SSND section")
    validate(vadpcm_codes is not None, "no VADPCM table")

    book = parse_aifc_book(vadpcm_codes)
    # Loops are optional; samples without one get a synthesized loop record
    # during CTL serialization.
    loop = parse_aifc_loop(vadpcm_loops) if vadpcm_loops is not None else None
    return Aifc(name, fname, audio_data, sample_rate, book, loop)
167
168
169
class ReserveSerializer:
    """Sequentially builds a byte blob while allowing placeholder holes:
    reserve() returns a list that the caller later appends bytes to, and
    finish() checks each hole was filled to exactly its reserved size."""

    def __init__(self):
        self.parts = []
        self.sizes = []
        self.size = 0

    def add(self, part):
        assert isinstance(part, (bytes, list))
        self.parts.append(part)
        self.sizes.append(len(part))
        self.size += len(part)

    def reserve(self, space):
        placeholder = []
        self.parts.append(placeholder)
        self.sizes.append(space)
        self.size += space
        return placeholder

    def align(self, alignment):
        padded_size = (self.size + alignment - 1) & -alignment
        self.add(b"\0" * (padded_size - self.size))

    def finish(self):
        chunks = []
        for part, expected in zip(self.parts, self.sizes):
            if isinstance(part, list):
                part = b"".join(part)
            assert len(part) == expected, (
                "unfulfilled reservation of size {}, only got {}".format(
                    expected, len(part)
                )
            )
            chunks.append(part)
        return b"".join(chunks)
202
203
204
class GarbageSerializer:
    # Serializer whose padding can replicate "garbage" bytes: it models the
    # original tool's cyclic 0x10000-byte staging buffer, so align_garbage()
    # pads with whatever stale bytes that buffer would still contain.

    def __init__(self):
        # One list of (write position, bytes) per reset_garbage_pos() epoch.
        self.garbage_bufs = [[]]
        self.parts = []
        self.size = 0
        # Current write position within the modeled cyclic buffer.
        self.garbage_pos = 0

    def reset_garbage_pos(self):
        # Start a new epoch: the modeled buffer's write cursor returns to 0.
        self.garbage_bufs.append([])
        self.garbage_pos = 0

    def add(self, part):
        assert isinstance(part, bytes)
        self.parts.append(part)
        self.garbage_bufs[-1].append((self.garbage_pos, part))
        self.size += len(part)
        self.garbage_pos += len(part)

    def align(self, alignment):
        # Plain zero padding up to the next multiple of alignment.
        new_size = (self.size + alignment - 1) & -alignment
        self.add((new_size - self.size) * b"\0")

    def garbage_at(self, pos):
        # Find the last write to position pos & 0xffff, assuming a cyclic
        # buffer of size 0x10000 where the write position is reset to 0 on
        # each call to reset_garbage_pos.
        pos &= 0xFFFF
        for bufs in self.garbage_bufs[::-1]:
            for (bpos, buf) in bufs[::-1]:
                # q: largest index <= end-of-buf that is congruent to pos
                # modulo 0x10000; if it lands inside this buffer, that byte
                # is the last thing written at the cyclic position.
                q = ((bpos + len(buf) - 1 - pos) & ~0xFFFF) + pos
                if q >= bpos:
                    return buf[q - bpos]
        return 0

    def align_garbage(self, alignment):
        # Pad using bytes reproducing the stale cyclic-buffer contents.
        while self.size % alignment != 0:
            self.add(bytes([self.garbage_at(self.garbage_pos)]))

    def finish(self):
        return b"".join(self.parts)
244
245
246
def validate_json_format(json, fmt, forstr=""):
    """Check that json has every key in fmt with the expected type.

    A fmt value that is a [lo, hi] list means "integer in that range";
    a float requirement also accepts ints. Raises via validate()."""
    type_names = {
        str: "a string",
        dict: "an object",
        int: "an integer",
        float: "a floating point number",
        list: "an array",
    }
    for key, expected in fmt.items():
        validate(key in json, 'missing key "' + key + '"', forstr)
        if isinstance(expected, list):
            validate_int_in_range(
                json[key], expected[0], expected[1], '"' + key + '"', forstr
            )
            continue
        type_ok = isinstance(json[key], expected) or (
            expected == float and isinstance(json[key], int)
        )
        validate(
            type_ok,
            '"{}" must be {}'.format(key, type_names[expected]),
            forstr,
        )
265
266
267
def validate_int_in_range(val, lo, hi, msg, forstr=""):
    """Check that val is an int within [lo, hi], failing via validate()."""
    validate(isinstance(val, int), "{} must be an integer".format(msg), forstr)
    in_range = lo <= val <= hi
    validate(
        in_range, "{} must be in range {} to {}".format(msg, lo, hi), forstr
    )
272
273
274
def validate_sound(json, sample_bank, forstr=""):
    """Check a sound object: a "sample" key naming an entry of sample_bank,
    plus an optional numeric "tuning"."""
    validate_json_format(json, {"sample": str}, forstr)
    if "tuning" in json:
        validate_json_format(json, {"tuning": float}, forstr)
    sample_exists = json["sample"] in sample_bank.name_to_entry
    validate(
        sample_exists,
        "reference to sound {} which isn't found in sample bank {}".format(
            json["sample"], sample_bank.name
        ),
        forstr,
    )
285
286
287
def validate_bank_toplevel(json):
    """Check that a sound bank JSON is an object with the required keys."""
    validate(isinstance(json, dict), "must have a top-level object")
    required_keys = {
        "envelopes": dict,
        "sample_bank": str,
        "instruments": dict,
        "instrument_list": list,
    }
    validate_json_format(json, required_keys)
298
299
300
def normalize_sound_json(json):
    # Convert the shorthand {"sound": "str"} into {"sound": {"sample": "str"}}
    # for every drum and instrument sound slot, in place.
    targets = []
    for inst in json["instruments"].values():
        if isinstance(inst, list):
            targets.extend((drum, "sound") for drum in inst)
        else:
            targets.extend(
                (inst, key) for key in ("sound_lo", "sound", "sound_hi")
            )
    for obj, key in targets:
        if isinstance(obj, dict) and isinstance(obj.get(key), str):
            obj[key] = {"sample": obj[key]}
314
315
316
def validate_bank(json, sample_bank):
    # Validate a parsed sound bank JSON against its sample bank: the
    # optional date, every envelope, every drum and instrument, and the
    # instrument list. Raises (via validate) on the first problem found.

    # Optional "date" must look like yyyy-mm-dd.
    if "date" in json:
        validate(
            isinstance(json["date"], str)
            and re.match(r"[0-9]{4}-[0-9]{2}-[0-9]{2}\Z", json["date"]),
            "date must have format yyyy-mm-dd",
        )

    # Envelopes: lists of [delay, value] pairs and/or the control words
    # stop/hang/restart/goto, and each must end on a control word.
    for key, env in json["envelopes"].items():
        validate(isinstance(env, list), 'envelope "' + key + '" must be an array')
        last_fine = False
        for entry in env:
            if entry in ["stop", "hang", "restart"]:
                last_fine = True
            else:
                validate(
                    isinstance(entry, list) and len(entry) == 2,
                    'envelope entry in "'
                    + key
                    + '" must be a list of length 2, or one of stop/hang/restart',
                )
                if entry[0] == "goto":
                    validate_int_in_range(
                        entry[1], 0, len(env) - 2, "envelope goto target out of range:"
                    )
                    last_fine = True
                else:
                    validate_int_in_range(
                        entry[0], 1, 2 ** 16 - 4, "envelope entry's first part"
                    )
                    validate_int_in_range(
                        entry[1], 0, 2 ** 16 - 1, "envelope entry's second part"
                    )
                    last_fine = False
        validate(
            last_fine, 'envelope "{}" must end with stop/hang/restart/goto'.format(key)
        )

    # Split instruments into the percussion (drum) list and normal ones.
    drums = []
    instruments = []
    instrument_names = set()
    for name, inst in json["instruments"].items():
        if name == "percussion":
            validate(isinstance(inst, list), "drums entry must be a list")
            drums = inst
        else:
            validate(isinstance(inst, dict), "instrument entry must be an object")
            instruments.append((name, inst))
            instrument_names.add(name)

    # Every drum needs release_rate/pan/envelope/sound with valid references.
    for drum in drums:
        validate(isinstance(drum, dict), "drum entry must be an object")
        validate_json_format(
            drum,
            {"release_rate": [0, 255], "pan": [0, 128], "envelope": str, "sound": dict},
        )
        validate_sound(drum["sound"], sample_bank)
        validate(
            drum["envelope"] in json["envelopes"],
            "reference to non-existent envelope " + drum["envelope"],
            "drum",
        )

    # Sentinel marking sound_lo/sound_hi slots that were defaulted in below
    # (compared by identity so a user-provided {} is not confused with it).
    no_sound = {}

    for name, inst in instruments:
        forstr = "instrument " + name
        # sound_lo/hi and normal_range_lo/hi must come in pairs; absent
        # pairs get defaults (full key range, no extra sound).
        for lohi in ["lo", "hi"]:
            nr = "normal_range_" + lohi
            so = "sound_" + lohi
            if nr in inst:
                validate(so in inst, nr + " is specified, but not " + so, forstr)
            if so in inst:
                validate(nr in inst, so + " is specified, but not " + nr, forstr)
            else:
                inst[so] = no_sound
        if "normal_range_lo" not in inst:
            inst["normal_range_lo"] = 0
        if "normal_range_hi" not in inst:
            inst["normal_range_hi"] = 127

        validate_json_format(
            inst,
            {
                "release_rate": [0, 255],
                "envelope": str,
                "normal_range_lo": [0, 127],
                "normal_range_hi": [0, 127],
                "sound_lo": dict,
                "sound": dict,
                "sound_hi": dict,
            },
            forstr,
        )

        if "ifdef" in inst:
            validate(
                isinstance(inst["ifdef"], list)
                and all(isinstance(x, str) for x in inst["ifdef"]),
                '"ifdef" must be an array of strings',
            )

        validate(
            inst["normal_range_lo"] <= inst["normal_range_hi"],
            "normal_range_lo > normal_range_hi",
            forstr,
        )
        validate(
            inst["envelope"] in json["envelopes"],
            "reference to non-existent envelope " + inst["envelope"],
            forstr,
        )
        # Remove the defaulted sound slots again; validate the real ones.
        for key in ["sound_lo", "sound", "sound_hi"]:
            if inst[key] is no_sound:
                del inst[key]
            else:
                validate_sound(inst[key], sample_bank, forstr)

    # instrument_list entries must be null or unique references to existing
    # instruments.
    seen_instruments = set()
    for inst in json["instrument_list"]:
        if inst is None:
            continue
        validate(
            isinstance(inst, str),
            "instrument list should contain only strings and nulls",
        )
        validate(
            inst in instrument_names, "reference to non-existent instrument " + inst
        )
        validate(
            inst not in seen_instruments, inst + " occurs twice in the instrument list"
        )
        seen_instruments.add(inst)

    # Conversely, every declared instrument must appear in the list.
    for inst in instrument_names:
        validate(inst in seen_instruments, "unreferenced instrument " + inst)
452
453
454
def apply_ifs(json, defines):
    """Recursively resolve {"ifdef": [...], "then": ..., "else": ...} nodes
    against the set of defined symbols, mutating lists/dicts in place."""
    is_conditional = (
        isinstance(json, dict)
        and "ifdef" in json
        and "then" in json
        and "else" in json
    )
    if is_conditional:
        validate_json_format(json, {"ifdef": list})
        taken = any(symbol in defines for symbol in json["ifdef"])
        branch = json["then"] if taken else json["else"]
        return apply_ifs(branch, defines)
    if isinstance(json, list):
        json[:] = [apply_ifs(item, defines) for item in json]
    elif isinstance(json, dict):
        for key in json:
            json[key] = apply_ifs(json[key], defines)
    return json
466
467
468
def apply_version_diffs(json, defines):
    """Apply version-specific bank tweaks in place: the EU date swap, and
    dropping instruments whose "ifdef" list matches no defined symbol."""
    date_str = json.get("date")
    if "VERSION_EU" in defines and isinstance(date_str, str):
        json["date"] = date_str.replace("1996-03-19", "1996-06-24")

    removed = {
        key
        for key, inst in json["instruments"].items()
        if isinstance(inst, dict)
        and isinstance(inst.get("ifdef"), list)
        and not any(symbol in defines for symbol in inst["ifdef"])
    }
    for key in removed:
        del json["instruments"][key]
        json["instrument_list"].remove(key)
484
485
486
def mark_sample_bank_uses(bank):
    """Record that bank uses its sample bank, and flag every sample the
    bank's drums/instruments reference as used."""
    bank.sample_bank.uses.append(bank)
    entries = bank.sample_bank.name_to_entry

    def mark_used(sample_name):
        entries[sample_name].used = True

    for inst in bank.json["instruments"].values():
        if isinstance(inst, list):
            # Percussion: one sound per drum.
            for drum in inst:
                mark_used(drum["sound"]["sample"])
        else:
            # Instrument: mandatory "sound", optional lo/hi variants.
            if "sound_lo" in inst:
                mark_used(inst["sound_lo"]["sample"])
            mark_used(inst["sound"]["sample"])
            if "sound_hi" in inst:
                mark_used(inst["sound_hi"]["sample"])
502
503
504
def serialize_ctl(bank, base_ser, is_shindou):
    # Serialize one sound bank's CTL data (sample headers, codebooks, loops,
    # envelopes, instruments, drums) into base_ser. Returns 4 bytes of
    # Shindou per-bank metadata: sample bank index and instrument/drum counts.
    json = bank.json

    drums = []
    instruments = []
    for inst in json["instruments"].values():
        if isinstance(inst, list):
            drums = inst
        else:
            instruments.append(inst)

    # US/JP/EU: per-bank header with counts and a BCD-encoded date.
    if not is_shindou:
        y, m, d = map(int, json.get("date", "0000-00-00").split("-"))
        date = y * 10000 + m * 100 + d
        base_ser.add(
            pack(
                "IIII",
                len(json["instrument_list"]),
                len(drums),
                1 if len(bank.sample_bank.uses) > 1 else 0,
                to_bcd(date),
            )
        )

    ser = ReserveSerializer()
    # Pointer to the drum pointer table (filled in at the end), or null.
    if drums:
        drum_pos_buf = ser.reserve(WORD_BYTES)
    else:
        ser.add(b"\0" * WORD_BYTES)
        drum_pos_buf = None

    # Instrument pointer table, filled once the instruments are emitted.
    inst_pos_buf = ser.reserve(WORD_BYTES * len(json["instrument_list"]))
    ser.align(16)

    # Collect sample references in emission order (drums, then per
    # instrument its lo/normal/hi sounds).
    used_samples = []
    for inst in json["instruments"].values():
        if isinstance(inst, list):
            for drum in inst:
                used_samples.append(drum["sound"]["sample"])
        else:
            if "sound_lo" in inst:
                used_samples.append(inst["sound_lo"]["sample"])
            used_samples.append(inst["sound"]["sample"])
            if "sound_hi" in inst:
                used_samples.append(inst["sound_hi"]["sample"])

    # Emit each referenced sample's header, codebook and loop exactly once.
    sample_name_to_addr = {}
    for name in used_samples:
        if name in sample_name_to_addr:
            continue
        sample_name_to_addr[name] = ser.size
        aifc = bank.sample_bank.name_to_entry[name]
        sample_len = len(aifc.data)

        # Sample
        ser.add(pack("IX", align(sample_len, 2) if is_shindou else 0))
        ser.add(pack("P", aifc.offset))
        # Loop/book pointers are back-filled once their offsets are known.
        loop_addr_buf = ser.reserve(WORD_BYTES)
        book_addr_buf = ser.reserve(WORD_BYTES)
        if not is_shindou:
            ser.add(pack("I", align(sample_len, 2)))
        ser.align(16)

        # Book
        book_addr_buf.append(pack("P", ser.size))
        ser.add(pack("ii", aifc.book.order, aifc.book.npredictors))
        for x in aifc.book.table:
            ser.add(pack("h", x))
        ser.align(16)

        # Loop
        loop_addr_buf.append(pack("P", ser.size))
        if aifc.loop is None:
            # No loop in the AIFC: synthesize a non-looping record whose
            # end is derived from the ADPCM data length (9 bytes -> 16
            # samples per frame).
            assert sample_len % 9 in [0, 1]
            end = sample_len // 9 * 16 + (sample_len % 2) + (sample_len % 9)
            ser.add(pack("IIiI", 0, end, 0, 0))
        else:
            ser.add(pack("IIiI", aifc.loop.start, aifc.loop.end, aifc.loop.count, 0))
            assert aifc.loop.count != 0
            for x in aifc.loop.state:
                ser.add(pack("h", x))
        ser.align(16)

    # Emit envelopes, remembering each one's offset for later references.
    env_name_to_addr = {}
    for name, env in json["envelopes"].items():
        env_name_to_addr[name] = ser.size
        for entry in env:
            # Control words become sentinel first values near 2^16.
            if entry == "stop":
                entry = [0, 0]
            elif entry == "hang":
                entry = [2 ** 16 - 1, 0]
            elif entry == "restart":
                entry = [2 ** 16 - 3, 0]
            elif entry[0] == "goto":
                entry[0] = 2 ** 16 - 2
            # Envelopes are always written as big endian, to match sequence files
            # which are byte blobs and can embed envelopes.
            ser.add(struct.pack(">HH", *entry))
        ser.align(16)

    def ser_sound(sound):
        # Serialize one sound reference: sample pointer + float tuning.
        sample_addr = (
            0 if sound["sample"] is None else sample_name_to_addr[sound["sample"]]
        )
        if "tuning" in sound:
            tuning = sound["tuning"]
        else:
            # Default tuning: ratio of the sample's rate to 32 kHz.
            aifc = bank.sample_bank.name_to_entry[sound["sample"]]
            tuning = aifc.sample_rate / 32000
        ser.add(pack("PfX", sample_addr, tuning))

    # Placeholder written for absent sound_lo/sound_hi slots.
    no_sound = {"sample": None, "tuning": 0.0}

    # Emit instruments, recording offsets for the pointer table.
    inst_name_to_pos = {}
    for name, inst in json["instruments"].items():
        if isinstance(inst, list):
            continue
        inst_name_to_pos[name] = ser.size
        env_addr = env_name_to_addr[inst["envelope"]]
        ser.add(
            pack(
                "BBBBXP",
                0,
                inst.get("normal_range_lo", 0),
                inst.get("normal_range_hi", 127),
                inst["release_rate"],
                env_addr,
            )
        )
        ser_sound(inst.get("sound_lo", no_sound))
        ser_sound(inst["sound"])
        ser_sound(inst.get("sound_hi", no_sound))
        ser.align(16)

    # Fill the instrument pointer table (null pointers for list gaps).
    for name in json["instrument_list"]:
        if name is None:
            inst_pos_buf.append(pack("P", 0))
            continue
        inst_pos_buf.append(pack("P", inst_name_to_pos[name]))

    # Emit drums, then the drum pointer table they are reached through.
    if drums:
        drum_poses = []
        for drum in drums:
            drum_poses.append(ser.size)
            ser.add(pack("BBBBX", drum["release_rate"], drum["pan"], 0, 0))
            ser_sound(drum["sound"])
            env_addr = env_name_to_addr[drum["envelope"]]
            ser.add(pack("P", env_addr))
            ser.align(16)

        drum_pos_buf.append(pack("P", ser.size))
        for pos in drum_poses:
            ser.add(pack("P", pos))
        ser.align(16)

    base_ser.add(ser.finish())

    # Shindou header metadata for this bank.
    return pack(
        "hh", (bank.sample_bank.index << 8) | 0xFF, (len(json["instrument_list"]) << 8) | len(drums)
    )
664
665
666
def serialize_tbl(sample_bank, ser, is_shindou):
    """Write every used sample of sample_bank into ser, recording each
    sample's offset relative to the bank's start (used later by
    serialize_ctl)."""
    ser.reset_garbage_pos()
    base_addr = ser.size
    for aifc in sample_bank.entries:
        if not aifc.used:
            continue
        ser.align(16)
        aifc.offset = ser.size - base_addr
        ser.add(aifc.data)
        ser.align(2)
    # Shindou zero-pads most banks; the rest mimic the original tool's
    # uninitialized ("garbage") padding.
    zero_pad = is_shindou and sample_bank.index not in [4, 10]
    if zero_pad:
        ser.align(16)
    else:
        ser.align_garbage(16)
680
681
682
def serialize_seqfile(
    out_filename,
    out_header_filename,
    entries,
    serialize_entry,
    entry_list,
    magic,
    is_shindou,
    extra_padding=True,
):
    # Serialize entries into a sequence-style container file. Each entry is
    # written by serialize_entry(entry, ser, is_shindou); the surrounding
    # header differs between Shindou (separate header file) and US/JP/EU
    # (header + offset table in the same file).
    data_ser = GarbageSerializer()
    entry_offsets = []
    entry_lens = []
    entry_meta = []
    for entry in entries:
        entry_offsets.append(data_ser.size)
        ret = serialize_entry(entry, data_ser, is_shindou)
        entry_meta.append(ret)
        entry_lens.append(data_ser.size - entry_offsets[-1])
    data = data_ser.finish()

    if is_shindou:
        ser = ReserveSerializer()
        ser.add(pack("H", len(entries)))
        ser.align(16)
        medium = 0x02  # cartridge
        sh_magic = 0x04 if magic == TYPE_TBL else 0x03

        # Ignore entry_list and loop over all entries instead. This makes a
        # difference for sample banks, where US/JP/EU doesn't use a normal
        # header for sample banks but instead has a mapping from sound bank to
        # sample bank offset/length. Shindou uses a normal header and makes the
        # mapping part of the sound bank header instead (part of entry_meta).
        for i in range(len(entries)):
            ser.add(pack("PIbb", entry_offsets[i], entry_lens[i], medium, sh_magic))
            ser.add(entry_meta[i] or b"\0\0\0\0")
            ser.align(WORD_BYTES)

        if out_header_filename:
            with open(out_header_filename, "wb") as f:
                f.write(ser.finish())
        with open(out_filename, "wb") as f:
            f.write(data)

    else:
        ser = ReserveSerializer()
        ser.add(pack("HHX", magic, len(entry_list)))
        # Offset/length table, back-filled once data_start is known.
        table = ser.reserve(len(entry_list) * 2 * WORD_BYTES)
        ser.align(16)
        data_start = ser.size

        ser.add(data)
        if extra_padding:
            ser.add(b"\0")
        ser.align(64)

        for index in entry_list:
            table.append(pack("P", entry_offsets[index] + data_start))
            table.append(pack("IX", entry_lens[index]))
        with open(out_filename, "wb") as f:
            f.write(ser.finish())
743
744
745
def validate_and_normalize_sequence_json(json, bank_names, defines):
    """Validate sequences.json and normalize each entry in place to either
    a list of sound bank names or None (disabled)."""
    validate(isinstance(json, dict), "must have a top-level object")
    if "comment" in json:
        del json["comment"]
    for key, seq in json.items():
        if isinstance(seq, dict):
            # Conditional form: {"ifdef": [...], "banks": [...]}.
            validate_json_format(seq, {"ifdef": list, "banks": list}, key)
            validate(
                all(isinstance(x, str) for x in seq["ifdef"]),
                '"ifdef" must be an array of strings',
                key,
            )
            enabled = any(symbol in defines for symbol in seq["ifdef"])
            seq = seq["banks"] if enabled else None
            json[key] = seq
        if isinstance(seq, list):
            for bank in seq:
                validate(
                    isinstance(bank, str),
                    "bank list must be an array of strings",
                    key,
                )
                validate(
                    bank in bank_names,
                    "reference to non-existing sound bank " + bank,
                    key,
                )
        else:
            validate(seq is None, "bad JSON type, expected null, array or object", key)
772
773
774
def write_sequences(
    inputs,
    out_filename,
    out_header_filename,
    out_bank_sets,
    sound_bank_dir,
    seq_json,
    defines,
    is_shindou,
):
    # Assemble all .m64 sequence files (inputs) into one sequence blob,
    # plus a Shindou header file and a bank-sets table that maps each
    # sequence to the sound banks it may use, driven by sequences.json.
    bank_names = sorted(
        [os.path.splitext(os.path.basename(x))[0] for x in os.listdir(sound_bank_dir)]
    )

    try:
        with open(seq_json, "r") as inf:
            data = inf.read()
            data = strip_comments(data)
            json = orderedJsonDecoder.decode(data)
            validate_and_normalize_sequence_json(json, bank_names, defines)

    except Exception as e:
        fail("failed to parse " + str(seq_json) + ": " + str(e))

    # Sequence files are named like "NN_name.m64"; sort by basename and
    # check for duplicate names and missing sequences.json entries.
    inputs.sort(key=lambda f: os.path.basename(f))
    name_to_fname = {}
    for fname in inputs:
        name = os.path.splitext(os.path.basename(fname))[0]
        if name in name_to_fname:
            fail(
                "Files "
                + fname
                + " and "
                + name_to_fname[name]
                + " conflict. Remove one of them."
            )
        name_to_fname[name] = fname
        if name not in json:
            fail(
                "Sequence file " + fname + " is not mentioned in sequences.json. "
                "Either assign it a list of sound banks, or set it to null to "
                "explicitly leave it out from the build."
            )

    # Conversely, every non-null sequences.json entry needs a file.
    for key, seq in json.items():
        if key not in name_to_fname and seq is not None:
            fail(
                "sequences.json assigns sound banks to "
                + key
                + ", but there is no such sequence file. Either remove the entry (or "
                "set it to null), or create sound/sequences/" + key + ".m64."
            )

    # Build an index -> name table from the hex prefix of each name.
    ind_to_name = []
    for key in json:
        ind = int(key.split("_")[0], 16)
        while len(ind_to_name) <= ind:
            ind_to_name.append(None)
        if ind_to_name[ind] is not None:
            fail(
                "Sequence files "
                + key
                + " and "
                + ind_to_name[ind]
                + " have the same index. Renumber or delete one of them."
            )
        ind_to_name[ind] = key

    # Trim trailing unused slots.
    while ind_to_name and json.get(ind_to_name[-1]) is None:
        ind_to_name.pop()

    def serialize_file(name, ser, is_shindou):
        # Copy one .m64 file verbatim into the blob (skipping null slots).
        if json.get(name) is None:
            return
        ser.reset_garbage_pos()
        with open(name_to_fname[name], "rb") as f:
            ser.add(f.read())
        if is_shindou and name.startswith("17"):
            ser.align(16)
        else:
            ser.align_garbage(16)

    serialize_seqfile(
        out_filename,
        out_header_filename,
        ind_to_name,
        serialize_file,
        range(len(ind_to_name)),
        TYPE_SEQ,
        is_shindou,
        extra_padding=False,
    )

    # Bank sets: for each sequence, a count byte followed by its sound
    # bank indices in reverse order, reached via a 16-bit offset table.
    with open(out_bank_sets, "wb") as f:
        ser = ReserveSerializer()
        table = ser.reserve(len(ind_to_name) * 2)
        for name in ind_to_name:
            bank_set = json.get(name) or []
            table.append(pack("H", ser.size))
            ser.add(bytes([len(bank_set)]))
            for bank in bank_set[::-1]:
                ser.add(bytes([bank_names.index(bank)]))
        ser.align(16)
        f.write(ser.finish())
878
879
880
def main():
    # Command-line entry point: either assembles sample/sound banks into
    # .ctl/.tbl files, or (with --sequences) assembles .m64 sequence files.
    global STACK_TRACES
    global DUMP_INDIVIDUAL_BINS
    global ENDIAN_MARKER
    global WORD_BYTES
    need_help = False
    skip_next = 0
    cpp_command = None
    print_samples = False
    sequences_out_file = None
    sequences_header_out_file = None
    defines = []
    args = []
    # Hand-rolled argument parsing; skip_next consumes option arguments.
    for i, a in enumerate(sys.argv[1:], 1):
        if skip_next > 0:
            skip_next -= 1
            continue
        if a == "--help" or a == "-h":
            need_help = True
        elif a == "--cpp":
            cpp_command = sys.argv[i + 1]
            skip_next = 1
        elif a == "-D":
            defines.append(sys.argv[i + 1])
            skip_next = 1
        elif a == "--endian":
            endian = sys.argv[i + 1]
            if endian == "big":
                ENDIAN_MARKER = ">"
            elif endian == "little":
                ENDIAN_MARKER = "<"
            elif endian == "native":
                ENDIAN_MARKER = "="
            else:
                fail("--endian takes argument big, little or native")
            skip_next = 1
        elif a == "--bitwidth":
            bitwidth = sys.argv[i + 1]
            if bitwidth == "native":
                WORD_BYTES = struct.calcsize("P")
            else:
                if bitwidth not in ["32", "64"]:
                    fail("--bitwidth takes argument 32, 64 or native")
                WORD_BYTES = int(bitwidth) // 8
            skip_next = 1
        elif a.startswith("-D"):
            defines.append(a[2:])
        elif a == "--stack-trace":
            STACK_TRACES = True
        elif a == "--dump-individual-bins":
            DUMP_INDIVIDUAL_BINS = True
        elif a == "--print-samples":
            print_samples = True
        elif a == "--sequences":
            sequences_out_file = sys.argv[i + 1]
            sequences_header_out_file = sys.argv[i + 2]
            bank_sets_out_file = sys.argv[i + 3]
            sound_bank_dir = sys.argv[i + 4]
            sequence_json = sys.argv[i + 5]
            skip_next = 5
        elif a.startswith("-"):
            print("Unrecognized option " + a)
            sys.exit(1)
        else:
            args.append(a)

    # Only the symbol name matters for ifdef checks, not its value.
    defines_set = {d.split("=")[0] for d in defines}
    is_shindou = "VERSION_SH" in defines_set

    # Sequence mode: assemble .m64 files and exit.
    if sequences_out_file is not None and not need_help:
        write_sequences(
            args,
            sequences_out_file,
            sequences_header_out_file,
            bank_sets_out_file,
            sound_bank_dir,
            sequence_json,
            defines_set,
            is_shindou,
        )
        sys.exit(0)

    if need_help or len(args) != 6:
        print(
            "Usage: {} <samples dir> <sound bank dir>"
            " <out .ctl file> <out .ctl Shindou header file>"
            " <out .tbl file> <out .tbl Shindou header file>"
            " [--cpp <preprocessor>]"
            " [-D <symbol>]"
            " [--stack-trace]"
            " | --sequences <out sequence .bin> <out Shindou sequence header .bin> "
            "<out bank sets .bin> <sound bank dir> <sequences.json> <inputs...>".format(
                sys.argv[0]
            )
        )
        sys.exit(0 if need_help else 1)

    sample_bank_dir = args[0]
    sound_bank_dir = args[1]
    ctl_data_out = args[2]
    ctl_data_header_out = args[3]
    tbl_data_out = args[4]
    tbl_data_header_out = args[5]

    banks = []
    sample_banks = []
    name_to_sample_bank = {}

    # Load every sample bank: one subdirectory of .aifc files each.
    sample_bank_names = sorted(os.listdir(sample_bank_dir))
    for name in sample_bank_names:
        dir = os.path.join(sample_bank_dir, name)
        if not os.path.isdir(dir):
            continue
        entries = []
        for f in sorted(os.listdir(dir)):
            fname = os.path.join(dir, f)
            if not f.endswith(".aifc"):
                continue
            try:
                with open(fname, "rb") as inf:
                    data = inf.read()
                    entries.append(parse_aifc(data, f[:-5], fname))
            except Exception as e:
                fail("malformed AIFC file " + fname + ": " + str(e))
        if entries:
            sample_bank = SampleBank(name, entries)
            sample_banks.append(sample_bank)
            name_to_sample_bank[name] = sample_bank

    # Load, preprocess and validate every sound bank JSON.
    bank_names = sorted(os.listdir(sound_bank_dir))
    for f in bank_names:
        fname = os.path.join(sound_bank_dir, f)
        if not f.endswith(".json"):
            continue

        try:
            if cpp_command:
                # Run the JSON through the given C preprocessor (which also
                # strips comments).
                data = subprocess.run(
                    [cpp_command, fname] + ["-D" + x for x in defines],
                    stdout=subprocess.PIPE,
                    check=True,
                ).stdout.decode()
            else:
                with open(fname, "r") as inf:
                    data = inf.read()
                    data = strip_comments(data)
            bank_json = orderedJsonDecoder.decode(data)

            bank_json = apply_ifs(bank_json, defines_set)
            validate_bank_toplevel(bank_json)
            apply_version_diffs(bank_json, defines_set)
            normalize_sound_json(bank_json)

            sample_bank_name = bank_json["sample_bank"]
            validate(
                sample_bank_name in name_to_sample_bank,
                "sample bank " + sample_bank_name + " not found",
            )
            sample_bank = name_to_sample_bank[sample_bank_name]

            validate_bank(bank_json, sample_bank)

            bank = Bank(f[:-5], sample_bank, bank_json)
            mark_sample_bank_uses(bank)
            banks.append(bank)

        except Exception as e:
            fail("failed to parse bank " + fname + ": " + str(e))

    # Keep only sample banks that are used, ordered by their first using
    # sound bank, and assign indices in that order.
    sample_banks = [b for b in sample_banks if b.uses]
    sample_banks.sort(key=lambda b: b.uses[0].name)
    sample_bank_index = 0
    for sample_bank in sample_banks:
        sample_bank.index = sample_bank_index
        sample_bank_index += 1

    # Write the sample data (.tbl) first: this assigns aifc.offset values
    # that serialize_ctl depends on.
    serialize_seqfile(
        tbl_data_out,
        tbl_data_header_out,
        sample_banks,
        serialize_tbl,
        [x.sample_bank.index for x in banks],
        TYPE_TBL,
        is_shindou,
    )

    if DUMP_INDIVIDUAL_BINS:
        # Debug logic, may simplify diffing
        os.makedirs("ctl/", exist_ok=True)
        for b in banks:
            with open("ctl/" + b.name + ".bin", "wb") as f:
                ser = GarbageSerializer()
                serialize_ctl(b, ser, is_shindou)
                f.write(ser.finish())
        print("wrote to ctl/")

    serialize_seqfile(
        ctl_data_out,
        ctl_data_header_out,
        banks,
        serialize_ctl,
        list(range(len(banks))),
        TYPE_CTL,
        is_shindou,
    )

    if print_samples:
        for sample_bank in sample_banks:
            for entry in sample_bank.entries:
                if entry.used:
                    print(entry.fname)
1091
1092
1093
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
1095
1096