-
Notifications
You must be signed in to change notification settings - Fork 175
Expand file tree
/
Copy pathdx.js
More file actions
1529 lines (1520 loc) · 68.5 KB
/
dx.js
File metadata and controls
1529 lines (1520 loc) · 68.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/**
* @fileoverview Command-line utility for unpacking ZIP, ARC, IMG, JSON, and ISO containers
* @author Jeff Parsons <Jeff@pcjs.org>
* @copyright © 2012-2026 Jeff Parsons
* @license MIT <https://www.pcjs.org/LICENSE.txt>
*
* This file is part of PCjs, a computer emulation software project at <https://www.pcjs.org>.
*
* Some completely random and mildly interesting ZIP anomalies
* -----------------------------------------------------------
*
* This command:
*
* dx.js -lt https://discmaster.textfiles.com/file/29622/ibm0040-0049/ibm0047.tar/ibm0047/AVC-8.ZIP
*
* lists 9 files, two of which trigger warnings due to the --test (-t) option:
*
* Filename External Internal Method Ratio Attr Date Time CRC
* -------- -------- -------- ------ ----- ---- ---- ---- ---
* SAMPSHOW._ST 54082 54082 Store 0% 0x20 1991-05-22 01:03:00 9791da66 [FileHeader name: BVHXGA.DLL]
* SAMPSND._AD 350840 318710 Implode 9% 0x20 1991-05-22 01:03:00 e74e80bf [Missing FileHeader at 54160]
* SAMPSND._AU 1690 1690 Store 0% 0x20 1991-05-22 01:03:00 790b9590 AVC-8/SAMPSND._AU
* SAMPSND2._AD 508760 484636 Implode 5% 0x20 1991-05-22 01:03:00 9351eec9 AVC-8/SAMPSND2._AD
* SAMPSND2._AU 2920 1697 Implode 42% 0x20 1991-05-22 01:03:00 1138d881 AVC-8/SAMPSND2._AU
* SAMPVOIC._AD 52448 50099 Implode 4% 0x20 1991-05-22 01:03:00 1e1a9d7f AVC-8/SAMPVOIC._AD
* SPROTECT.EXE 20627 12461 Implode 40% 0x20 1991-05-22 01:03:00 918616b2 AVC-8/SPROTECT.EXE
* VOICE._AD 428672 410777 Implode 4% 0x20 1991-05-22 01:03:00 3a53989f AVC-8/VOICE._AD
* VOICE._AU 3190 3190 Store 0% 0x20 1991-05-22 01:03:00 15a9741a AVC-8/VOICE._AU
*
* https://discmaster.textfiles.com/file/29622/ibm0040-0049/ibm0047.tar/ibm0047/AVC-8.ZIP: 9 files, 2 warnings
*
* Since the archive's DirHeaders appear to have "issues", let's bypass them with --nodir (-n)
* and rely on a scan of the archive's FileHeaders instead. Now we see a different set of (8) files:
*
* Filename External Internal Method Ratio Attr Date Time CRC
* -------- -------- -------- ------ ----- ---- ---- ---- ---
* BVHXGA.DLL 4330 2638 Implode 39% 0x00 1991-04-22 09:28:30 39d50b6b AVC-8/BVHXGA.DLL
* DISPLAY.DLL 424864 161490 Implode 62% 0x00 1991-04-22 09:21:44 d595a00f AVC-8/DISPLAY.DLL
* EXOS2.DLL 35481 15040 Implode 58% 0x00 1991-06-06 08:59:16 ea2ee879 AVC-8/EXOS2.DLL
* README.XGA 1199 608 Implode 49% 0x00 1991-06-06 17:02:38 1069fd3d AVC-8/README.XGA
* XGA.DDP 336 211 Implode 37% 0x00 1991-05-30 13:03:58 76513e7e AVC-8/XGA.DDP
* XGALOAD.DLL 5592 2127 Implode 62% 0x00 1991-06-06 10:01:08 d3fac5b3 AVC-8/XGALOAD.DLL
* XGALOAD0.SYS 14993 3554 Implode 76% 0x00 1991-06-06 11:14:12 d94fd9d5 AVC-8/XGALOAD0.SYS
* XGARING0.SYS 15001 3567 Implode 76% 0x00 1991-04-05 11:47:36 ac04a726 AVC-8/XGARING0.SYS
*
* https://discmaster.textfiles.com/file/29622/ibm0040-0049/ibm0047.tar/ibm0047/AVC-8.ZIP: 8 files, 0 warnings
*
* And there are no warnings. So judicious use of -n can access otherwise inaccessible content.
*
* Interestingly, when you search for "AVC-8.ZIP" on discmaster.textfiles.com, you'll notice that they
* display the archive's comment (aka "banner"), which comes from the directory, but they don't display
* the entries in the directory, so perhaps they have some logic that "falls back" to the file headers
* whenever the directory headers appear corrupt.
*/
import fs from "fs/promises";
import glob from "glob";
import path from "path";
import zlib from "zlib";
import crypto from "crypto";
import CSV from "./csv.js";
import Format from "./format.js";
import DZip from "./dzip.js";
import DXC from "./dxc.js";
import BASFile from "./basfile.js";
import DataBuffer from "./db.js";
import { DiskInfo } from "./disk.js";
import { LegacyArc, LegacyZip } from "./legacy.js";
const format = new Format();

/**
 * printf(...args)
 *
 * Formats the given arguments with sprintf() and writes the result to stdout
 * (no newline is appended automatically).
 *
 * @param {...*} args (a format string followed by any values it references)
 */
const printf = function(...args) {
    process.stdout.write(format.sprintf(...args));
};
/**
 * pause(prompt)
 *
 * Displays a prompt and waits for input on stdin; resolves to true once data
 * arrives, or immediately to false when stdin is not an interactive terminal.
 *
 * @param {string} [prompt] (defaults to "Press return to continue...")
 * @returns {Promise<boolean>}
 */
const pause = async function(prompt)
{
    if (!process.stdin.isTTY) {
        return false;
    }
    printf("%s", prompt || "Press return to continue...");
    const stdin = process.stdin;
    stdin.resume();
    await new Promise((resolve) => stdin.once('data', resolve));
    stdin.pause();
    printf("\n");
    return true;
};
//
// Command-line option table, keyed by long option name.  Each entry defines the
// option's value type, its usage string, optional alias(es), and the description
// printed by the --help handler below; entries marked "internal: true" are
// omitted from the help listing.
//
const options = {
    "all": {
        type: "boolean",
        usage: "--all",
        alias: "-a",
        description: "process all items, regardless of extension"
    },
    "batch": {
        type: "string",
        usage: "--batch [file]",
        description: "process items listed in file"
    },
    "banner": {
        type: "boolean",
        usage: "--banner",
        alias: "-b",
        description: "print archive comments (banners)"
    },
    "compat": {
        type: "boolean",
        usage: "--compat",
        alias: "-c",
        description: "use CD-ROM 8.3 filenames only" // ie, ignore any supplementary (eg, "Joliet") volume descriptors
    },
    "csv": {
        type: "string",
        usage: "--csv [file]",
        description: "write list of item contents to CSV file"
    },
    "debug": {
        type: "boolean",
        usage: "--debug",
        description: "display debug information",
        internal: true
    },
    "dest": {
        type: "string",
        usage: "--dest [dir]",
        alias: "-d",
        description: "extract files into destination directory",
    },
    "dir": {
        type: "boolean",
        usage: "--dir",
        description: "print directory of item contents"
        //
        // NOTE: Directory listings can be truncated with --truncate. Also note that if you use --dir
        // instead of --list in conjunction with --csv, the CSV will include the full list of files but
        // WITHOUT reading and hashing their contents.
        //
    },
    "dump": {
        type: "string",
        usage: "--dump [spec]",
        description: "dump contents of matching files"
    },
    "extract": {
        type: "boolean",
        usage: "--extract",
        alias: ["-e", "-x"],
        description: "extract files (implied by --dest)"
    },
    "files": {
        type: "string",
        usage: "--files [spec]",
        description: "filter on matching files (eg, \"*.txt\")"
    },
    "filter": {
        type: "string",
        usage: "--filter [...]",
        alias: "-f",
        multiple: true,
        description: "filter on criteria (see --filter list)",
        //
        // Named filter values map to DZip/DXC exception bits ("list" is a pseudo-filter
        // with value 0 that triggers the filter listing in main()); main() additionally
        // accepts compression method names here, which are resolved separately.
        //
        options: {
            "list": {
                value: 0,
                description: "list available filters"
            },
            "banner": {
                value: DZip.EXCEPTIONS.BANNER,
                description: "process only commented archives"
            },
            "comment": {
                value: DZip.EXCEPTIONS.COMMENT,
                description: "process only commented entries"
            },
            "encrypted": {
                value: DZip.EXCEPTIONS.ENCRYPTED,
                description: "process only encrypted entries"
            },
            "split": {
                value: DZip.EXCEPTIONS.SPLIT,
                description: "process only split archives"
            },
            "wrong": {
                value: DZip.EXCEPTIONS.WRONGTYPE,
                description: "process only archives with the wrong type"
            },
            "unique": {
                value: DXC.EXCEPTIONS.UNIQUE,
                description: "process only unique items (CSV only)"
            }
        }
    },
    "in": {
        type: "string",
        usage: "--in [encoding]",
        description: "set input encoding (default is cp437)"
    },
    "list": {
        type: "boolean",
        usage: "--list",
        alias: "-l",
        description: "print list of item contents"
    },
    "nodir": {
        type: "boolean",
        usage: "--nodir",
        alias: "-n",
        description: "skip archive directory (scan for files)"
        //
        // Yes, scanning for file entries instead of relying on directory entries goes against ZIP protocol,
        // but sometimes an archive is screwed up or is part of a multi-disk archive that's missing some parts.
        //
        // For ISO images, this option tells readDirectory() to use the path table to build the directory,
        // instead of relying solely on directory records. Normally, we ignore the path table, because it's
        // redundant, but using it can improve performance when accessing an ISO image over a network connection.
        //
    },
    "out": {
        type: "string",
        usage: "--out [encoding]",
        description: "set output encoding (default is --in)"
    },
    "overwrite": {
        type: "boolean",
        usage: "--overwrite",
        alias: "-o",
        description: "overwrite existing files on extract"
    },
    "password": {
        type: "string",
        usage: "--password [...]",
        alias: ["-g", "-s"],
        description: "decrypt garbled entries using password",
        //
        // The original ARC utility used -g to "garble" entries, whereas PKUNZIP used -s to "scramble" entries;
        // going with --password seems more straightforward, but in honor of the utilities, we also allow -g and -s.
        //
    },
    "path": {
        type: "string",
        usage: "--path [spec]",
        description: "process matching items (eg, \"**/*.zip\")",
    },
    "pause": {
        type: "boolean",
        usage: "--pause",
        alias: "-p",
        description: "pause after each item until a key is pressed"
    },
    "pcjs": {
        type: "boolean",
        usage: "--pcjs",
        description: "use PCjs-specific rules when processing disk images",
        internal: true
    },
    "recurse": {
        type: "boolean",
        usage: "--recurse",
        alias: "-r",
        description: "process items within items"
    },
    "retries": {
        type: "number",
        usage: "--retries [n]",
        description: "number of retries for network requests (default: 3)",
        internal: true
    },
    "test": {
        type: "boolean",
        usage: "--test",
        alias: "-t",
        description: "test contents of all items"
    },
    "truncate": {
        type: "boolean",
        usage: "--truncate",
        description: "truncate directory of contents (see --dir)",
        internal: true
    },
    "type": {
        type: "string",
        usage: "--type [spec]",
        description: "type contents of matching files"
    },
    "update": {
        type: "boolean",
        usage: "--update",
        description: "generate an update script for the Internet Archive",
        internal: true
    },
    "upload": {
        type: "boolean",
        usage: "--upload",
        description: "generate an upload script for the Internet Archive"
    },
    "verbose": {
        type: "boolean",
        usage: "--verbose",
        alias: "-v",
        description: "display additional information"
    },
    "fileID": {
        type: "number",
        usage: "--fileID [id]",
        description: "override file ID (default: 1)",
        internal: true
    },
    "setID": {
        type: "number",
        usage: "--setID [id]",
        description: "override set ID (default: 1)",
        internal: true
    },
    "help": {
        type: "boolean",
        usage: "--help",
        alias: "-h",
        description: "display this help message",
        // Prints usage plus one line per non-internal option, including any aliases.
        handler: function() {
            printf("\nUsage:\n %s [option(s)] [item(s)]\n", path.basename(process.argv[1]));
            printf("\nProcesses ZIP, ARC, IMG, ISO, MDF and other container items\n");
            printf("\nOptions:\n");
            for (let key in options) {
                let option = options[key];
                if (option.internal) continue;
                let aliases = Array.isArray(option.alias)? option.alias.join(",") : option.alias;
                printf(" %-18s %s%s\n", option.usage, option.description, aliases? " [" + aliases + "]" : "");
            }
        }
    }
};
/**
 * displayFile(name, encoding, db, dump)
 *
 * Display file contents as specified: either decoded as text using the given
 * encoding, or (when dump is true) as a hex dump.
 *
 * @param {string|null} name
 * @param {string} encoding
 * @param {DataBuffer} db
 * @param {boolean} [dump]
 */
function displayFile(name, encoding, db, dump = false)
{
    if (name) {
        printf("%s:\n", name);
    }
    if (!dump) {
        printf("%s\n", db.toString(encoding));
        return;
    }
    //
    // Hex dump: a 4-byte offset, 16 hex bytes per row (with an extra gap after the
    // 8th), then the same 16 bytes rendered as printable ASCII between '|' bars,
    // using the same format as "hexdump"; eg:
    //
    //      00000000  50 4b 03 04 0a 00 00 00 00 00 a4 48 da 5a 00 00 |PK.........H.Z..|
    //
    for (let row = 0; row < db.length; row += 16) {
        let hex = format.sprintf("%08x ", row);
        let ascii = "";
        for (let col = 0; col < 16; col++) {
            if (col == 8) {
                hex += " ";
            }
            let pos = row + col;
            if (pos >= db.length) {
                //
                // Past the end of the buffer: pad both columns with spaces.
                //
                hex += " ";
                ascii += format.sprintf("%c", 32);
                continue;
            }
            let b = db.buffer[pos];
            hex += format.sprintf("%02x ", b);
            ascii += format.sprintf("%c", (b < 32 || b > 126)? 46 : b);
        }
        printf("%s\n", hex + " |" + ascii + "|");
    }
}
/**
 * getPhotoInfo(basePath, baseExt)
 *
 * Looks for an image file (.jpg or .png) alongside basePath (same base name with
 * baseExt replaced by an image extension) and, if one exists, parses its header
 * to determine the image dimensions.
 *
 * @param {string} basePath (path of the item the photo would accompany)
 * @param {string} baseExt (extension of basePath, replaced with each image extension)
 * @returns {Array} [photoPath, width, height] ([null, 0, 0] if no image file was found)
 */
async function getPhotoInfo(basePath, baseExt)
{
    const imageExts = ['.jpg', '.png'];
    for (const ext of imageExts) {
        const photoPath = path.join(path.dirname(basePath), path.basename(basePath, baseExt) + ext);
        try {
            //
            // Read file header to determine dimensions (courtesy of CoPilot).
            //
            let width = 0, height = 0;
            const file = await fs.open(photoPath, 'r');
            const stats = await file.stat();
            const buffer = Buffer.alloc(stats.size);
            await file.read(buffer, 0, buffer.length, 0);
            await file.close();
            if (buffer[0] === 0xFF && buffer[1] === 0xD8) {
                //
                // JPEG file: Parse JPEG segments to find SOF marker (Start of Frame)
                //
                let offset = 2;         // Skip the initial SOI marker (0xFF, 0xD8)
                while (offset < buffer.length - 8) {
                    if (buffer[offset] !== 0xFF) {
                        offset++;
                        continue;
                    }
                    const markerCode = buffer[offset + 1];
                    //
                    // SOF markers are in range 0xC0-0xCF, excluding 0xC4 (DHT), 0xC8 (JPG), and 0xCC (DAC)
                    //
                    if (markerCode >= 0xC0 && markerCode <= 0xCF &&
                        markerCode !== 0xC4 && markerCode !== 0xC8 && markerCode !== 0xCC) {
                        //
                        // SOF marker found, extract dimensions
                        // Format: FF xx SIZE(2 bytes) PRECISION(1 byte) HEIGHT(2 bytes) WIDTH(2 bytes) ...
                        //
                        height = (buffer[offset + 5] << 8) | buffer[offset + 6];
                        width = (buffer[offset + 7] << 8) | buffer[offset + 8];
                        break;
                    }
                    if (markerCode === 0xFF || markerCode === 0x00) {
                        //
                        // Skip padding bytes (0xFF) or a stuffed zero byte.
                        //
                        offset++;
                    }
                    else if (markerCode === 0x01 || (markerCode >= 0xD0 && markerCode <= 0xD9)) {
                        //
                        // Standalone markers (TEM, RST0-RST7, SOI, EOI) carry NO length field,
                        // so reading a "length" after them would misparse two arbitrary bytes;
                        // just step over the 2-byte marker itself.
                        //
                        offset += 2;
                    }
                    else {
                        //
                        // Any other marker introduces a segment whose length (which includes
                        // the 2 bytes of the length field itself) immediately follows it.
                        //
                        const segmentLength = (buffer[offset + 2] << 8) | buffer[offset + 3];
                        if (segmentLength < 2) break; // Invalid segment length
                        offset += segmentLength + 2;
                    }
                }
            }
            else if (buffer[0] === 0x89 && buffer[1] === 0x50 && buffer[2] === 0x4E && buffer[3] === 0x47) {
                //
                // PNG file: IHDR is the first chunk, so width/height are big-endian
                // 32-bit values at fixed offsets 16 and 20.
                //
                width = (buffer[16] << 24) | (buffer[17] << 16) | (buffer[18] << 8) | buffer[19];
                height = (buffer[20] << 24) | (buffer[21] << 16) | (buffer[22] << 8) | buffer[23];
            }
            return [photoPath, width, height];
        } catch (error) {
            //
            // Best-effort: a missing or unreadable image file is not an error;
            // just try the next extension.
            //
            // console.log(error);
        }
    }
    return [null, 0, 0];
}
/**
 * getList(text)
 *
 * Parses the contents of a batch file.  Two formats are supported: a simple list
 * of paths (one per line, blank lines and '#' comments ignored), or a YAML-style
 * list of Internet Archive items ("- id" entries followed by indented "- name"
 * lines), from which archive.org download URLs are constructed.
 *
 * @param {string} text
 * @returns {Array} (of objects containing 'path' and optional 'photo' and 'thumb' properties)
 */
function getList(text)
{
    const downloadURL = (id, name) => "https://archive.org/download/" + id + "/" + encodeURIComponent(name);
    if (text[0] != '-') {
        //
        // Simple format: every non-blank, non-comment line is a path.
        //
        return text.split(/\r?\n/).filter(line => line.length > 0 && !line.startsWith("#")).map(path => ({ path }));
    }
    let list = [];
    let items = text.split(/(^|\n)- /);
    items.splice(0, 2);                 // drop the leading split artifacts
    for (let item of items) {
        item = item.trim();
        if (!item) continue;
        let lines = item.split(/\n/);
        let id = lines[0].trim();
        let fileNames = [], photoNames = [], thumbNames = [];
        for (let i = 1; i < lines.length; i++) {
            let line = lines[i].trim();
            if (!line) {
                printf("warning: empty line in item %s\n", id);
                continue;
            }
            let match = line.match(/^([-+]) (.*)$/);
            if (!match) {
                printf("warning: unrecognized line in item %s: %s\n", id, line);
                continue;
            }
            if (match[1] != "-") continue;      // '+' entries are ignored
            let matchFile = match[2].match(/^([^_].*)(\.iso|\.cdr|\.mdf|\.7z)$/i);
            if (matchFile) {
                fileNames.push(matchFile[1] + matchFile[2]);
                continue;
            }
            let matchPhoto = match[2].match(/^([^_].*)(\.jpe?g|\.png|\.tiff?)$/i);
            if (matchPhoto) {
                //
                // Images whose base name ends with "_thumb" are thumbnails.
                //
                if (matchPhoto[1].endsWith("_thumb")) {
                    thumbNames.push(matchPhoto[1] + matchPhoto[2]);
                } else {
                    photoNames.push(matchPhoto[1] + matchPhoto[2]);
                }
            }
        }
        if (!fileNames.length) {
            printf("warning: no file name(s) in item %s\n", id);
            continue;
        }
        for (let fileName of fileNames) {
            //
            // Find a photo in photoNames that has the same base name as fileName.
            //
            let baseName = fileName.replace(/\.(iso|cdr|mdf|7z)$/i, "");
            let photoName = photoNames.find(name => name.startsWith(baseName)) || photoNames[0] || "";
            let thumbName = thumbNames.find(name => name.startsWith(baseName + "_thumb")) || thumbNames[0] || "";
            list.push({
                path: downloadURL(id, fileName),
                photo: photoName? downloadURL(id, photoName) : "",
                thumb: thumbName? downloadURL(id, thumbName) : ""
            });
        }
    }
    return list;
}
/**
 * isTextFile(name)
 *
 * Classifies a file by its extension.
 *
 * @param {string} name
 * @returns {number} 0 if false, 1 if true, 2 if BAS file
 */
function isTextFile(name)
{
    const asTextFileExts = [
        ".md", ".me", ".bas", ".bat", ".rat", ".asm", ".inc", ".lrf", ".nfo", ".diz",
        ".mak", ".txt", ".xml", ".mac", ".inf", ".skl", ".dat", ".c", ".h"
    ];
    let ext = path.extname(name).toLowerCase();
    if (ext == ".bas") {
        return 2;                       // BAS files are text but may need decoding
    }
    return asTextFileExts.includes(ext)? 1 : 0;
}
/**
* main(argc, argv, errors)
*/
async function main(argc, argv, errors)
{
if (argv.help || argv.verbose) {
printf("dx.js %s\n%s\n\nArguments: %s\n", DXC.VERSION, DXC.COPYRIGHT, argv[0]);
if (argv.help) {
options.help.handler();
}
}
//
// Before we get started, display any usage errors encountered by parseArgs().
//
let nErrors = 0;
for (let error of errors) {
printf("%s\n", error);
nErrors++;
}
//
// Next, let's deal with any specified filters.
//
let filterExceptions = 0, filterMethod = -1;
if (Array.isArray(argv.filter)) {
for (let filter of argv.filter) {
let option = options.filter.options[filter];
if (!option) {
//
// We also allow filtering based on compression method, but that doesn't actually set a filter bit;
// it sets a method number instead, which means you can filter on only one compression method at a time.
//
let methodName = filter[0].toUpperCase() + filter.slice(1).toLowerCase();
let method = LegacyZip.methodNames.indexOf(methodName);
if (method >= 0) {
filterMethod = method;
continue;
}
method = LegacyArc.methodNames.indexOf(methodName);
if (method >= 0) {
filterMethod = -(method + 2);
continue;
}
printf("unknown filter: %s\n", filter);
nErrors++;
continue;
}
if (!option.value) {
printf("\nAvailable filters:\n");
for (let key in options.filter.options) {
let option = options.filter.options[key];
if (option.value) {
printf("%12s: %s\n", key, option.description);
}
}
//
// Also list all possible compression methods, since we allow filtering on those as well.
//
let methods = LegacyZip.methodNames.concat(LegacyArc.methodNames);
for (let i = 0; i < methods.length; i++) {
if (methods[i]) {
let methodValue;
if (i < LegacyZip.methodNames.length) {
methodValue = i;
} else {
methodValue = -(i - LegacyZip.methodNames.length + 2);
}
printf("%12s: process only entries using %s compression (%d)\n", methods[i].toLowerCase(), methods[i], methodValue);
}
}
continue;
}
filterExceptions |= option.value;
}
}
//
// Build a list of items to process, starting with files listed in a batch file.
//
let itemList = [], fromPCJS = {}, uploadIDs = [];
if (argv.batch) {
try {
if (argv.batch.match(/\.csv$/i)) {
let items = 0;
let csv = new CSV();
await csv.open(argv.batch);
do {
let row = await csv.getNextRow();
if (!row) break;
let item = {
volume: row.volume.toUpperCase() || "",
entries: row.entries || 0,
newest: row.newest? new Date(row.newest) : new Date(0),
size: row.size? +row.size : 0,
path: row.path + "/" + row.name
};
if (row.photo) {
item.photo = row.photo;
}
if (row.thumb) {
item.thumb = row.thumb;
}
itemList.push(item);
items++;
} while (true);
await csv.close();
//
// If we're generating scripts for the Internet Archive, we want to weed out duplicates first.
//
// The process begins by sorting itemList by volume, then by entries, then by newest, and finally by path.
//
let cDuplicates = 0;
if ((argv.upload || argv.update || (filterExceptions & DXC.EXCEPTIONS.UNIQUE)) && csv.hasFields("volume", "entries", "newest", "size")) {
itemList.sort((a, b) => {
if (a.volume < b.volume) return -1;
if (a.volume > b.volume) return 1;
if (a.entries < b.entries) return -1;
if (a.entries > b.entries) return 1;
if (a.newest.getTime() < b.newest.getTime()) return -1;
if (a.newest.getTime() > b.newest.getTime()) return 1;
if (a.path < b.path) return 1;
if (a.path > b.path) return -1;
return 0;
});
//
// Let the weeding begin.
//
let lastItem = null;
itemList = itemList.filter(item => {
if (!lastItem || item.volume != lastItem.volume || item.entries != lastItem.entries || item.newest.getTime() != lastItem.newest.getTime()) {
lastItem = item;
return argv.upload || argv.update? !item.path.match(/^https?:\/\//) : true;
}
if (argv.verbose && item.path.match(/^https?:\/\//) && lastItem.path.match(/^https?:\/\//)) {
printf("possible website duplicates:\n\t%s\n\t%s\n", item.path, lastItem.path);
}
if (argv.verbose && argv.debug && item.size != lastItem.size) {
printf("size mismatch in presumed duplicates:\n\t%s (%d)\n\t%s (%d)\n", item.path, item.size, lastItem.path, lastItem.size);
}
lastItem = item;
cDuplicates++;
return false;
});
}
if (!argv.upload && !argv.update || argv.verbose) {
printf("Found %d item%s in %s (%d duplicate%s removed)\n", itemList.length, itemList.length, path.basename(argv.batch), cDuplicates);
}
} else {
let text = await fs.readFile(argv.batch, "utf8");
let list = getList(text);
itemList = itemList.concat(list);
if (!argv.upload && !argv.update || argv.verbose) {
printf("Found %d item%s in %s\n", list.length, list.length, path.basename(argv.batch));
}
}
} catch (error) {
printf("%s\n", error.message);
nErrors++;
}
}
//
// Add any items matching --path patterns.
//
if (argv.path) {
if (argv.path[0] == '~') {
argv.path = path.join(process.env.HOME, argv.path.slice(1));
}
let items = glob.sync(argv.path, { /* follow: true, */ nodir: true, nocase: true, ignore: [".*"] });
//
// If the path included both .img and .json extensions AND --pcjs was specified, then
// we check every .img file for a neighboring .json file; if found, then the .img file is
// removed from the list and the .json file is added to the fromPCJS list.
//
if (argv.pcjs) {
for (let i = 0; i < items.length; i++) {
let itemPath = items[i];
if (path.basename(itemPath) == "diskettes.json" || path.basename(itemPath) == "diskettes-annotated.json") {
items.splice(i--, 1);
}
else if (itemPath.endsWith(".img")) {
let jsonPath = itemPath.replace(/\/archive\/([^/]*)\.img$/, "/$1.json");
if (jsonPath != itemPath && items.includes(jsonPath)) {
items.splice(i--, 1);
fromPCJS[jsonPath] = itemPath;
}
}
}
}
itemList = itemList.concat(items.map(path => ({ path })));
printf("Found %d item%s in specified path\n", items.length);
}
//
// Finally, include any explicitly listed items.
//
for (let i = 1; i < argv.length; i++) {
itemList.push({path: argv[i]});
}
let bannerHashes = {};
let listing = argv.dir || argv.list;
let fileID = +argv.fileID || 1, setID = argv.setID || 1;
let nTotalItems = 0, nTotalFiles = 0, nTotalWarnings = 0;
let incoding = (argv.in || "cp437").replace(/[-_]/g, "").toLowerCase();
let outcoding = (argv.out || (argv.type? "utf8" : incoding)).replace(/[-_]/g, "").toLowerCase();
if (outcoding == "cp437") {
outcoding = "binary"; // "cp437" is a legacy encoding, so we use "binary" instead
}
//
// Normally, a client will provide either a fetch interface or open interface, not both; for example,
// browsers don't have access to the file system, so they will provide only fetch. But as a Node client,
// we DO have both, so we provide both; our open() API defaults to open but will fall back to fetch if
// the filename starts with a network prefix (eg, "http://").
//
// NOTE: While 'fetch' works fine for Node, browsers apparently require 'window.fetch.bind(window)' instead.
//
let dxc = new DXC({
fetch: fetch, // async interface for opening remote files
open: fs.open, // async interface for opening local files
// inflate: zlib.inflateRaw, // async interface for ZIP_DEFLATE data
inflateSync: zlib.inflateRawSync // sync interface for ZIP_DEFLATE data
},
{
encoding: incoding, // input encoding (default is "cp437")
debug: argv.debug // enable debug mode (includes additional warnings)
});
//
// If CSV output is enabled, then open the specified file for writing.
//
let csv;
if (argv.csv) {
try {
csv = await fs.open(argv.csv, "a");
let stats = await fs.stat(argv.csv);
if (!stats.size) {
let heading = dxc.formatHeading(DXC.FORMAT.CSV);
await csv.write(heading);
}
} catch (error) {
printf("%s: %s\n", argv.csv, error.message);
nErrors++;
}
}
if (nErrors) {
return;
}
let dumpItem = false;
if (!itemList.length && argv.dump) {
itemList.push({path: argv.dump});
dumpItem = true;
delete argv.dump;
}
//
// Define a function to process an individual container item, which then allows us to recursively process
// nested containers if --recurse is been specified.
//
let processItem = async function(itemID, itemPath, itemPhoto = null, itemThumb = null, itemTarget = null, itemDB = null, modified = null) {
let handle;
let heading = false;
let prevPath = "";
let dirListing = argv.dir;
let dirLimit = argv.truncate? 11 : -1;
let nDirFiles = 0, nDirBytes = 0;
let itemName = path.basename(itemPath);
let itemExt = path.extname(itemName);
let widthPhoto = 0, heightPhoto = 0;
let nItemFiles = 0, nItemBytes = 0, nItemWarnings = 0;
//
// Generate paths we may need later (for file and/or banner extraction).
//
// If you use the search-and-replace form of the dest option (ie, "--dest <search>=<replace>"), the
// destination path is the source path with the first occurrence of <search> replaced with <replace>.
//
// Otherwise, destination path is whatever follows "--dest". The presence of "--dest" automatically
// enables extraction. If no directory is specified but extraction is still enabled via "--extract",
// then the current directory is used.
//
// If multiple items are being processed and/or extraction was enabled without a specific directory,
// then extraction will occur inside a directory with the name of the item (which will be created if
// necessary). The only way to bypass that behavior is to process items one at a time OR explicitly
// use "." as the directory; the goal is to avoid unintentional merging of extracted files.
//
let srcPath = itemPath;
try {
srcPath = decodeURIComponent(srcPath);
} catch (e) {} // Interesting that a simple function like this can throw a URIError...
let srcName = path.basename(srcPath);
let srcBase = path.basename(srcPath, itemExt);
srcPath = path.dirname(srcPath);
let dstPath = itemTarget || argv.dest || "";
let dstDefault = !dstPath;
let chgPath = dstPath.split("=");
if (chgPath.length > 1) {
if (srcPath.indexOf(chgPath[0]) >= 0) {
dstPath = srcPath.replace(chgPath[0], chgPath[1]);
} else {
printf("warning: source path %s does not contain '%s'\n", srcPath, chgPath[0]);
dstPath = chgPath[1];
}
}
if (dstPath != ".") {
if (!dstPath || itemTarget || itemList.length > 1) {
//
// TODO: Consider an option that determines whether or not to strip the item extension
// from the destination path. The danger is that it can result in extraction conflicts,
// because a folder may contain multiple items with the same name but different extensions
// (eg, "CONTEST.ZIP" and "CONTEST.ARC") or there might simply be another file or folder
// with a conflicting name (eg, "CONTEST").
//
// dstPath = path.join(dstPath, srcBase);
//
dstPath = path.join(dstPath, itemDB? srcName : srcBase);
}
}
let bannerPath = path.join(argv.dest || "", srcBase + ".BAN");
if (!itemPhoto && !itemDB && itemExt.match(/\.(img|json|iso|mdf|bin|cdr)$/i)) {
//
// A top-level item (specifically, a disk image) may have an associated photo in the file system.
//
[itemPhoto, widthPhoto, heightPhoto] = await getPhotoInfo(itemPath, itemExt);
}
let printHeading = function(entry, isFile, isNested) {
let entryPath, fullPath = "";
let continued = nItemFiles > 0? " (continued)" : "";
if (entry.target) {
entryPath = path.dirname(entry.target);
} else {
entryPath = path.dirname(entry.name);
if (entryPath == ".") {
entryPath = "";
}
if (!entryPath) {
entryPath = handle.label;
} else {
entryPath = handle.label + path.sep + entryPath;
}
}
if (!prevPath && !isNested) {
fullPath = ` [${itemPath}]`;
}
if (prevPath != entryPath) {
if (argv.truncate) {
dirLimit = nItemFiles >= 100? 0 : 11;
}
prevPath = entryPath;
if (dirListing) {
if (heading) {
continued = "";
}
heading = false;
}
}
if (!heading && (dirLimit || !isFile) && !argv.csv) {
let itemPrinted = false;
if (listing || argv.banner && handle.item.comment) {
if (listing || !nItemFiles) {
if (dirListing && dirLimit < 0 && nDirFiles) {
printf("%8d file%s %10d byte%s\n", nDirFiles, nDirFiles == 1? " " : "s", nDirBytes);
nDirFiles = nDirBytes = 0;
}
if (dirListing && dirLimit) {
printf("\nDirectory of %s%s%s\n", entryPath, fullPath, continued);
}
if (!dirListing) {
printf("\n%s%s\n", itemPath, continued);
}
if (argv.truncate) {
dirLimit = nItemFiles >= 100? 0 : 11;
}
itemPrinted = true;
}
}
if (handle.item.warnings.length && (!handle.item.volTable || handle.item.volTable.length)) {
if (argv.verbose) {
printf("%s\n%s\n", itemPrinted? "" : entryPath, handle.item.warnings.join("\n"));
} else {
printf("%s: %d issue%s detected\n", itemPrinted? "\nWarning" : entryPath, handle.item.warnings.length);
}
nItemWarnings += handle.item.warnings.length;
}
//
// We also refer to the archive comment as the archive's "banner", which is an archive
// filtering condition (--filter banner), but if you also want to SEE the banners, then
// you must also specify --banner.
//
if (argv.banner && handle.item.comment && !nItemFiles) {
printf("%s\n", handle.item.comment);
}
if (listing) {
if (dirListing) {
if (dirLimit < 0) {
printf("\n");
}
}
else {
printf("\n%s", dxc.formatHeading());
}
}
}
heading = true;
if (isFile) {
nDirFiles++;
nDirBytes += entry.size;
nItemFiles++;
nItemBytes += entry.size;
nTotalFiles++;
}
};
let printScript = function() {
//
// I expect item.name to refer to a file with a path of the form:
//
// .../[publisher]/[category]/filename.ext
//
// So let's extract the publisher and category values from the path now.
//
let pathParts = path.dirname(handle.item.name).split(path.sep);
let publisher = pathParts[pathParts.length - 2] || ""; // eg. Microsoft
let category = pathParts[pathParts.length - 1] || ""; // eg. TechNet
let id = "";
if (publisher) {
id += publisher.toLowerCase().replace(/ /g, '-');
if (id == "microsoft") id = "ms";
id += '-';
}
if (category) {
id += category.toLowerCase().replace(/ /g, '-') + '-';
}
let label = (handle.label || srcBase).replace(/ /g, "-").toUpperCase();
id += label.toLowerCase();
let origID = id, nextID = 1;
while (uploadIDs.includes(id)) {
id = origID + "-" + nextID++;
}
uploadIDs.push(id);
let title = format.sprintf("%s %s %s Disc (%F %Y)", publisher, category, label, handle.item.modified).trim();
let files = [], targetName = label + itemExt;
printf("# %s %s\n", argv.upload? "uploading" : "updating", path.basename(handle.item.name));
printf("cp \"%s\" %s\n", handle.item.name, targetName);
files.push(targetName);
if (argv.upload && itemPhoto) {
let match = itemPhoto.match(/^(.*?)(\.[^.]+)$/);
if (match) {
let targetExt = match[2].toLowerCase();
let targetName = label + targetExt;