import { readdirSync, writeFile, mkdirSync, createReadStream, createWriteStream, existsSync, statSync } from "fs";
import * as path from "path";
import { red, cyan, yellow } from "colors";
import { Utils } from "../../../Utils";
import rimraf = require("rimraf");
import { DashUploadUtils } from "../../../server/DashUploadUtils";
const StreamZip = require('node-stream-zip');
const createImageSizeStream = require("image-size-stream");
import { parseXml } from "libxmljs";
import { strictEqual } from "assert";
import { Readable, PassThrough } from "stream";
import { Directory, serverPathToFile } from "../../../server/ApiManagers/UploadManager";
/**
* This is an arbitrary bundle of data that gets populated
* in extractFileContents
*/
interface DocumentContents {
body: string;
imageData: ImageData[];
hyperlinks: string[];
tableData: TableData[];
longDescription: string;
}
/**
* A rough schema for everything that Bill has
* included for each document
*/
export interface DeviceDocument {
title: string;
shortDescription: string;
longDescription: string;
company: string;
year: number;
originalPrice?: number;
degreesOfFreedom?: number;
dimensions?: { dim_length: number, dim_width: number, dim_height: number, dim_unit: string }; // matches the shape produced by the "dimensions" transformer in FormatMap
primaryKey: string;
secondaryKey: string;
attribute: string;
__images: ImageData[];
additionalMedia: ({ [type: string]: string } | undefined)[];
hyperlinks: string[];
captions: string[]; // from the table column
embeddedFileNames: string[]; // from the table column
}
/**
* A layer of abstraction around a single parsing
* attempt. The error is not a TypeScript error, but
* rather reports an invalidly formatted value for a given key.
*/
export interface AnalysisResult {
device?: DeviceDocument;
invalid?: { [deviceProperty: string]: string };
}
/**
* A mini API that takes in a string and returns
* either a transformed value of type T or an error indicating that the
* transformation was rejected.
*/
type Transformer<T> = (raw: string) => TransformResult<T>;
interface TransformResult<T> {
transformed?: T;
error?: string;
}
/**
* Simple bundle counting successful and failed imports
*/
export interface ImportResults {
deviceCount: number;
errorCount: number;
}
/**
* Definitions for callback functions. Such instances are
* invoked when a single document has been parsed
* or when the entire import is over. As of this writing, these
* callbacks are supplied by WebSocket.ts and used to inform
* the client of these events.
*/
type ResultCallback = (result: AnalysisResult) => void;
type TerminatorCallback = (result: ImportResults) => void;
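// A minimal sketch of how a caller might wire up these callbacks (the real
// wiring lives in WebSocket.ts; the logging below is purely illustrative):
//
// executeImport(
//     result => console.log(result.device?.title ?? "parse failed"),
//     ({ deviceCount, errorCount }) => console.log(`${deviceCount} devices, ${errorCount} errors`)
// );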
/**
* Defines everything needed to define how a single key should be
* formatted within the plain body text. The association between
* keys and their format definitions is stored in FormatMap
*/
interface ValueFormatDefinition<T> {
exp: RegExp; // the expression that the key's value should match
matchIndex?: number; // defaults to 1, but can be overridden to account for grouping in @param exp
transformer?: Transformer<T>; // if desirable, how to transform the Regex match
required?: boolean; // defaults to true; for a whole document to be counted successful,
// all of its required values must be present and properly formatted
}
/**
* The basic data we extract from each image in the document
*/
interface ImageData {
url: string;
nativeWidth: number;
nativeHeight: number;
}
namespace Utilities {
/**
* Numeric 'try parse', fits with the Transformer API
* @param raw the serialized number
*/
export function numberValue(raw: string): TransformResult<number> {
const transformed = Number(raw);
if (isNaN(transformed)) {
return { error: `${raw} cannot be parsed to a numeric value.` };
}
return { transformed };
}
/**
* A simple tokenizer that splits along 'and' and commas, and removes duplicates
* Helpful mainly for attribute and primary key lists
* @param raw the string to tokenize
*/
export function collectUniqueTokens(raw: string): TransformResult<string[]> {
const pieces = raw.replace(/,|\s+and\s+/g, " ").split(/\s+/).filter(piece => piece.length);
const unique = new Set(pieces.map(token => token.toLowerCase().trim()));
return { transformed: Array.from(unique).map(capitalize).sort() };
}
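// For example: collectUniqueTokens("touch, pen and Pen") yields
// { transformed: ["Pen", "Touch"] }, since duplicates collapse case-insensitively
// and the survivors are capitalized and sorted.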
/**
* Tries to correct XML text parsing artifacts where some sentences lose their separating space,
* and others gain excess whitespace
* @param raw
*/
export function correctSentences(raw: string): TransformResult<string> {
raw = raw.replace(/\./g, ". ").replace(/\:/g, ": ").replace(/\,/g, ", ").replace(/\?/g, "? ").trimRight();
raw = raw.replace(/\s{2,}/g, " ");
return { transformed: raw };
}
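// For example: correctSentences("One.Two,  three").transformed returns
// "One. Two, three", restoring the space lost after the period and collapsing
// the excess whitespace.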
/**
* Simple capitalization
* @param word to capitalize
*/
export function capitalize(word: string): string {
const clean = word.trim();
if (!clean.length) {
return word;
}
return clean.charAt(0).toUpperCase() + clean.slice(1);
}
/**
* Streams the requested file at the relative path to the
* root of the zip, then parses it with a library
* @param zip the zip instance data source
* @param relativePath the path to a .xml file within the zip to parse
*/
export async function readAndParseXml(zip: any, relativePath: string) {
console.log(`Text streaming ${relativePath}`);
const contents = await new Promise<string>((resolve, reject) => {
let body = "";
zip.stream(relativePath, (error: any, stream: any) => {
if (error) {
reject(error);
return; // don't touch the stream if the entry failed to open
}
stream.on('data', (chunk: any) => body += chunk.toString());
stream.on('end', () => resolve(body));
});
});
return parseXml(contents);
}
}
/**
* Defines how device values should be formatted. As you can see, the formatting is
* not super consistent and has changed over time as edge cases have been found, but this
* at least imposes some constraints, and will notify you if a document doesn't match the specifications
* in this map.
*/
const FormatMap = new Map<keyof DeviceDocument, ValueFormatDefinition<any>>([
["title", {
exp: /contact\s+(.*)Short Description:/
}],
["company", {
exp: /Company:\s+([^\|]*)\s+\|/,
transformer: (raw: string) => ({ transformed: raw.replace(/\./g, "") })
}],
["year", {
exp: /Year:\s+([^\|]*)\s+\|/,
transformer: (raw: string) => Utilities.numberValue(/[0-9]{4}/.exec(raw)![0])
}],
["primaryKey", {
exp: /Primary:\s+(.*)(Secondary|Additional):/,
transformer: raw => {
const { transformed, error } = Utilities.collectUniqueTokens(raw);
return transformed ? { transformed: transformed[0] } : { error };
}
}],
["secondaryKey", {
exp: /(Secondary|Additional):\s+(.*)Attributes?:/,
transformer: raw => {
const { transformed, error } = Utilities.collectUniqueTokens(raw);
return transformed ? { transformed: transformed[0] } : { error };
},
matchIndex: 2
}],
["attribute", {
exp: /Attributes?:\s+(.*)Links/,
transformer: raw => {
const { transformed, error } = Utilities.collectUniqueTokens(raw);
return transformed ? { transformed: transformed[0] } : { error };
},
}],
["originalPrice", {
exp: /Original Price \(USD\)\:\s+(\$[0-9\,]+\.[0-9]+|NFS)/,
transformer: (raw: string) => {
raw = raw.replace(/\,/g, "");
if (raw === "NFS") {
return { transformed: -1 };
}
return Utilities.numberValue(raw.slice(1));
},
required: false
}],
["degreesOfFreedom", {
exp: /Degrees of Freedom:\s+([0-9]+)/,
transformer: Utilities.numberValue,
required: false
}],
["dimensions", {
exp: /Dimensions\s+\(L x W x H\):\s+([0-9\.]+\s+x\s+[0-9\.]+\s+x\s+[0-9\.]+\s\([A-Za-z]+\))/,
transformer: (raw: string) => {
const [length, width, group] = raw.split(" x ");
const [height, unit] = group.split(" ");
return {
transformed: {
dim_length: Number(length),
dim_width: Number(width),
dim_height: Number(height),
dim_unit: unit.replace(/[\(\)]+/g, "")
}
};
},
required: false
}],
["shortDescription", {
exp: /Short Description:\s+(.*)Bill Buxton[’']s Notes/,
transformer: Utilities.correctSentences
}],
]);
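// To illustrate how these definitions are applied in analyze() below: given the
// hypothetical body text "Company: Apple Inc. | Year: ca. 1983 |", the "year"
// entry's exp captures "ca. 1983" at group 1, and its transformer extracts the
// four-digit run and parses it, yielding { transformed: 1983 }.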
const sourceDir = path.resolve(__dirname, "source"); // where the Word documents are assumed to be stored
const assetDir = path.resolve(__dirname, "assets"); // where any additional media content like pdfs will be stored. Each subdirectory of this
// must follow the enum Directory.<type> naming scheme
const outDir = path.resolve(__dirname, "json"); // where the JSON output of these device documents will be written
const imageDir = path.resolve(__dirname, "../../../server/public/files/images/buxton"); // where, in the server, these images will be written
const successOut = "buxton.json"; // the JSON list representing properly formatted documents
const failOut = "incomplete.json"; // the JSON list representing improperly formatted documents
const deviceKeys = Array.from(FormatMap.keys()); // a way to iterate through all keys of the DeviceDocument interface
/**
* Starts by REMOVING ALL EXISTING BUXTON RESOURCES. This might need to be
* changed going forward
* @param emitter the callback when each document is completed
* @param terminator the callback when the entire import is completed
*/
export default async function executeImport(emitter: ResultCallback, terminator: TerminatorCallback) {
try {
// get all Word documents in the source directory
const contents = readdirSync(sourceDir);
const wordDocuments = contents.filter(file => /.*\.docx?$/.test(file)).map(file => `${sourceDir}/${file}`);
// removal takes place here
[outDir, imageDir].forEach(dir => {
rimraf.sync(dir);
mkdirSync(dir);
});
await transferAssets();
return parseFiles(wordDocuments, emitter, terminator);
} catch (e) {
const message = [
"Unable to import from the source directory.",
`Please ensure that ${sourceDir} exists.`,
`Original error: ${e.message}`
].join('\n');
console.log(red(message));
return { error: message };
}
}
/**
* Builds a mirrored directory structure of all media / asset files
* within the server's public directory.
*/
async function transferAssets() {
for (const assetType of readdirSync(assetDir)) {
const subroot = path.resolve(assetDir, assetType);
if (!statSync(subroot).isDirectory()) {
continue;
}
const outputSubroot = serverPathToFile(assetType as Directory, "buxton");
if (existsSync(outputSubroot)) {
continue;
} else {
mkdirSync(outputSubroot);
}
for (const fileName of readdirSync(subroot)) {
const readStream = createReadStream(path.resolve(subroot, fileName));
const writeStream = createWriteStream(path.resolve(outputSubroot, fileName));
await new Promise<void>(resolve => {
readStream.pipe(writeStream).on("close", resolve);
});
}
}
}
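// To illustrate transferAssets with a hypothetical layout: assets/pdfs/stylator.pdf
// would be copied to the server's public pdfs directory under the "buxton"
// subdirectory resolved by serverPathToFile, mirroring the assets/ tree.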
/**
* Parse every Word document in the directory, notifying any callers as needed
* at each iteration via the emitter.
* @param wordDocuments the string list of Word document names to parse
* @param emitter the callback when each document is completed
* @param terminator the callback when the entire import is completed
*/
async function parseFiles(wordDocuments: string[], emitter: ResultCallback, terminator: TerminatorCallback): Promise<DeviceDocument[]> {
// execute parent-most parse function
const results: AnalysisResult[] = [];
for (const filePath of wordDocuments) {
const fileName = path.basename(filePath).replace("Bill_Notes_", ""); // not strictly needed, but cleaner
console.log(cyan(`\nExtracting contents from ${fileName}...`));
const result = analyze(fileName, await extractFileContents(filePath));
emitter(result);
results.push(result);
}
// collect information about errors and successes
const masterDevices: DeviceDocument[] = [];
const masterErrors: { [key: string]: string }[] = [];
results.forEach(({ device, invalid: errors }) => {
if (device) {
masterDevices.push(device);
} else if (errors) {
masterErrors.push(errors);
}
});
// sanity check: successes and failures should sum to the total number of inputs
const total = wordDocuments.length;
if (masterDevices.length + masterErrors.length !== total) {
throw new Error(`Device / error mismatch: ${masterDevices.length} successes + ${masterErrors.length} errors does not equal ${total} inputs!`);
}
// write the external JSON representations of this import
console.log();
await writeOutputFile(successOut, masterDevices, total, true);
await writeOutputFile(failOut, masterErrors, total, false);
console.log();
// notify the caller that the import has finished
terminator({ deviceCount: masterDevices.length, errorCount: masterErrors.length });
return masterDevices;
}
/**
* XPath definitions for desired XML targets in respective hierarchies.
*
* For table cells, can be read as: "find me anything that looks like <w:tc> in XML, whose
* parent looks like <w:tr>, whose parent looks like <w:tbl>"
*
* <w:tbl>
* <w:tr>
* <w:tc>
*
* These are found by trial and error, and using an online XML parser / prettifier
* to inspect the structure, since the Node XML library does not expose the parsed
* structure very well for searching, say in the debug console.
*/
const xPaths = {
paragraphs: '//*[name()="w:p"]',
tableCells: '//*[name()="w:tbl"]/*[name()="w:tr"]/*[name()="w:tc"]',
hyperlinks: '//*[name()="Relationship" and contains(@Type, "hyperlink")]'
};
interface TableData {
fileName: string;
caption: string;
additionalMedia?: { [type: string]: string };
}
const SuffixDirectoryMap = new Map<string, Directory>([
["p", Directory.pdfs]
]);
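// For example, a table row whose fourth cell reads "p" has its embedded file
// name remapped to a same-named .pdf entry under Directory.pdfs in
// extractFileContents below.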
/**
* The meat of the script, images and text content are extracted here
* @param pathToDocument the path to the document relative to the root of the zip
*/
async function extractFileContents(pathToDocument: string): Promise<DocumentContents> {
console.log('Extracting text...');
const zip = new StreamZip({ file: pathToDocument, storeEntries: true });
await new Promise<void>(resolve => zip.on('ready', resolve));
// extract the body of the document and, specifically, its captions
const document = await Utilities.readAndParseXml(zip, "word/document.xml");
// get plain text
const body = document.root()?.text() ?? "No body found. Check the import script's XML parser.";
const captions: string[] = [];
const tableData: TableData[] = [];
// preserve paragraph formatting and line breaks that would otherwise get lost in the plain text parsing
// of the XML hierarchy
const paragraphs = document.find(xPaths.paragraphs).map(node => Utilities.correctSentences(node.text()).transformed!);
const start = paragraphs.findIndex(el => /Bill Buxton[’']s Notes/.test(el)) + 1;
const end = paragraphs.indexOf("Device Details");
const longDescription = paragraphs.slice(start, end).filter(paragraph => paragraph.length).join("\n\n");
// extract captions from the table cells
const tableRowsFlattened = document.find(xPaths.tableCells).map(node => node.text().trim());
const { length } = tableRowsFlattened;
const numCols = 4;
strictEqual(length > numCols, true, "No captions written."); // first row has the headers, not content
strictEqual(length % numCols === 0, true, "Improper caption formatting.");
// break the flat list of strings into groups of numCols. Thus, each group represents
// a row in the table, where the first cell has no text content since it holds
// the image, the second has the file name and the third has the caption (additional columns
// may have been added or reordered since this was written, but follow the same approach)
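// For example (hypothetical cell values): with numCols = 4 and 12 cells, the loop
// below visits rows [4..7] and [8..11], skipping the header row [0..3]; a visited
// row might look like ["", "stylator.jpeg", "Stylator in use", "p"].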
for (let i = numCols; i < tableRowsFlattened.length; i += numCols) {
const row = tableRowsFlattened.slice(i, i + numCols);
const entry: TableData = { fileName: row[1], caption: row[2] };
const key = SuffixDirectoryMap.get(row[3].toLowerCase());
if (key) {
const media: any = {};
media[key] = `${entry.fileName.split(".")[0]}.pdf`;
entry.additionalMedia = media;
}
tableData.push(entry);
}
// extract all hyperlinks embedded in the document
const rels = await Utilities.readAndParseXml(zip, "word/_rels/document.xml.rels");
const hyperlinks = rels.find(xPaths.hyperlinks).map(el => el.attr("Target")!.value()); // look up the attribute by name rather than relying on attribute order
console.log("Text extracted.");
// write out the images for this document
console.log("Beginning image extraction...");
const imageData = await writeImages(zip);
console.log(`Extracted ${imageData.length} images.`);
// cleanup
zip.close();
return { body, longDescription, imageData, tableData, hyperlinks };
}
// matches zip entry paths (relative to the zip root) that point to media assets
const imageEntry = /^word\/media\/\w+\.(jpeg|jpg|png|gif)/;
/**
* Image dimensions and file suffix, as emitted by the image size stream
*/
interface ImageAttrs {
width: number;
height: number;
type: string;
}
/**
* For each image, stream the file, get its size, check if it's an icon
* (if it is, ignore it)
* @param zip the zip instance data source
*/
async function writeImages(zip: any): Promise<ImageData[]> {
const allEntries = Object.values<any>(zip.entries()).map(({ name }) => name);
const imageEntries = allEntries.filter(name => imageEntry.test(name));
const imageUrls: ImageData[] = [];
const valid: any[] = [];
const getImageStream = (mediaPath: string) => new Promise<Readable>((resolve, reject) => {
zip.stream(mediaPath, (error: any, stream: any) => error ? reject(error) : resolve(stream));
});
for (const mediaPath of imageEntries) {
const readStream = await getImageStream(mediaPath);
const { width, height, type } = await new Promise<ImageAttrs>((resolve, reject) => {
const sizeStream = (createImageSizeStream() as PassThrough).on('size', (dimensions: ImageAttrs) => {
readStream.destroy();
resolve(dimensions);
}).on("error", (error: any) => {
readStream.destroy();
reject(error); // fail fast rather than leaving the promise unsettled
});
readStream.pipe(sizeStream);
});
// keep the image only if, by this rough heuristic, it's not an icon: either it
// appears after the first few media entries or it's visibly non-square
const number = Number(/image(\d+)/.exec(mediaPath)![1]);
if (number > 5 || width - height > 10) {
valid.push({ width, height, type, mediaPath, number });
}
}
valid.sort((a, b) => a.number - b.number);
// if the first two images share an aspect ratio, they are likely the same
// picture at two resolutions, so drop the lower-resolution duplicate
if (valid.length > 1) {
const [{ width: first_w, height: first_h }, { width: second_w, height: second_h }] = valid;
if (Math.abs(first_w / second_w - first_h / second_h) < 0.01) {
const first_size = first_w * first_h;
const second_size = second_w * second_h;
const target = first_size >= second_size ? 1 : 0;
valid.splice(target, 1);
console.log(`Heuristically removed image with size ${target ? second_size : first_size}`);
}
}
// for each valid image, output the _o, _l, _m, and _s files
// THIS IS WHERE THE SCRIPT SPENDS MOST OF ITS TIME
for (const { type, width, height, mediaPath } of valid) {
const generatedFileName = `upload_${Utils.GenerateGuid()}.${type.toLowerCase()}`;
await DashUploadUtils.outputResizedImages(() => getImageStream(mediaPath), generatedFileName, imageDir);
imageUrls.push({
url: `/files/images/buxton/${generatedFileName}`,
nativeWidth: width,
nativeHeight: height
});
}
return imageUrls;
}
/**
* Takes the results of extractFileContents (the external media and
* preliminary text processing) and tests the document body against
* the value definitions in FormatMap to make sure it contains all
* required fields, properly formatted
* @param fileName the file whose body to inspect
* @param contents the data already computed / parsed by extractFileContents
*/
function analyze(fileName: string, contents: DocumentContents): AnalysisResult {
const { body, imageData, hyperlinks, tableData, longDescription } = contents;
const device: any = {
hyperlinks,
captions: tableData.map(({ caption }) => caption),
embeddedFileNames: tableData.map(({ fileName }) => fileName),
additionalMedia: tableData.map(({ additionalMedia }) => additionalMedia),
longDescription,
__images: imageData
};
const errors: { [key: string]: string } = { fileName };
for (const key of deviceKeys) {
const { exp, transformer, matchIndex, required } = FormatMap.get(key)!;
const matches = exp.exec(body);
let captured: string;
// if we matched and we got the specific match we're after
if (matches && (captured = matches[matchIndex ?? 1])) { // matchIndex defaults to 1
captured = captured.replace(/\s{2,}/g, " "); // remove excess whitespace
// if supplied, apply the required transformation (recall this is specified in FormatMap)
if (transformer) {
const { error, transformed } = transformer(captured);
if (error) {
// we hit a snag trying to transform the valid match
// still counts as a fundamental error
errors[key] = `__ERR__${key.toUpperCase()}__TRANSFORM__: ${error}`;
continue;
}
captured = transformed;
}
device[key] = captured;
} else if (required ?? true) {
// the field was either implicitly or explicitly required, and failed to match the definition in
// FormatMap
errors[key] = `__ERR__${key.toUpperCase()}__: outer match ${matches === null ? "wasn't" : "was"} captured.`;
continue;
}
}
// print errors - this can be removed
const errorKeys = Object.keys(errors);
if (errorKeys.length > 1) {
console.log(red(`@ ${cyan(fileName.toUpperCase())}...`));
errorKeys.forEach(key => key !== "fileName" && console.log(red(errors[key])));
return { invalid: errors };
}
return { device };
}
/**
* A utility function that writes the JSON results for this import out to the desired path
* @param relativePath where to write the JSON file
* @param data valid device document objects, or errors
* @param total used for more informative printing
* @param success whether or not the caller is writing the successful parses or the failures
*/
async function writeOutputFile(relativePath: string, data: any[], total: number, success: boolean) {
console.log(yellow(`Encountered ${data.length} ${success ? "valid" : "invalid"} documents out of ${total} candidates. Writing ${relativePath}...`));
return new Promise<void>((resolve, reject) => {
const destination = path.resolve(outDir, relativePath);
const contents = JSON.stringify(data, undefined, 4); // format the JSON
writeFile(destination, contents, err => err ? reject(err) : resolve());
});
}