load.js
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

import { resolve } from 'path';
import { createReadStream } from 'fs';

import {
  createPromiseFromStreams,
  concatStreamProviders,
} from '../../utils';

import {
  isGzip,
  createStats,
  prioritizeMappings,
  readDirectory,
  createParseArchiveStreams,
  createCreateIndexStream,
  createIndexDocRecordsStream,
  migrateKibanaIndex,
} from '../lib';
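
// esArchiver "load" action: read the archive files under
// `dataDir/name`, replay their records into Elasticsearch, and
// return per-index stats.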

// Pipe a series of streams into each other so that data and errors
// flow from the first stream to the last. Errors from the last
// stream are not listened for.
const pipeline = (...streams) => streams
  .reduce((source, dest) => (
    source
      .once('error', (error) => dest.destroy(error))
      .pipe(dest)
  ));
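
// Illustrative usage of `pipeline` (a sketch, not from this module):
// stream a gzipped file through Node's zlib.createGunzip() to stdout.
// An error on an earlier stream destroys the next stream in the chain.
//
//   const zlib = require('zlib');
//   pipeline(
//     createReadStream('archive.json.gz'),
//     zlib.createGunzip(),
//     process.stdout
//   );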

export async function loadAction({ name, skipExisting, client, dataDir, log, kibanaUrl }) {
  const inputDir = resolve(dataDir, name);
  const stats = createStats(name, log);
  const files = prioritizeMappings(await readDirectory(inputDir));

  // A single stream that emits records from all archive files, in
  // order, so that createIndexStream can track the state of indexes
  // across archives and properly skip docs from existing indexes.
  const recordStream = concatStreamProviders(
    files.map(filename => () => {
      log.info('[%s] Loading %j', name, filename);

      return pipeline(
        createReadStream(resolve(inputDir, filename)),
        ...createParseArchiveStreams({ gzip: isGzip(filename) })
      );
    }),
    { objectMode: true }
  );
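
  // Run the records through the index-creation and doc-indexing
  // streams and wait for the whole pipeline to complete.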
  await createPromiseFromStreams([
    recordStream,
    createCreateIndexStream({ client, stats, skipExisting, log, kibanaUrl }),
    createIndexDocRecordsStream(client, stats),
  ]);
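
  // Summarize what was loaded and log the doc count for each index
  // that wasn't deleted along the way.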
  const result = stats.toJSON();
  const indicesToRefresh = Object
    .entries(result)
    .filter(([, indexStats]) => !indexStats.deleted)
    .map(([index, { docs }]) => {
      log.info('[%s] Indexed %d docs into %j', name, docs.indexed, index);
      return index;
    });
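
  // Refresh the loaded indexes so the docs indexed above are
  // immediately visible to search.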
  await client.indices.refresh({
    index: indicesToRefresh
  });

  // If we affected the Kibana index, we need to ensure it's migrated...
  if (Object.keys(result).some(k => k.startsWith('.kibana'))) {
    await migrateKibanaIndex({ client, log });
  }

  return result;
}