#!/usr/bin/env node
/*---------------------------------------------------------
* Copyright (C) Microsoft Corporation. All rights reserved.
*--------------------------------------------------------*/
import * as chokidar from 'chokidar';
import { existsSync, promises as fs } from 'fs';
import { glob } from 'glob';
import { minimatch } from 'minimatch';
import { createRequire } from 'module';
import { cpus } from 'os';
import { dirname, isAbsolute, join, resolve } from 'path';
import supportsColor from 'supports-color';
import { fileURLToPath, pathToFileURL } from 'url';
import yargs from 'yargs';
// ES modules have no global `require`; create one so require.resolve() can be used below.
const require = createRequire(import.meta.url);
const rulesAndBehavior = 'Mocha: Rules & Behavior';
const reportingAndOutput = 'Mocha: Reporting & Output';
const fileHandling = 'Mocha: File Handling';
const testFilters = 'Mocha: Test Filters';
const vscodeSection = 'VS Code Options';
const configFileDefault = 'nearest .vscode-test.js';
const args = yargs(process.argv)
.epilogue('See https://code.visualstudio.com/api/working-with-extensions/testing-extension for help')
.option('config', {
type: 'string',
description: 'Config file to use',
default: configFileDefault,
group: vscodeSection,
})
.option('label', {
alias: 'l',
type: 'array',
description: 'Specify the test configuration(s) to run, by label (or index) in the config file',
group: vscodeSection,
})
.option('code-version', {
type: 'string',
description: 'Override the VS Code version used to run tests',
group: vscodeSection,
})
//#region Rules & Behavior
.option('bail', {
alias: 'b',
type: 'boolean',
description: 'Abort ("bail") after first test failure',
group: rulesAndBehavior,
})
.option('dry-run', {
type: 'boolean',
description: 'Report tests without executing them',
group: rulesAndBehavior,
})
.option('list-configuration', {
type: 'boolean',
description: 'List configurations and the tests they would run, without executing them',
group: rulesAndBehavior,
})
.option('fail-zero', {
type: 'boolean',
description: 'Fail test run if no test(s) encountered',
group: rulesAndBehavior,
})
.option('forbid-only', {
type: 'boolean',
description: 'Fail if exclusive test(s) encountered',
group: rulesAndBehavior,
})
.option('forbid-pending', {
type: 'boolean',
description: 'Fail if pending test(s) encountered',
group: rulesAndBehavior,
})
.option('jobs', {
alias: 'j',
type: 'number',
description: 'Number of concurrent jobs for --parallel; use 1 to run in serial',
default: Math.max(1, cpus().length - 1),
group: rulesAndBehavior,
})
.option('parallel', {
alias: 'p',
type: 'boolean',
description: 'Run tests in parallel',
group: rulesAndBehavior,
})
.option('retries', {
alias: 'r',
type: 'number',
description: 'Number of times to retry failed tests',
group: rulesAndBehavior,
})
.option('slow', {
alias: 's',
type: 'number',
description: 'Specify "slow" test threshold (in milliseconds)',
default: 75,
group: rulesAndBehavior,
})
.option('timeout', {
alias: 't',
type: 'number',
description: 'Specify test timeout threshold (in milliseconds)',
default: 2000,
group: rulesAndBehavior,
})
//#endregion
//#region Reporting & Output
.option('color', {
alias: 'c',
type: 'boolean',
description: 'Force-enable color output',
group: reportingAndOutput,
})
.option('diff', {
type: 'boolean',
description: 'Show diff on failure',
default: true,
group: reportingAndOutput,
})
.option('full-trace', {
type: 'boolean',
description: 'Display full stack traces',
group: reportingAndOutput,
})
.option('inline-diffs', {
type: 'boolean',
description: 'Display actual/expected differences inline within each string',
group: reportingAndOutput,
})
.option('reporter', {
alias: 'R',
type: 'string',
description: 'Specify reporter to use',
default: 'spec',
group: reportingAndOutput,
})
.option('reporter-option', {
alias: 'O',
type: 'array',
description: 'Reporter-specific options (<k=v,[k1=v1,..]>)',
group: reportingAndOutput,
})
//#endregion
//#region File Handling
.option('file', {
type: 'array',
description: 'Specify file(s) to be loaded prior to root suite',
group: fileHandling,
})
.option('ignore', {
alias: 'exclude',
type: 'array',
description: 'Ignore file(s) or glob pattern(s)',
group: fileHandling,
})
.option('watch', {
alias: 'w',
type: 'boolean',
description: 'Watch files in the current working directory for changes',
group: fileHandling,
})
.option('watch-files', {
type: 'array',
description: 'List of paths or globs to watch',
group: fileHandling,
})
.option('watch-ignore', {
type: 'array',
description: 'List of paths or globs to exclude from watching',
group: fileHandling,
})
.option('run', {
type: 'array',
description: 'List of specific files to run',
group: fileHandling,
})
//#endregion
//#region Test Filters
.option('fgrep', {
type: 'string',
alias: 'f',
description: 'Only run tests containing this string',
group: testFilters,
})
.option('grep', {
type: 'string',
alias: 'g',
description: 'Only run tests matching this string or regexp',
group: testFilters,
})
.option('invert', {
alias: 'i',
type: 'boolean',
description: 'Inverts --grep and --fgrep matches',
group: testFilters,
})
.parseSync();
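// Loaders for supported config file extensions: JSON is parsed directly, JS/MJS is dynamically imported.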
const configFileRules = {
json: (path) => fs.readFile(path, 'utf8').then(JSON.parse),
js: (path) => import(pathToFileURL(path).toString()),
mjs: (path) => import(pathToFileURL(path).toString()),
};
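/** Error whose message is shown to the user as-is, without a stack trace (see main's catch block). */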
class CliExpectedError extends Error {
}
main();
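/** Entry point: loads the config file(s), applies --label filtering, then either watches or runs once and exits with the aggregate exit code. */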
async function main() {
let code = 0;
try {
let configs = args.config !== configFileDefault
? await tryLoadConfigFile(args.config)
: await loadDefaultConfigFile();
if (args.label?.length) {
configs = args.label.map((label) => {
const found = configs.find((c, i) => typeof label === 'string' ? c.config.label === label : i === label);
if (!found) {
throw new CliExpectedError(`Could not find a configuration with label "${label}"`);
}
return found;
});
}
if (args.watch) {
await watchConfigs(configs);
}
else {
code = await runConfigs(configs);
}
}
catch (e) {
code = 1;
if (e instanceof CliExpectedError) {
console.error(e.message);
}
else {
console.error(e.stack || e);
}
}
finally {
process.exit(code);
}
}
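// Delay (in milliseconds) between a file-change event and the test run it triggers.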
const WATCH_RUN_DEBOUNCE = 500;
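/** Watches the working directory (or --watch-files globs) with chokidar and re-runs the configured tests on change, debounced; never returns until interrupted. */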
async function watchConfigs(configs) {
let debounceRun;
let rerun = false;
let running = true;
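// Debounce bursts of file changes into a single run; if changes arrive while a run is in progress, schedule another run when it finishes.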
const runOrDebounce = () => {
if (debounceRun) {
clearTimeout(debounceRun);
}
debounceRun = setTimeout(async () => {
running = true;
rerun = false;
try {
await runConfigs(configs);
}
finally {
running = false;
if (rerun) {
runOrDebounce();
}
}
}, WATCH_RUN_DEBOUNCE);
};
const watcher = chokidar.watch(args.watchFiles?.length ? args.watchFiles.map(String) : process.cwd(), {
ignored: [
'**/.vscode-test/**',
'**/node_modules/**',
...(args.watchIgnore || []).map(String),
],
ignoreInitial: true,
});
watcher.on('all', (evts) => {
console.log(evts);
if (running) {
rerun = true;
}
else {
runOrDebounce();
}
});
watcher.on('ready', () => {
runOrDebounce();
});
// wait until interrupted
await new Promise(() => {
/* no-op */
});
}
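// A configuration targets the desktop (Electron) runner unless it declares a different platform.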
const isDesktop = (config) => !config.platform || config.platform === 'desktop';
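// Mocha runner script passed to VS Code as extensionTestsPath and executed inside the extension host.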
const RUNNER_PATH = join(fileURLToPath(new URL('.', import.meta.url)), 'runner.cjs');
/** Runs the given test configurations. */
async function runConfigs(configs) {
const resolvedConfigs = await Promise.all(configs.map(async (c) => {
const files = args.run?.length
? args.run.map((r) => resolve(process.cwd(), String(r)))
: await gatherFiles(c);
const env = {};
if (isDesktop(c.config)) {
c.config.launchArgs ||= [];
if (c.config.workspaceFolder) {
c.config.launchArgs.push(resolve(dirname(c.path), c.config.workspaceFolder));
}
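// Serialize the Mocha options, preload modules, and test file list for the runner to pick up via the VSCODE_TEST_OPTIONS environment variable.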
env.VSCODE_TEST_OPTIONS = JSON.stringify({
mochaOpts: { ...args, ...c.config.mocha },
colorDefault: supportsColor.stdout || process.env.MOCHA_COLORS !== undefined,
preload: [
...(typeof c.config.mocha?.preload === 'string'
? [c.config.mocha.preload]
: c.config.mocha?.preload || []).map((f) => require.resolve(f, { paths: [c.path] })),
...(args.file?.map((f) => require.resolve(String(f), { paths: [process.cwd()] })) ||
[]),
],
files,
});
}
return {
...c,
files,
env,
extensionTestsPath: RUNNER_PATH,
extensionDevelopmentPath: c.config.extensionDevelopmentPath?.slice() || dirname(c.path),
};
}));
if (args.listConfiguration) {
console.log(JSON.stringify(resolvedConfigs, null, 2));
return 0;
}
let code = 0;
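// Launch each desktop configuration via @vscode/test-electron, tracking the worst exit code (bailing early when --bail is set).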
for (const { config, path, env, extensionTestsPath } of resolvedConfigs) {
if (isDesktop(config)) {
let electron;
try {
electron = await import('@vscode/test-electron');
}
catch (e) {
throw new CliExpectedError('@vscode/test-electron not found, you may need to install it ("npm install -D @vscode/test-electron")');
}
const nextCode = await electron.runTests({
...config,
version: args.codeVersion || config.version,
extensionDevelopmentPath: config.extensionDevelopmentPath?.slice() || dirname(path),
extensionTestsPath,
extensionTestsEnv: { ...config.env, ...env, ELECTRON_RUN_AS_NODE: undefined },
launchArgs: [...(config.launchArgs || [])],
platform: config.desktopPlatform,
reporter: config.download?.reporter,
timeout: config.download?.timeout,
reuseMachineInstall: config.useInstallation && 'fromMachine' in config.useInstallation
? config.useInstallation.fromMachine
: undefined,
vscodeExecutablePath: config.useInstallation && 'fromPath' in config.useInstallation
? config.useInstallation.fromPath
: undefined,
});
if (nextCode > 0 && args.bail) {
return nextCode;
}
code = Math.max(code, nextCode);
}
}
return code;
}
/** Gathers test files that match the config */
async function gatherFiles({ config, path }) {
const fileListsProms = [];
const cwd = dirname(path);
const ignoreGlobs = args.ignore?.map(String).filter((p) => !isAbsolute(p));
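// Relative --ignore patterns are applied while globbing; absolute ignore paths are dropped from the gathered set afterwards.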
for (const file of config.files instanceof Array ? config.files : [config.files]) {
if (isAbsolute(file)) {
if (!ignoreGlobs?.some((i) => minimatch(file, i))) {
fileListsProms.push([file]);
}
}
else {
fileListsProms.push(glob(file, { cwd, ignore: ignoreGlobs }).then((l) => l.map((f) => join(cwd, f))));
}
}
const files = new Set((await Promise.all(fileListsProms)).flat());
args.ignore?.forEach((i) => files.delete(i));
return [...files];
}
/** Loads a specific config file by the path, throwing if loading fails. */
async function tryLoadConfigFile(path) {
const ext = path.split('.').pop();
if (!configFileRules.hasOwnProperty(ext)) {
throw new CliExpectedError(`I don't know how to load the config file extension '${ext}'. We can load: ${Object.keys(configFileRules).join(', ')}`);
}
try {
let loaded = await configFileRules[ext](path);
if ('default' in loaded) {
// handle default es module exports
loaded = loaded.default;
}
// allow returned promises to resolve:
loaded = await loaded;
return (Array.isArray(loaded) ? loaded : [loaded]).map((config) => ({ config, path }));
}
catch (e) {
throw new CliExpectedError(`Could not read config file ${path}: ${e.stack || e}`);
}
}
/** Loads the default config based on the process working directory. */
async function loadDefaultConfigFile() {
const base = '.vscode-test';
let dir = process.cwd();
while (true) {
for (const ext of Object.keys(configFileRules)) {
const candidate = join(dir, `${base}.${ext}`);
if (existsSync(candidate)) {
return tryLoadConfigFile(candidate);
}
}
const next = dirname(dir);
if (next === dir) {
break;
}
dir = next;
}
throw new CliExpectedError(`Could not find a ${base} file in this directory or any parent. You can specify one with the --config option.`);
}