add hw2
82
node_modules/langium/lib/parser/async-parser.d.ts
generated
vendored
Normal file
@@ -0,0 +1,82 @@
/******************************************************************************
 * Copyright 2023 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { CancellationToken } from '../utils/cancellation.js';
import type { LangiumCoreServices } from '../services.js';
import type { AstNode } from '../syntax-tree.js';
import type { LangiumParser, ParseResult } from './langium-parser.js';
import type { Hydrator } from '../serializer/hydrator.js';
import type { Event } from '../utils/event.js';
import { Deferred } from '../utils/promise-utils.js';
import { Emitter } from '../utils/event.js';
/**
 * Async parser that allows cancellation of the current parsing process.
 *
 * @remarks
 * The sync parser implementation blocks the event loop, which can become quite problematic for large files.
 * The default implementation is not actually async. It just wraps the sync parser in a promise. A real implementation would create worker threads or web workers to offload the parsing work.
 */
export interface AsyncParser {
    /**
     * Parses the given text and returns the parse result.
     *
     * @param text The text to parse.
     * @param cancelToken A cancellation token that can be used to cancel the parsing process.
     * @returns A promise that resolves to the parse result.
     *
     * @throws `OperationCancelled` if the parsing process is cancelled.
     */
    parse<T extends AstNode>(text: string, cancelToken: CancellationToken): Promise<ParseResult<T>>;
}
/**
 * Default implementation of the async parser which simply wraps the sync parser in a promise.
 *
 * @remarks
 * A real implementation would create worker threads or web workers to offload the parsing work.
 */
export declare class DefaultAsyncParser implements AsyncParser {
    protected readonly syncParser: LangiumParser;
    constructor(services: LangiumCoreServices);
    parse<T extends AstNode>(text: string, _cancelToken: CancellationToken): Promise<ParseResult<T>>;
}
export declare abstract class AbstractThreadedAsyncParser implements AsyncParser {
    /**
     * The thread count determines how many threads are used to parse files in parallel.
     * The default value is 8. Decreasing this value increases startup performance, but decreases parallel parsing performance.
     */
    protected threadCount: number;
    /**
     * The termination delay determines how long the parser waits for a thread to finish after a cancellation request.
     * The default value is 200 ms.
     */
    protected terminationDelay: number;
    protected workerPool: ParserWorker[];
    protected queue: Array<Deferred<ParserWorker>>;
    protected readonly hydrator: Hydrator;
    constructor(services: LangiumCoreServices);
    protected initializeWorkers(): void;
    parse<T extends AstNode>(text: string, cancelToken: CancellationToken): Promise<ParseResult<T>>;
    protected terminateWorker(worker: ParserWorker): void;
    protected acquireParserWorker(cancelToken: CancellationToken): Promise<ParserWorker>;
    protected abstract createWorker(): ParserWorker;
}
export type WorkerMessagePost = (message: unknown) => void;
export type WorkerMessageCallback = (cb: (message: unknown) => void) => void;
export declare class ParserWorker {
    protected readonly sendMessage: WorkerMessagePost;
    protected readonly _terminate: () => void;
    protected readonly onReadyEmitter: Emitter<void>;
    protected deferred: Deferred<ParseResult>;
    protected _ready: boolean;
    protected _parsing: boolean;
    get ready(): boolean;
    get onReady(): Event<void>;
    constructor(sendMessage: WorkerMessagePost, onMessage: WorkerMessageCallback, onError: WorkerMessageCallback, terminate: () => void);
    terminate(): void;
    lock(): void;
    unlock(): void;
    parse(text: string): Promise<ParseResult>;
}
//# sourceMappingURL=async-parser.d.ts.map
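Editor's note: the default implementation above only wraps the synchronous parser. A minimal usage sketch follows, assuming a generated service factory named `createMyDslServices` (hypothetical, stands in for your language's generated module) and that `CancellationToken` and `OperationCancelled` are re-exported from the `langium` package root, as in recent versions:

```ts
import { CancellationToken, OperationCancelled } from 'langium';
// Hypothetical generated service factory for your language.
import { createMyDslServices } from './my-dsl-module.js';

const services = createMyDslServices().MyDsl;

async function parseDocument(text: string): Promise<void> {
    try {
        // CancellationToken.None never fires; a real caller would pass a token
        // tied to e.g. an editor request so that long parses can be aborted.
        const result = await services.parser.AsyncParser.parse(text, CancellationToken.None);
        console.log(`Root node type: ${result.value.$type}, parser errors: ${result.parserErrors.length}`);
    } catch (err) {
        if (err === OperationCancelled) {
            return; // `parse` rejects with `OperationCancelled` when the token fires
        }
        throw err;
    }
}
```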
1
node_modules/langium/lib/parser/async-parser.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"async-parser.d.ts","sourceRoot":"","sources":["../../src/parser/async-parser.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,0BAA0B,CAAC;AAClE,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAC1D,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AACjD,OAAO,KAAK,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,qBAAqB,CAAC;AACtE,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,2BAA2B,CAAC;AAC1D,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,mBAAmB,CAAC;AAC/C,OAAO,EAAE,QAAQ,EAAsB,MAAM,2BAA2B,CAAC;AACzE,OAAO,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAE5C;;;;;;GAMG;AACH,MAAM,WAAW,WAAW;IACxB;;;;;;;;OAQG;IACH,KAAK,CAAC,CAAC,SAAS,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,WAAW,EAAE,iBAAiB,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,CAAC;CACnG;AAED;;;;;GAKG;AACH,qBAAa,kBAAmB,YAAW,WAAW;IAElD,SAAS,CAAC,QAAQ,CAAC,UAAU,EAAE,aAAa,CAAC;gBAEjC,QAAQ,EAAE,mBAAmB;IAIzC,KAAK,CAAC,CAAC,SAAS,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,YAAY,EAAE,iBAAiB,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;CAGnG;AAED,8BAAsB,2BAA4B,YAAW,WAAW;IAEpE;;;OAGG;IACH,SAAS,CAAC,WAAW,SAAK;IAC1B;;;OAGG;IACH,SAAS,CAAC,gBAAgB,SAAO;IACjC,SAAS,CAAC,UAAU,EAAE,YAAY,EAAE,CAAM;IAC1C,SAAS,CAAC,KAAK,EAAE,KAAK,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,CAAM;IAEpD,SAAS,CAAC,QAAQ,CAAC,QAAQ,EAAE,QAAQ,CAAC;gBAE1B,QAAQ,EAAE,mBAAmB;IAIzC,SAAS,CAAC,iBAAiB,IAAI,IAAI;IAgB7B,KAAK,CAAC,CAAC,SAAS,OAAO,EAAE,IAAI,EAAE,MAAM,EAAE,WAAW,EAAE,iBAAiB,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;IAwBrG,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,YAAY,GAAG,IAAI;cAQrC,mBAAmB,CAAC,WAAW,EAAE,iBAAiB,GAAG,OAAO,CAAC,YAAY,CAAC;IAoB1F,SAAS,CAAC,QAAQ,CAAC,YAAY,IAAI,YAAY;CAClD;AAED,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,EAAE,OAAO,KAAK,IAAI,CAAC;AAC3D,MAAM,MAAM,qBAAqB,GAAG,CAAC,EAAE,EAAE,CAAC,OAAO,EAAE,OAAO,KAAK,IAAI,KAAK,IAAI,CAAC;AAE7E,qBAAa,YAAY;IAErB,SAAS,CAAC,QAAQ,CAAC,WAAW,EAAE,iBAAiB,CAAC;IAClD,SAAS,CAAC,QAAQ,CAAC,UAAU,EAAE,MAAM,IAAI,CAAC;IAC1C,SAAS,CAAC,QAAQ,CAAC,cAAc,gBAAuB;IAExD,SAAS,CAAC,QAAQ,wBAA+B;IACjD,SAAS,CAAC,MAAM,UAAQ;IACxB,SAAS,CAAC,QAAQ,UAAS;IAE3B,IAAI,KAAK,IAAI,OAAO,CAEnB;IAED,IAAI,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAEzB;gBAEW,WAAW,EAAE,iBAAiB,EAAE,SAAS,EAAE,qBAAqB,EAAE,OAAO,EAAE,qBAAqB,EAAE,SAAS,EAAE,MAAM,IAAI;IAcnI,SAAS,IAAI,IAAI;IAKjB,IAAI,IAAI,IAAI;IAIZ,MAAM,IAAI,IAAI;IAMd,KAAK,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,WAAW,CAAC;CAS5C"}
149
node_modules/langium/lib/parser/async-parser.js
generated
vendored
Normal file
@@ -0,0 +1,149 @@
/******************************************************************************
 * Copyright 2023 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { Deferred, OperationCancelled } from '../utils/promise-utils.js';
import { Emitter } from '../utils/event.js';
/**
 * Default implementation of the async parser which simply wraps the sync parser in a promise.
 *
 * @remarks
 * A real implementation would create worker threads or web workers to offload the parsing work.
 */
export class DefaultAsyncParser {
    constructor(services) {
        this.syncParser = services.parser.LangiumParser;
    }
    parse(text, _cancelToken) {
        return Promise.resolve(this.syncParser.parse(text));
    }
}
export class AbstractThreadedAsyncParser {
    constructor(services) {
        /**
         * The thread count determines how many threads are used to parse files in parallel.
         * The default value is 8. Decreasing this value increases startup performance, but decreases parallel parsing performance.
         */
        this.threadCount = 8;
        /**
         * The termination delay determines how long the parser waits for a thread to finish after a cancellation request.
         * The default value is 200 ms.
         */
        this.terminationDelay = 200;
        this.workerPool = [];
        this.queue = [];
        this.hydrator = services.serializer.Hydrator;
    }
    initializeWorkers() {
        while (this.workerPool.length < this.threadCount) {
            const worker = this.createWorker();
            worker.onReady(() => {
                if (this.queue.length > 0) {
                    const deferred = this.queue.shift();
                    if (deferred) {
                        worker.lock();
                        deferred.resolve(worker);
                    }
                }
            });
            this.workerPool.push(worker);
        }
    }
    async parse(text, cancelToken) {
        const worker = await this.acquireParserWorker(cancelToken);
        const deferred = new Deferred();
        let timeout;
        // If the cancellation token is requested, we wait for a certain time before terminating the worker.
        // Since the cancellation token lives longer than the parsing process, we need to dispose the event listener.
        // Otherwise, we might accidentally terminate the worker after the parsing process has finished.
        const cancellation = cancelToken.onCancellationRequested(() => {
            timeout = setTimeout(() => {
                this.terminateWorker(worker);
            }, this.terminationDelay);
        });
        worker.parse(text).then(result => {
            const hydrated = this.hydrator.hydrate(result);
            deferred.resolve(hydrated);
        }).catch(err => {
            deferred.reject(err);
        }).finally(() => {
            cancellation.dispose();
            clearTimeout(timeout);
        });
        return deferred.promise;
    }
    terminateWorker(worker) {
        worker.terminate();
        const index = this.workerPool.indexOf(worker);
        if (index >= 0) {
            this.workerPool.splice(index, 1);
        }
    }
    async acquireParserWorker(cancelToken) {
        this.initializeWorkers();
        for (const worker of this.workerPool) {
            if (worker.ready) {
                worker.lock();
                return worker;
            }
        }
        const deferred = new Deferred();
        cancelToken.onCancellationRequested(() => {
            const index = this.queue.indexOf(deferred);
            if (index >= 0) {
                this.queue.splice(index, 1);
            }
            deferred.reject(OperationCancelled);
        });
        this.queue.push(deferred);
        return deferred.promise;
    }
}
export class ParserWorker {
    get ready() {
        return this._ready;
    }
    get onReady() {
        return this.onReadyEmitter.event;
    }
    constructor(sendMessage, onMessage, onError, terminate) {
        this.onReadyEmitter = new Emitter();
        this.deferred = new Deferred();
        this._ready = true;
        this._parsing = false;
        this.sendMessage = sendMessage;
        this._terminate = terminate;
        onMessage(result => {
            const parseResult = result;
            this.deferred.resolve(parseResult);
            this.unlock();
        });
        onError(error => {
            this.deferred.reject(error);
            this.unlock();
        });
    }
    terminate() {
        this.deferred.reject(OperationCancelled);
        this._terminate();
    }
    lock() {
        this._ready = false;
    }
    unlock() {
        this._parsing = false;
        this._ready = true;
        this.onReadyEmitter.fire();
    }
    parse(text) {
        if (this._parsing) {
            throw new Error('Parser worker is busy');
        }
        this._parsing = true;
        this.deferred = new Deferred();
        this.sendMessage(text);
        return this.deferred.promise;
    }
}
//# sourceMappingURL=async-parser.js.map
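Editor's note: the `@remarks` above say a real implementation would offload parsing to worker threads. A sketch of such a subclass, wiring `node:worker_threads` into the `createWorker`/`ParserWorker` extension points shown in this file; the worker script itself (which must run the sync parser and post the result back) is left as an assumption, and the classes are assumed to be re-exported from the `langium` package root:

```ts
import { Worker } from 'node:worker_threads';
import { AbstractThreadedAsyncParser, ParserWorker } from 'langium';
import type { LangiumCoreServices } from 'langium';

export class WorkerThreadAsyncParser extends AbstractThreadedAsyncParser {
    // `workerPath` points to a script that runs the sync parser and posts
    // results back; providing that script is up to you.
    constructor(services: LangiumCoreServices, protected readonly workerPath: string) {
        super(services);
    }

    protected override createWorker(): ParserWorker {
        const worker = new Worker(this.workerPath);
        return new ParserWorker(
            message => worker.postMessage(message),  // sendMessage: forward text to the thread
            cb => worker.on('message', cb),          // onMessage: parse results arrive here
            cb => worker.on('error', cb),            // onError: rejects the pending parse
            () => worker.terminate()                 // terminate: invoked after cancellation
        );
    }
}
```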
1
node_modules/langium/lib/parser/async-parser.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
9
node_modules/langium/lib/parser/completion-parser-builder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { LangiumCoreServices } from '../services.js';
import { LangiumCompletionParser } from './langium-parser.js';
export declare function createCompletionParser(services: LangiumCoreServices): LangiumCompletionParser;
//# sourceMappingURL=completion-parser-builder.d.ts.map
1
node_modules/langium/lib/parser/completion-parser-builder.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"completion-parser-builder.d.ts","sourceRoot":"","sources":["../../src/parser/completion-parser-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAC1D,OAAO,EAAE,uBAAuB,EAAE,MAAM,qBAAqB,CAAC;AAG9D,wBAAgB,sBAAsB,CAAC,QAAQ,EAAE,mBAAmB,GAAG,uBAAuB,CAO7F"}
16
node_modules/langium/lib/parser/completion-parser-builder.js
generated
vendored
Normal file
@@ -0,0 +1,16 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { LangiumCompletionParser } from './langium-parser.js';
import { createParser } from './parser-builder-base.js';
export function createCompletionParser(services) {
    const grammar = services.Grammar;
    const lexer = services.parser.Lexer;
    const parser = new LangiumCompletionParser(services);
    createParser(grammar, parser, lexer.definition);
    parser.finalize();
    return parser;
}
//# sourceMappingURL=completion-parser-builder.js.map
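Editor's note: this factory is what a Langium module binds to `parser.CompletionParser`. A rough sketch of that wiring, with the deep import path mirroring the vendored file layout above and the module typing elided:

```ts
import type { LangiumCoreServices } from 'langium';
import { createCompletionParser } from 'langium/lib/parser/completion-parser-builder.js';

// Sketch of a module fragment: built once per language; the factory expects
// the Grammar and Lexer services to already be available on `services`.
export const parserModuleFragment = {
    parser: {
        CompletionParser: (services: LangiumCoreServices) => createCompletionParser(services),
    },
};
```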
1
node_modules/langium/lib/parser/completion-parser-builder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"completion-parser-builder.js","sourceRoot":"","sources":["../../src/parser/completion-parser-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAGhF,OAAO,EAAE,uBAAuB,EAAE,MAAM,qBAAqB,CAAC;AAC9D,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AAExD,MAAM,UAAU,sBAAsB,CAAC,QAA6B;IAChE,MAAM,OAAO,GAAG,QAAQ,CAAC,OAAO,CAAC;IACjC,MAAM,KAAK,GAAG,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC;IACpC,MAAM,MAAM,GAAG,IAAI,uBAAuB,CAAC,QAAQ,CAAC,CAAC;IACrD,YAAY,CAAC,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,UAAU,CAAC,CAAC;IAChD,MAAM,CAAC,QAAQ,EAAE,CAAC;IAClB,OAAO,MAAM,CAAC;AAClB,CAAC"}
76
node_modules/langium/lib/parser/cst-node-builder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,76 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { IToken, TokenType } from 'chevrotain';
import type { Range } from 'vscode-languageserver-types';
import type { AbstractElement } from '../languages/generated/ast.js';
import type { AstNode, CompositeCstNode, CstNode, LeafCstNode, RootCstNode } from '../syntax-tree.js';
export declare class CstNodeBuilder {
    private rootNode;
    private nodeStack;
    get current(): CompositeCstNodeImpl;
    buildRootNode(input: string): RootCstNode;
    buildCompositeNode(feature: AbstractElement): CompositeCstNode;
    buildLeafNode(token: IToken, feature?: AbstractElement): LeafCstNode;
    removeNode(node: CstNode): void;
    addHiddenNodes(tokens: IToken[]): void;
    construct(item: {
        $type: string | symbol | undefined;
        $cstNode: CstNode;
    }): void;
}
export declare abstract class AbstractCstNode implements CstNode {
    abstract get offset(): number;
    abstract get length(): number;
    abstract get end(): number;
    abstract get range(): Range;
    container?: CompositeCstNode;
    grammarSource?: AbstractElement;
    root: RootCstNode;
    private _astNode?;
    /** @deprecated use `container` instead. */
    get parent(): CompositeCstNode | undefined;
    /** @deprecated use `grammarSource` instead. */
    get feature(): AbstractElement | undefined;
    get hidden(): boolean;
    get astNode(): AstNode;
    set astNode(value: AstNode | undefined);
    /** @deprecated use `astNode` instead. */
    get element(): AstNode;
    get text(): string;
}
export declare class LeafCstNodeImpl extends AbstractCstNode implements LeafCstNode {
    get offset(): number;
    get length(): number;
    get end(): number;
    get hidden(): boolean;
    get tokenType(): TokenType;
    get range(): Range;
    private _hidden;
    private _offset;
    private _length;
    private _range;
    private _tokenType;
    constructor(offset: number, length: number, range: Range, tokenType: TokenType, hidden?: boolean);
}
export declare class CompositeCstNodeImpl extends AbstractCstNode implements CompositeCstNode {
    readonly content: CstNode[];
    private _rangeCache?;
    /** @deprecated use `content` instead. */
    get children(): CstNode[];
    get offset(): number;
    get length(): number;
    get end(): number;
    get range(): Range;
    private get firstNonHiddenNode();
    private get lastNonHiddenNode();
}
export declare class RootCstNodeImpl extends CompositeCstNodeImpl implements RootCstNode {
    private _text;
    get text(): string;
    get fullText(): string;
    constructor(input?: string);
}
//# sourceMappingURL=cst-node-builder.d.ts.map
1
node_modules/langium/lib/parser/cst-node-builder.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"cst-node-builder.d.ts","sourceRoot":"","sources":["../../src/parser/cst-node-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,YAAY,CAAC;AACpD,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,6BAA6B,CAAC;AACzD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,+BAA+B,CAAC;AACrE,OAAO,KAAK,EAAE,OAAO,EAAE,gBAAgB,EAAE,OAAO,EAAE,WAAW,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAC;AAItG,qBAAa,cAAc;IAEvB,OAAO,CAAC,QAAQ,CAAmB;IACnC,OAAO,CAAC,SAAS,CAA8B;IAE/C,IAAI,OAAO,IAAI,oBAAoB,CAElC;IAED,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG,WAAW;IAOzC,kBAAkB,CAAC,OAAO,EAAE,eAAe,GAAG,gBAAgB;IAS9D,aAAa,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,WAAW;IAQpE,UAAU,CAAC,IAAI,EAAE,OAAO,GAAG,IAAI;IAU/B,cAAc,CAAC,MAAM,EAAE,MAAM,EAAE,GAAG,IAAI;IAiCtC,SAAS,CAAC,IAAI,EAAE;QAAE,KAAK,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAAC;QAAC,QAAQ,EAAE,OAAO,CAAA;KAAE,GAAG,IAAI;CAenF;AAED,8BAAsB,eAAgB,YAAW,OAAO;IACpD,QAAQ,KAAK,MAAM,IAAI,MAAM,CAAC;IAC9B,QAAQ,KAAK,MAAM,IAAI,MAAM,CAAC;IAC9B,QAAQ,KAAK,GAAG,IAAI,MAAM,CAAC;IAC3B,QAAQ,KAAK,KAAK,IAAI,KAAK,CAAC;IAE5B,SAAS,CAAC,EAAE,gBAAgB,CAAC;IAC7B,aAAa,CAAC,EAAE,eAAe,CAAC;IAChC,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,CAAC,QAAQ,CAAC,CAAU;IAE3B,2CAA2C;IAC3C,IAAI,MAAM,IAAI,gBAAgB,GAAG,SAAS,CAEzC;IAED,+CAA+C;IAC/C,IAAI,OAAO,IAAI,eAAe,GAAG,SAAS,CAEzC;IAED,IAAI,MAAM,IAAI,OAAO,CAEpB;IAED,IAAI,OAAO,IAAI,OAAO,CAMrB;IAED,IAAI,OAAO,CAAC,KAAK,EAAE,OAAO,GAAG,SAAS,EAErC;IAED,yCAAyC;IACzC,IAAI,OAAO,IAAI,OAAO,CAErB;IAED,IAAI,IAAI,IAAI,MAAM,CAEjB;CACJ;AAED,qBAAa,eAAgB,SAAQ,eAAgB,YAAW,WAAW;IACvE,IAAI,MAAM,IAAI,MAAM,CAEnB;IAED,IAAI,MAAM,IAAI,MAAM,CAEnB;IAED,IAAI,GAAG,IAAI,MAAM,CAEhB;IAED,IAAa,MAAM,IAAI,OAAO,CAE7B;IAED,IAAI,SAAS,IAAI,SAAS,CAEzB;IAED,IAAI,KAAK,IAAI,KAAK,CAEjB;IAED,OAAO,CAAC,OAAO,CAAU;IACzB,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,MAAM,CAAQ;IACtB,OAAO,CAAC,UAAU,CAAY;gBAElB,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,UAAQ;CAQjG;AAED,qBAAa,oBAAqB,SAAQ,eAAgB,YAAW,gBAAgB;IACjF,QAAQ,CAAC,OAAO,EAAE,OAAO,EAAE,CAA8B;IACzD,OAAO,CAAC,WAAW,CAAC,CAAQ;IAE5B,yCAAyC;IACzC,IAAI,QAAQ,IAAI,OAAO,EAAE,CAExB;IAED,IAAI,MAAM,IAAI,MAAM,CAEnB;IAED,IAAI,MAAM,IAAI,MAAM,CAEnB;IAED,IAAI,GAAG,IAAI,MAAM,CAEhB;IAED,IAAI,KAAK,IAAI,KAAK,CAajB;IAED,OAAO,KAAK,kBAAkB,GAO7B;IAED,OAAO,KAAK,iBAAiB,GAQ5B;CACJ;AAiCD,qBAAa,eAAgB,SAAQ,oBAAqB,YAAW,WAAW;IAC5E,OAAO,CAAC,KAAK,CAAM;IAEnB,IAAa,IAAI,IAAI,MAAM,CAE1B;IAED,IAAI,QAAQ,IAAI,MAAM,CAErB;gBAEW,KAAK,CAAC,EAAE,MAAM;CAI7B"}
243
node_modules/langium/lib/parser/cst-node-builder.js
generated
vendored
Normal file
@@ -0,0 +1,243 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { Position } from 'vscode-languageserver-types';
import { tokenToRange } from '../utils/cst-utils.js';
export class CstNodeBuilder {
    constructor() {
        this.nodeStack = [];
    }
    get current() {
        var _a;
        return (_a = this.nodeStack[this.nodeStack.length - 1]) !== null && _a !== void 0 ? _a : this.rootNode;
    }
    buildRootNode(input) {
        this.rootNode = new RootCstNodeImpl(input);
        this.rootNode.root = this.rootNode;
        this.nodeStack = [this.rootNode];
        return this.rootNode;
    }
    buildCompositeNode(feature) {
        const compositeNode = new CompositeCstNodeImpl();
        compositeNode.grammarSource = feature;
        compositeNode.root = this.rootNode;
        this.current.content.push(compositeNode);
        this.nodeStack.push(compositeNode);
        return compositeNode;
    }
    buildLeafNode(token, feature) {
        const leafNode = new LeafCstNodeImpl(token.startOffset, token.image.length, tokenToRange(token), token.tokenType, !feature);
        leafNode.grammarSource = feature;
        leafNode.root = this.rootNode;
        this.current.content.push(leafNode);
        return leafNode;
    }
    removeNode(node) {
        const parent = node.container;
        if (parent) {
            const index = parent.content.indexOf(node);
            if (index >= 0) {
                parent.content.splice(index, 1);
            }
        }
    }
    addHiddenNodes(tokens) {
        const nodes = [];
        for (const token of tokens) {
            const leafNode = new LeafCstNodeImpl(token.startOffset, token.image.length, tokenToRange(token), token.tokenType, true);
            leafNode.root = this.rootNode;
            nodes.push(leafNode);
        }
        let current = this.current;
        let added = false;
        // If we are within a composite node, we add the hidden nodes to the content
        if (current.content.length > 0) {
            current.content.push(...nodes);
            return;
        }
        // Otherwise we are at a newly created node
        // Instead of adding the hidden nodes here, we search for the first parent node with content
        while (current.container) {
            const index = current.container.content.indexOf(current);
            if (index > 0) {
                // Add the hidden nodes before the current node
                current.container.content.splice(index, 0, ...nodes);
                added = true;
                break;
            }
            current = current.container;
        }
        // If we arrive at the root node, we add the hidden nodes at the beginning
        // This is the case if the hidden nodes are the first nodes in the tree
        if (!added) {
            this.rootNode.content.unshift(...nodes);
        }
    }
    construct(item) {
        const current = this.current;
        // The specified item could be a datatype ($type is symbol) or a fragment ($type is undefined)
        // Only if the $type is a string, we actually assign the element
        if (typeof item.$type === 'string') {
            this.current.astNode = item;
        }
        item.$cstNode = current;
        const node = this.nodeStack.pop();
        // Empty composite nodes are not valid
        // Simply remove the node from the tree
        if ((node === null || node === void 0 ? void 0 : node.content.length) === 0) {
            this.removeNode(node);
        }
    }
}
export class AbstractCstNode {
    /** @deprecated use `container` instead. */
    get parent() {
        return this.container;
    }
    /** @deprecated use `grammarSource` instead. */
    get feature() {
        return this.grammarSource;
    }
    get hidden() {
        return false;
    }
    get astNode() {
        var _a, _b;
        const node = typeof ((_a = this._astNode) === null || _a === void 0 ? void 0 : _a.$type) === 'string' ? this._astNode : (_b = this.container) === null || _b === void 0 ? void 0 : _b.astNode;
        if (!node) {
            throw new Error('This node has no associated AST element');
        }
        return node;
    }
    set astNode(value) {
        this._astNode = value;
    }
    /** @deprecated use `astNode` instead. */
    get element() {
        return this.astNode;
    }
    get text() {
        return this.root.fullText.substring(this.offset, this.end);
    }
}
export class LeafCstNodeImpl extends AbstractCstNode {
    get offset() {
        return this._offset;
    }
    get length() {
        return this._length;
    }
    get end() {
        return this._offset + this._length;
    }
    get hidden() {
        return this._hidden;
    }
    get tokenType() {
        return this._tokenType;
    }
    get range() {
        return this._range;
    }
    constructor(offset, length, range, tokenType, hidden = false) {
        super();
        this._hidden = hidden;
        this._offset = offset;
        this._tokenType = tokenType;
        this._length = length;
        this._range = range;
    }
}
export class CompositeCstNodeImpl extends AbstractCstNode {
    constructor() {
        super(...arguments);
        this.content = new CstNodeContainer(this);
    }
    /** @deprecated use `content` instead. */
    get children() {
        return this.content;
    }
    get offset() {
        var _a, _b;
        return (_b = (_a = this.firstNonHiddenNode) === null || _a === void 0 ? void 0 : _a.offset) !== null && _b !== void 0 ? _b : 0;
    }
    get length() {
        return this.end - this.offset;
    }
    get end() {
        var _a, _b;
        return (_b = (_a = this.lastNonHiddenNode) === null || _a === void 0 ? void 0 : _a.end) !== null && _b !== void 0 ? _b : 0;
    }
    get range() {
        const firstNode = this.firstNonHiddenNode;
        const lastNode = this.lastNonHiddenNode;
        if (firstNode && lastNode) {
            if (this._rangeCache === undefined) {
                const { range: firstRange } = firstNode;
                const { range: lastRange } = lastNode;
                this._rangeCache = { start: firstRange.start, end: lastRange.end.line < firstRange.start.line ? firstRange.start : lastRange.end };
            }
            return this._rangeCache;
        }
        else {
            return { start: Position.create(0, 0), end: Position.create(0, 0) };
        }
    }
    get firstNonHiddenNode() {
        for (const child of this.content) {
            if (!child.hidden) {
                return child;
            }
        }
        return this.content[0];
    }
    get lastNonHiddenNode() {
        for (let i = this.content.length - 1; i >= 0; i--) {
            const child = this.content[i];
            if (!child.hidden) {
                return child;
            }
        }
        return this.content[this.content.length - 1];
    }
}
class CstNodeContainer extends Array {
    constructor(parent) {
        super();
        this.parent = parent;
        Object.setPrototypeOf(this, CstNodeContainer.prototype);
    }
    push(...items) {
        this.addParents(items);
        return super.push(...items);
    }
    unshift(...items) {
        this.addParents(items);
        return super.unshift(...items);
    }
    splice(start, count, ...items) {
        this.addParents(items);
        return super.splice(start, count, ...items);
    }
    addParents(items) {
        for (const item of items) {
            item.container = this.parent;
        }
    }
}
export class RootCstNodeImpl extends CompositeCstNodeImpl {
    get text() {
        return this._text.substring(this.offset, this.end);
    }
    get fullText() {
        return this._text;
    }
    constructor(input) {
        super();
        this._text = '';
        this._text = input !== null && input !== void 0 ? input : '';
    }
}
//# sourceMappingURL=cst-node-builder.js.map
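Editor's note: since `offset`/`end` index into the root's full text and `text` returns the covered slice (per the getters above), a small, self-contained helper for inspecting where a parsed AST node sits could look like this sketch:

```ts
import type { AstNode } from 'langium';

// Describe an AST node's location in the source via its attached CST node.
function describeNode(node: AstNode): string {
    const cst = node.$cstNode;
    if (!cst) {
        return `${node.$type} <no CST attached>`;
    }
    const { start } = cst.range; // zero-based line/character positions
    return `${node.$type} at line ${start.line + 1}, offsets [${cst.offset}, ${cst.end}): "${cst.text}"`;
}
```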
1
node_modules/langium/lib/parser/cst-node-builder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
183
node_modules/langium/lib/parser/indentation-aware.d.ts
generated
vendored
Normal file
@@ -0,0 +1,183 @@
/******************************************************************************
 * Copyright 2024 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { CustomPatternMatcherFunc, TokenType, IToken, TokenVocabulary } from 'chevrotain';
import type { Grammar, TerminalRule } from '../languages/generated/ast.js';
import type { LexingReport, TokenBuilderOptions } from './token-builder.js';
import type { LexerResult, TokenizeOptions } from './lexer.js';
import type { LangiumCoreServices } from '../services.js';
import { DefaultTokenBuilder } from './token-builder.js';
import { DefaultLexer } from './lexer.js';
type IndentationAwareDelimiter<TokenName extends string> = [begin: TokenName, end: TokenName];
export interface IndentationTokenBuilderOptions<TerminalName extends string = string, KeywordName extends string = string> {
    /**
     * The name of the token used to denote indentation in the grammar.
     * A possible definition in the grammar could look like this:
     * ```langium
     * terminal INDENT: ':synthetic-indent:';
     * ```
     *
     * @default 'INDENT'
     */
    indentTokenName: TerminalName;
    /**
     * The name of the token used to denote deindentation in the grammar.
     * A possible definition in the grammar could look like this:
     * ```langium
     * terminal DEDENT: ':synthetic-dedent:';
     * ```
     *
     * @default 'DEDENT'
     */
    dedentTokenName: TerminalName;
    /**
     * The name of the token used to denote whitespace other than indentation and newlines in the grammar.
     * A possible definition in the grammar could look like this:
     * ```langium
     * hidden terminal WS: /[ \t]+/;
     * ```
     *
     * @default 'WS'
     */
    whitespaceTokenName: TerminalName;
    /**
     * The delimiter tokens inside of which indentation should be ignored and treated as normal whitespace.
     * For example, Python doesn't treat any whitespace between `(` and `)` as significant.
     *
     * Can be either terminal tokens or keyword tokens.
     *
     * @default []
     */
    ignoreIndentationDelimiters: Array<IndentationAwareDelimiter<TerminalName | KeywordName>>;
}
export declare const indentationBuilderDefaultOptions: IndentationTokenBuilderOptions;
export declare enum LexingMode {
    REGULAR = "indentation-sensitive",
    IGNORE_INDENTATION = "ignore-indentation"
}
export interface IndentationLexingReport extends LexingReport {
    /** Dedent tokens that are necessary to close the remaining indents. */
    remainingDedents: IToken[];
}
/**
 * A token builder that is sensitive to indentation in the input text.
 * It will generate tokens for indentation and dedentation based on the indentation level.
 *
 * The first generic parameter corresponds to the names of terminal tokens,
 * while the second one corresponds to the names of keyword tokens.
 * Both parameters are optional and can be imported from `./generated/ast.js`.
 *
 * Inspired by https://github.com/chevrotain/chevrotain/blob/master/examples/lexer/python_indentation/python_indentation.js
 */
export declare class IndentationAwareTokenBuilder<Terminals extends string = string, KeywordName extends string = string> extends DefaultTokenBuilder {
    /**
     * The stack stores all the previously matched indentation levels to understand how deeply the next tokens are nested.
     * The stack is only valid during a single lexing run.
     */
    protected indentationStack: number[];
    readonly options: IndentationTokenBuilderOptions<Terminals, KeywordName>;
    /**
     * The token type to be used for indentation tokens
     */
    readonly indentTokenType: TokenType;
    /**
     * The token type to be used for dedentation tokens
     */
    readonly dedentTokenType: TokenType;
    /**
     * A regular expression to match a series of tabs and/or spaces.
     * Override this to customize what the indentation is allowed to consist of.
     */
    protected whitespaceRegExp: RegExp;
    constructor(options?: Partial<IndentationTokenBuilderOptions<NoInfer<Terminals>, NoInfer<KeywordName>>>);
    buildTokens(grammar: Grammar, options?: TokenBuilderOptions | undefined): TokenVocabulary;
    flushLexingReport(text: string): IndentationLexingReport;
    /**
     * Helper function to check if the current position is the start of a new line.
     *
     * @param text The full input string.
     * @param offset The current position at which to check
     * @returns Whether the current position is the start of a new line
     */
    protected isStartOfLine(text: string, offset: number): boolean;
    /**
     * A helper function used in matching both indents and dedents.
     *
     * @param text The full input string.
     * @param offset The current position at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     * @returns The current and previous indentation levels and the matched whitespace
     */
    protected matchWhitespace(text: string, offset: number, tokens: IToken[], groups: Record<string, IToken[]>): {
        currIndentLevel: number;
        prevIndentLevel: number;
        match: RegExpExecArray | null;
    };
    /**
     * Helper function to create an instance of an indentation token.
     *
     * @param tokenType Indent or dedent token type
     * @param text Full input string, used to calculate the line number
     * @param image The original image of the token (tabs or spaces)
     * @param offset Current position in the input string
     * @returns The indentation token instance
     */
    protected createIndentationTokenInstance(tokenType: TokenType, text: string, image: string, offset: number): IToken;
    /**
     * Helper function to get the line number at a given offset.
     *
     * @param text Full input string, used to calculate the line number
     * @param offset Current position in the input string
     * @returns The line number at the given offset
     */
    protected getLineNumber(text: string, offset: number): number;
    /**
     * A custom pattern for matching indents
     *
     * @param text The full input string.
     * @param offset The offset at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     */
    protected indentMatcher(text: string, offset: number, tokens: IToken[], groups: Record<string, IToken[]>): ReturnType<CustomPatternMatcherFunc>;
    /**
     * A custom pattern for matching dedents
     *
     * @param text The full input string.
     * @param offset The offset at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     */
    protected dedentMatcher(text: string, offset: number, tokens: IToken[], groups: Record<string, IToken[]>): ReturnType<CustomPatternMatcherFunc>;
    protected buildTerminalToken(terminal: TerminalRule): TokenType;
    /**
     * Resets the indentation stack between different runs of the lexer
     *
     * @param text Full text that was tokenized
     * @returns Remaining dedent tokens to match all previous indents at the end of the file
     */
    flushRemainingDedents(text: string): IToken[];
}
/**
 * A lexer that is aware of indentation in the input text.
 * The only purpose of this lexer is to reset the internal state of the {@link IndentationAwareTokenBuilder}
 * between the tokenization of different text inputs.
 *
 * In your module, you can override the default lexer with this one like so:
 * ```ts
 * parser: {
 *     TokenBuilder: () => new IndentationAwareTokenBuilder(),
 *     Lexer: (services) => new IndentationAwareLexer(services),
 * }
 * ```
 */
export declare class IndentationAwareLexer extends DefaultLexer {
    protected readonly indentationTokenBuilder: IndentationAwareTokenBuilder;
    constructor(services: LangiumCoreServices);
    tokenize(text: string, options?: TokenizeOptions): LexerResult;
}
export {};
//# sourceMappingURL=indentation-aware.d.ts.map
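Editor's note: tying the pieces together, and building on the grammar snippets embedded in the option docs above (`INDENT`, `DEDENT`, `WS`) plus the module override shown in the lexer's doc comment, a fuller sketch of the `parser` section of a language module might look like this. The delimiter pairs are illustrative, mirroring the Python parentheses example, and both classes are assumed to be re-exported from the `langium` package root:

```ts
import { IndentationAwareTokenBuilder, IndentationAwareLexer } from 'langium';
import type { LangiumCoreServices } from 'langium';

// Sketch of module overrides. The grammar must declare the INDENT/DEDENT/WS
// terminals shown in the option docs above for these names to resolve.
export const indentationParserOverrides = {
    parser: {
        TokenBuilder: () => new IndentationAwareTokenBuilder({
            // Inside ( ... ) and [ ... ], indentation is lexed as plain whitespace.
            ignoreIndentationDelimiters: [
                ['(', ')'],
                ['[', ']'],
            ],
        }),
        Lexer: (services: LangiumCoreServices) => new IndentationAwareLexer(services),
    },
};
```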
1
node_modules/langium/lib/parser/indentation-aware.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"indentation-aware.d.ts","sourceRoot":"","sources":["../../src/parser/indentation-aware.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,wBAAwB,EAAE,SAAS,EAAE,MAAM,EAA6B,eAAe,EAAE,MAAM,YAAY,CAAC;AAC1H,OAAO,KAAK,EAAE,OAAO,EAAE,YAAY,EAAE,MAAM,+BAA+B,CAAC;AAC3E,OAAO,KAAK,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAC5E,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,MAAM,YAAY,CAAC;AAC/D,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAE1D,OAAO,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AACzD,OAAO,EAA4B,YAAY,EAAoB,MAAM,YAAY,CAAC;AAEtF,KAAK,yBAAyB,CAAC,SAAS,SAAS,MAAM,IAAI,CAAC,KAAK,EAAE,SAAS,EAAE,GAAG,EAAE,SAAS,CAAC,CAAC;AAE9F,MAAM,WAAW,8BAA8B,CAAC,YAAY,SAAS,MAAM,GAAG,MAAM,EAAE,WAAW,SAAS,MAAM,GAAG,MAAM;IACrH;;;;;;;;OAQG;IACH,eAAe,EAAE,YAAY,CAAC;IAC9B;;;;;;;;OAQG;IACH,eAAe,EAAE,YAAY,CAAC;IAC9B;;;;;;;;OAQG;IACH,mBAAmB,EAAE,YAAY,CAAC;IAClC;;;;;;;OAOG;IACH,2BAA2B,EAAE,KAAK,CAAC,yBAAyB,CAAC,YAAY,GAAG,WAAW,CAAC,CAAC,CAAA;CAC5F;AAED,eAAO,MAAM,gCAAgC,EAAE,8BAK9C,CAAC;AAEF,oBAAY,UAAU;IAClB,OAAO,0BAA0B;IACjC,kBAAkB,uBAAuB;CAC5C;AAED,MAAM,WAAW,uBAAwB,SAAQ,YAAY;IACzD,uEAAuE;IACvE,gBAAgB,EAAE,MAAM,EAAE,CAAC;CAC9B;AAED;;;;;;;;;GASG;AACH,qBAAa,4BAA4B,CAAC,SAAS,SAAS,MAAM,GAAG,MAAM,EAAE,WAAW,SAAS,MAAM,GAAG,MAAM,CAAE,SAAQ,mBAAmB;IACzI;;;OAGG;IACH,SAAS,CAAC,gBAAgB,EAAE,MAAM,EAAE,CAAO;IAE3C,QAAQ,CAAC,OAAO,EAAE,8BAA8B,CAAC,SAAS,EAAE,WAAW,CAAC,CAAC;IAEzE;;OAEG;IACH,QAAQ,CAAC,eAAe,EAAE,SAAS,CAAC;IAEpC;;OAEG;IACH,QAAQ,CAAC,eAAe,EAAE,SAAS,CAAC;IAEpC;;;OAGG;IACH,SAAS,CAAC,gBAAgB,SAAa;gBAE3B,OAAO,GAAE,OAAO,CAAC,8BAA8B,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC,CAA8F;IAoB1L,WAAW,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,mBAAmB,GAAG,SAAS,GAAG,eAAe;IAkDzF,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,uBAAuB;IAQjE;;;;;;OAMG;IACH,SAAS,CAAC,aAAa,CAAC,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,OAAO;IAI9D;;;;;;;;OAQG;IAEH,SAAS,CAAC,eAAe,CAAC,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,EAAE,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,GAAG;QAAE,eAAe,EAAE,MAAM,CAAC;QAAC,eAAe,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,eAAe,GAAG,IAAI,CAAA;KAAE;IAUhM;;;;;;;;OAQG;IACH,SAAS,CAAC,8BAA8B,CAAC,SAAS,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,MAAM;IAWnH;;;;;;OAMG;IACH,SAAS,CAAC,aAAa,CAAC,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,MAAM;IAI7D;;;;;;;OAOG;IACH,SAAS,CAAC,aAAa,CAAC,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,EAAE,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,GAAG,UAAU,CAAC,wBAAwB,CAAC;IAkB/I;;;;;;;OAOG;IACH,SAAS,CAAC,aAAa,CAAC,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,EAAE,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,GAAG,UAAU,CAAC,wBAAwB,CAAC;cA8C5H,kBAAkB,CAAC,QAAQ,EAAE,YAAY,GAAG,SAAS;IAkBxE;;;;;OAKG;IACH,qBAAqB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,EAAE;CAYhD;AAED;;;;;;;;;;;;GAYG;AACH,qBAAa,qBAAsB,SAAQ,YAAY;IAEnD,SAAS,CAAC,QAAQ,CAAC,uBAAuB,EAAE,4BAA4B,CAAC;gBAE7D,QAAQ,EAAE,mBAAmB;IAShC,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,OAAO,GAAE,eAA0C,GAAG,WAAW;CAqCpG"}
318
node_modules/langium/lib/parser/indentation-aware.js
generated
vendored
Normal file
@@ -0,0 +1,318 @@
/******************************************************************************
 * Copyright 2024 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { createToken, createTokenInstance, Lexer } from 'chevrotain';
import { DefaultTokenBuilder } from './token-builder.js';
import { DEFAULT_TOKENIZE_OPTIONS, DefaultLexer, isTokenTypeArray } from './lexer.js';
export const indentationBuilderDefaultOptions = {
    indentTokenName: 'INDENT',
    dedentTokenName: 'DEDENT',
    whitespaceTokenName: 'WS',
    ignoreIndentationDelimiters: [],
};
export var LexingMode;
(function (LexingMode) {
    LexingMode["REGULAR"] = "indentation-sensitive";
    LexingMode["IGNORE_INDENTATION"] = "ignore-indentation";
})(LexingMode || (LexingMode = {}));
/**
 * A token builder that is sensitive to indentation in the input text.
 * It will generate tokens for indentation and dedentation based on the indentation level.
 *
 * The first generic parameter corresponds to the names of terminal tokens,
 * while the second one corresponds to the names of keyword tokens.
 * Both parameters are optional and can be imported from `./generated/ast.js`.
 *
 * Inspired by https://github.com/chevrotain/chevrotain/blob/master/examples/lexer/python_indentation/python_indentation.js
 */
export class IndentationAwareTokenBuilder extends DefaultTokenBuilder {
    constructor(options = indentationBuilderDefaultOptions) {
        super();
        /**
         * The stack stores all the previously matched indentation levels to understand how deeply the next tokens are nested.
         * The stack is only valid during a single lexing run.
         */
        this.indentationStack = [0];
        /**
         * A regular expression to match a series of tabs and/or spaces.
         * Override this to customize what the indentation is allowed to consist of.
         */
        this.whitespaceRegExp = /[ \t]+/y;
        this.options = Object.assign(Object.assign({}, indentationBuilderDefaultOptions), options);
        this.indentTokenType = createToken({
            name: this.options.indentTokenName,
            pattern: this.indentMatcher.bind(this),
            line_breaks: false,
        });
        this.dedentTokenType = createToken({
            name: this.options.dedentTokenName,
            pattern: this.dedentMatcher.bind(this),
            line_breaks: false,
        });
    }
    buildTokens(grammar, options) {
        const tokenTypes = super.buildTokens(grammar, options);
        if (!isTokenTypeArray(tokenTypes)) {
            throw new Error('Invalid tokens built by default builder');
        }
        const { indentTokenName, dedentTokenName, whitespaceTokenName, ignoreIndentationDelimiters } = this.options;
        // Rearrange tokens because whitespace (which is ignored) goes to the beginning by default, consuming indentation as well
        // Order should be: dedent, indent, spaces
        let dedent;
        let indent;
        let ws;
        const otherTokens = [];
        for (const tokenType of tokenTypes) {
            for (const [begin, end] of ignoreIndentationDelimiters) {
                if (tokenType.name === begin) {
                    tokenType.PUSH_MODE = LexingMode.IGNORE_INDENTATION;
                }
                else if (tokenType.name === end) {
                    tokenType.POP_MODE = true;
                }
            }
            if (tokenType.name === dedentTokenName) {
                dedent = tokenType;
            }
            else if (tokenType.name === indentTokenName) {
                indent = tokenType;
            }
            else if (tokenType.name === whitespaceTokenName) {
                ws = tokenType;
            }
            else {
                otherTokens.push(tokenType);
            }
        }
        if (!dedent || !indent || !ws) {
            throw new Error('Some indentation/whitespace tokens not found!');
        }
        if (ignoreIndentationDelimiters.length > 0) {
            const multiModeLexerDef = {
                modes: {
                    [LexingMode.REGULAR]: [dedent, indent, ...otherTokens, ws],
                    [LexingMode.IGNORE_INDENTATION]: [...otherTokens, ws],
                },
                defaultMode: LexingMode.REGULAR,
            };
            return multiModeLexerDef;
        }
        else {
            return [dedent, indent, ws, ...otherTokens];
        }
    }
    flushLexingReport(text) {
        const result = super.flushLexingReport(text);
        return Object.assign(Object.assign({}, result), { remainingDedents: this.flushRemainingDedents(text) });
    }
    /**
     * Helper function to check if the current position is the start of a new line.
     *
     * @param text The full input string.
     * @param offset The current position at which to check
     * @returns Whether the current position is the start of a new line
     */
    isStartOfLine(text, offset) {
        return offset === 0 || '\r\n'.includes(text[offset - 1]);
    }
    /**
     * A helper function used in matching both indents and dedents.
     *
     * @param text The full input string.
     * @param offset The current position at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     * @returns The current and previous indentation levels and the matched whitespace
     */
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    matchWhitespace(text, offset, tokens, groups) {
        var _a;
        this.whitespaceRegExp.lastIndex = offset;
        const match = this.whitespaceRegExp.exec(text);
        return {
            currIndentLevel: (_a = match === null || match === void 0 ? void 0 : match[0].length) !== null && _a !== void 0 ? _a : 0,
            prevIndentLevel: this.indentationStack.at(-1),
            match,
        };
    }
    /**
     * Helper function to create an instance of an indentation token.
     *
     * @param tokenType Indent or dedent token type
     * @param text Full input string, used to calculate the line number
     * @param image The original image of the token (tabs or spaces)
     * @param offset Current position in the input string
     * @returns The indentation token instance
     */
    createIndentationTokenInstance(tokenType, text, image, offset) {
        const lineNumber = this.getLineNumber(text, offset);
        return createTokenInstance(tokenType, image, offset, offset + image.length, lineNumber, lineNumber, 1, image.length);
    }
    /**
     * Helper function to get the line number at a given offset.
     *
     * @param text Full input string, used to calculate the line number
     * @param offset Current position in the input string
     * @returns The line number at the given offset
     */
    getLineNumber(text, offset) {
        return text.substring(0, offset).split(/\r\n|\r|\n/).length;
    }
    /**
     * A custom pattern for matching indents
     *
     * @param text The full input string.
     * @param offset The offset at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     */
    indentMatcher(text, offset, tokens, groups) {
        if (!this.isStartOfLine(text, offset)) {
            return null;
        }
        const { currIndentLevel, prevIndentLevel, match } = this.matchWhitespace(text, offset, tokens, groups);
        if (currIndentLevel <= prevIndentLevel) {
            // shallower indentation (should be matched by dedent)
            // or same indentation level (should be matched by whitespace and ignored)
            return null;
        }
        this.indentationStack.push(currIndentLevel);
        return match;
    }
    /**
     * A custom pattern for matching dedents
     *
     * @param text The full input string.
     * @param offset The offset at which to attempt a match
     * @param tokens Previously scanned tokens
     * @param groups Token groups
     */
    dedentMatcher(text, offset, tokens, groups) {
        var _a, _b, _c, _d;
        if (!this.isStartOfLine(text, offset)) {
            return null;
        }
        const { currIndentLevel, prevIndentLevel, match } = this.matchWhitespace(text, offset, tokens, groups);
        if (currIndentLevel >= prevIndentLevel) {
            // bigger indentation (should be matched by indent)
            // or same indentation level (should be matched by whitespace and ignored)
            return null;
        }
        const matchIndentIndex = this.indentationStack.lastIndexOf(currIndentLevel);
        // Any dedent must match some previous indentation level.
        if (matchIndentIndex === -1) {
            this.diagnostics.push({
                severity: 'error',
                message: `Invalid dedent level ${currIndentLevel} at offset: ${offset}. Current indentation stack: ${this.indentationStack}`,
                offset,
                length: (_b = (_a = match === null || match === void 0 ? void 0 : match[0]) === null || _a === void 0 ? void 0 : _a.length) !== null && _b !== void 0 ? _b : 0,
                line: this.getLineNumber(text, offset),
                column: 1
            });
            return null;
        }
        const numberOfDedents = this.indentationStack.length - matchIndentIndex - 1;
        const newlinesBeforeDedent = (_d = (_c = text.substring(0, offset).match(/[\r\n]+$/)) === null || _c === void 0 ? void 0 : _c[0].length) !== null && _d !== void 0 ? _d : 1;
        for (let i = 0; i < numberOfDedents; i++) {
            const token = this.createIndentationTokenInstance(this.dedentTokenType, text, '', // Dedents are 0-width tokens
            offset - (newlinesBeforeDedent - 1));
            tokens.push(token);
            this.indentationStack.pop();
        }
        // Token already added, let the dedentation now be consumed as whitespace (if any) and ignored
        return null;
    }
    buildTerminalToken(terminal) {
        const tokenType = super.buildTerminalToken(terminal);
        const { indentTokenName, dedentTokenName, whitespaceTokenName } = this.options;
        if (tokenType.name === indentTokenName) {
            return this.indentTokenType;
        }
        else if (tokenType.name === dedentTokenName) {
            return this.dedentTokenType;
        }
        else if (tokenType.name === whitespaceTokenName) {
            return createToken({
                name: whitespaceTokenName,
                pattern: this.whitespaceRegExp,
                group: Lexer.SKIPPED,
            });
        }
        return tokenType;
    }
    /**
     * Resets the indentation stack between different runs of the lexer
     *
     * @param text Full text that was tokenized
     * @returns Remaining dedent tokens to match all previous indents at the end of the file
     */
    flushRemainingDedents(text) {
        const remainingDedents = [];
        while (this.indentationStack.length > 1) {
            remainingDedents.push(this.createIndentationTokenInstance(this.dedentTokenType, text, '', text.length));
            this.indentationStack.pop();
        }
        this.indentationStack = [0];
        return remainingDedents;
    }
}
/**
 * A lexer that is aware of indentation in the input text.
 * The only purpose of this lexer is to reset the internal state of the {@link IndentationAwareTokenBuilder}
 * between the tokenization of different text inputs.
 *
 * In your module, you can override the default lexer with this one like so:
 * ```ts
 * parser: {
 *     TokenBuilder: () => new IndentationAwareTokenBuilder(),
 *     Lexer: (services) => new IndentationAwareLexer(services),
 * }
 * ```
 */
export class IndentationAwareLexer extends DefaultLexer {
    constructor(services) {
        super(services);
        if (services.parser.TokenBuilder instanceof IndentationAwareTokenBuilder) {
            this.indentationTokenBuilder = services.parser.TokenBuilder;
        }
        else {
            throw new Error('IndentationAwareLexer requires an accompanying IndentationAwareTokenBuilder');
        }
    }
    tokenize(text, options = DEFAULT_TOKENIZE_OPTIONS) {
        const result = super.tokenize(text);
        // Consume all remaining dedents and remove them from the report, as they might not be serializable
        const report = result.report;
        if ((options === null || options === void 0 ? void 0 : options.mode) === 'full') {
            // auto-complete document with remaining dedents
            result.tokens.push(...report.remainingDedents);
        }
        report.remainingDedents = [];
        // remove any "indent-dedent" pair with an empty body as these are typically
        // added by comments or lines with just whitespace but have no real value
        const { indentTokenType, dedentTokenType } = this.indentationTokenBuilder;
        // Use tokenTypeIdx for fast comparison
        const indentTokenIdx = indentTokenType.tokenTypeIdx;
        const dedentTokenIdx = dedentTokenType.tokenTypeIdx;
        const cleanTokens = [];
        const length = result.tokens.length - 1;
        for (let i = 0; i < length; i++) {
            const token = result.tokens[i];
            const nextToken = result.tokens[i + 1];
            if (token.tokenTypeIdx === indentTokenIdx && nextToken.tokenTypeIdx === dedentTokenIdx) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
cleanTokens.push(token);
|
||||
}
|
||||
// Push last token separately
|
||||
if (length >= 0) {
|
||||
cleanTokens.push(result.tokens[length]);
|
||||
}
|
||||
result.tokens = cleanTokens;
|
||||
return result;
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=indentation-aware.js.map
|
||||
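The matchers above synthesize INDENT/DEDENT as zero-width tokens and flush any still-open indents at the end of input. A minimal sketch of wiring this into a language module (the service wiring mirrors the JSDoc above; the sample input and the expected token stream are illustrative assumptions, not output captured from this diff):

```ts
import { IndentationAwareTokenBuilder, IndentationAwareLexer } from 'langium';

// Inside your language module definition:
const parserOverrides = {
    parser: {
        TokenBuilder: () => new IndentationAwareTokenBuilder(),
        Lexer: (services: any) => new IndentationAwareLexer(services),
    },
};

// Hypothetical tokenization of a Python-like snippet:
//   const result = services.parser.Lexer.tokenize('if x:\n    y\n');
// Expected shape: ['if', 'x', ':', INDENT, 'y', DEDENT]
// The trailing DEDENT comes from flushRemainingDedents(), because the
// default mode is 'full'; with { mode: 'partial' } it would be withheld.
```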
1
node_modules/langium/lib/parser/indentation-aware.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
17
node_modules/langium/lib/parser/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,17 @@
/******************************************************************************
 * Copyright 2023 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
export * from './async-parser.js';
export * from './completion-parser-builder.js';
export * from './cst-node-builder.js';
export * from './indentation-aware.js';
export * from './langium-parser-builder.js';
export * from './langium-parser.js';
export * from './lexer.js';
export * from './parser-builder-base.js';
export * from './parser-config.js';
export * from './token-builder.js';
export * from './value-converter.js';
//# sourceMappingURL=index.d.ts.map
1
node_modules/langium/lib/parser/index.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/parser/index.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,cAAc,mBAAmB,CAAC;AAClC,cAAc,gCAAgC,CAAC;AAC/C,cAAc,uBAAuB,CAAC;AACtC,cAAc,wBAAwB,CAAC;AACvC,cAAc,6BAA6B,CAAC;AAC5C,cAAc,qBAAqB,CAAC;AACpC,cAAc,YAAY,CAAC;AAC3B,cAAc,0BAA0B,CAAC;AACzC,cAAc,oBAAoB,CAAC;AACnC,cAAc,oBAAoB,CAAC;AACnC,cAAc,sBAAsB,CAAC"}
17
node_modules/langium/lib/parser/index.js
generated
vendored
Normal file
@@ -0,0 +1,17 @@
/******************************************************************************
 * Copyright 2023 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
export * from './async-parser.js';
export * from './completion-parser-builder.js';
export * from './cst-node-builder.js';
export * from './indentation-aware.js';
export * from './langium-parser-builder.js';
export * from './langium-parser.js';
export * from './lexer.js';
export * from './parser-builder-base.js';
export * from './parser-config.js';
export * from './token-builder.js';
export * from './value-converter.js';
//# sourceMappingURL=index.js.map
1
node_modules/langium/lib/parser/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/parser/index.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,cAAc,mBAAmB,CAAC;AAClC,cAAc,gCAAgC,CAAC;AAC/C,cAAc,uBAAuB,CAAC;AACtC,cAAc,wBAAwB,CAAC;AACvC,cAAc,6BAA6B,CAAC;AAC5C,cAAc,qBAAqB,CAAC;AACpC,cAAc,YAAY,CAAC;AAC3B,cAAc,0BAA0B,CAAC;AACzC,cAAc,oBAAoB,CAAC;AACnC,cAAc,oBAAoB,CAAC;AACnC,cAAc,sBAAsB,CAAC"}
18
node_modules/langium/lib/parser/langium-parser-builder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { LangiumCoreServices } from '../services.js';
import { LangiumParser } from './langium-parser.js';
/**
 * Create and finalize a Langium parser. The parser rules are derived from the grammar, which is
 * available at `services.Grammar`.
 */
export declare function createLangiumParser(services: LangiumCoreServices): LangiumParser;
/**
 * Create a Langium parser without finalizing it. This is used to extract more detailed error
 * information when the parser is initially validated.
 */
export declare function prepareLangiumParser(services: LangiumCoreServices): LangiumParser;
//# sourceMappingURL=langium-parser-builder.d.ts.map
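A sketch of how the two entry points differ in practice (assumes a `services` instance of `LangiumCoreServices` built elsewhere; not part of this diff):

```ts
import { createLangiumParser, prepareLangiumParser } from 'langium';
import type { LangiumCoreServices } from 'langium';

function buildParser(services: LangiumCoreServices) {
    // One-step construction: rules are derived and self-analysis runs immediately.
    const parser = createLangiumParser(services);

    // Two-step construction: surface grammar definition errors before finalizing.
    const candidate = prepareLangiumParser(services);
    candidate.finalize(); // triggers Chevrotain's self-analysis
    if (candidate.definitionErrors.length > 0) {
        throw new Error(candidate.definitionErrors.map(e => e.message).join('\n'));
    }
    return parser;
}
```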
1
node_modules/langium/lib/parser/langium-parser-builder.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"langium-parser-builder.d.ts","sourceRoot":"","sources":["../../src/parser/langium-parser-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAC1D,OAAO,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AAGpD;;;GAGG;AACH,wBAAgB,mBAAmB,CAAC,QAAQ,EAAE,mBAAmB,GAAG,aAAa,CAIhF;AAED;;;GAGG;AACH,wBAAgB,oBAAoB,CAAC,QAAQ,EAAE,mBAAmB,GAAG,aAAa,CAKjF"}
27
node_modules/langium/lib/parser/langium-parser-builder.js
generated
vendored
Normal file
@@ -0,0 +1,27 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { LangiumParser } from './langium-parser.js';
import { createParser } from './parser-builder-base.js';
/**
 * Create and finalize a Langium parser. The parser rules are derived from the grammar, which is
 * available at `services.Grammar`.
 */
export function createLangiumParser(services) {
    const parser = prepareLangiumParser(services);
    parser.finalize();
    return parser;
}
/**
 * Create a Langium parser without finalizing it. This is used to extract more detailed error
 * information when the parser is initially validated.
 */
export function prepareLangiumParser(services) {
    const grammar = services.Grammar;
    const lexer = services.parser.Lexer;
    const parser = new LangiumParser(services);
    return createParser(grammar, parser, lexer.definition);
}
//# sourceMappingURL=langium-parser-builder.js.map
1
node_modules/langium/lib/parser/langium-parser-builder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"langium-parser-builder.js","sourceRoot":"","sources":["../../src/parser/langium-parser-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAGhF,OAAO,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AACpD,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AAExD;;;GAGG;AACH,MAAM,UAAU,mBAAmB,CAAC,QAA6B;IAC7D,MAAM,MAAM,GAAG,oBAAoB,CAAC,QAAQ,CAAC,CAAC;IAC9C,MAAM,CAAC,QAAQ,EAAE,CAAC;IAClB,OAAO,MAAM,CAAC;AAClB,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,oBAAoB,CAAC,QAA6B;IAC9D,MAAM,OAAO,GAAG,QAAQ,CAAC,OAAO,CAAC;IACjC,MAAM,KAAK,GAAG,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC;IACpC,MAAM,MAAM,GAAG,IAAI,aAAa,CAAC,QAAQ,CAAC,CAAC;IAC3C,OAAO,YAAY,CAAC,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,UAAU,CAAC,CAAC;AAC3D,CAAC"}
234
node_modules/langium/lib/parser/langium-parser.d.ts
generated
vendored
Normal file
@@ -0,0 +1,234 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { DSLMethodOpts, ILexingError, IOrAlt, IParserErrorMessageProvider, IRecognitionException, IToken, TokenType, TokenVocabulary } from 'chevrotain';
import type { AbstractElement, Action, ParserRule } from '../languages/generated/ast.js';
import type { LangiumCoreServices } from '../services.js';
import type { AstNode } from '../syntax-tree.js';
import type { Lexer } from './lexer.js';
import type { IParserConfig } from './parser-config.js';
import { EmbeddedActionsParser } from 'chevrotain';
import type { LexingReport } from './token-builder.js';
export type ParseResult<T = AstNode> = {
    value: T;
    parserErrors: IRecognitionException[];
    lexerErrors: ILexingError[];
    lexerReport?: LexingReport;
};
export declare const DatatypeSymbol: unique symbol;
type RuleResult = (args: Args) => any;
type Args = Record<string, boolean>;
type RuleImpl = (args: Args) => any;
/**
 * Base interface for all parsers. Mainly used by the `parser-builder-base.ts` to perform work on different kinds of parsers.
 * The main use cases are:
 * * AST parser: Based on a string, create an AST for the current grammar
 * * Completion parser: Based on a partial string, identify the current position of the input within the grammar
 */
export interface BaseParser {
    /**
     * Adds a new parser rule to the parser
     */
    rule(rule: ParserRule, impl: RuleImpl): RuleResult;
    /**
     * Returns the executable rule function for the specified rule name
     */
    getRule(name: string): RuleResult | undefined;
    /**
     * Performs alternatives parsing (the `|` operation in EBNF/Langium)
     */
    alternatives(idx: number, choices: Array<IOrAlt<any>>): void;
    /**
     * Parses the callback as optional (the `?` operation in EBNF/Langium)
     */
    optional(idx: number, callback: DSLMethodOpts<unknown>): void;
    /**
     * Parses the callback 0 or more times (the `*` operation in EBNF/Langium)
     */
    many(idx: number, callback: DSLMethodOpts<unknown>): void;
    /**
     * Parses the callback 1 or more times (the `+` operation in EBNF/Langium)
     */
    atLeastOne(idx: number, callback: DSLMethodOpts<unknown>): void;
    /**
     * Consumes a specific token type from the token input stream.
     * Requires a unique index within the rule for a specific token type.
     */
    consume(idx: number, tokenType: TokenType, feature: AbstractElement): void;
    /**
     * Invokes the executable function for a given parser rule.
     * Requires a unique index within the rule for a specific sub rule.
     * Arguments can be supplied to the rule invocation for semantic predicates
     */
    subrule(idx: number, rule: RuleResult, fragment: boolean, feature: AbstractElement, args: Args): void;
    /**
     * Executes a grammar action that modifies the currently active AST node
     */
    action($type: string, action: Action): void;
    /**
     * Finishes construction of the current AST node. Only used by the AST parser.
     */
    construct(): unknown;
    /**
     * Whether the parser is currently actually in use or in "recording mode".
     * Recording mode is activated once when the parser is analyzing itself.
     * During this phase, no input exists and therefore no AST should be constructed
     */
    isRecording(): boolean;
    /**
     * Current state of the unordered groups
     */
    get unorderedGroups(): Map<string, boolean[]>;
    /**
     * The rule stack indicates the indices of rules that are currently invoked,
     * in order of their invocation.
     */
    getRuleStack(): number[];
}
export declare abstract class AbstractLangiumParser implements BaseParser {
    protected readonly lexer: Lexer;
    protected readonly wrapper: ChevrotainWrapper;
    protected _unorderedGroups: Map<string, boolean[]>;
    protected allRules: Map<string, RuleResult>;
    protected mainRule: RuleResult;
    constructor(services: LangiumCoreServices);
    alternatives(idx: number, choices: Array<IOrAlt<any>>): void;
    optional(idx: number, callback: DSLMethodOpts<unknown>): void;
    many(idx: number, callback: DSLMethodOpts<unknown>): void;
    atLeastOne(idx: number, callback: DSLMethodOpts<unknown>): void;
    abstract rule(rule: ParserRule, impl: RuleImpl): RuleResult;
    abstract consume(idx: number, tokenType: TokenType, feature: AbstractElement): void;
    abstract subrule(idx: number, rule: RuleResult, fragment: boolean, feature: AbstractElement, args: Args): void;
    abstract action($type: string, action: Action): void;
    abstract construct(): unknown;
    getRule(name: string): RuleResult | undefined;
    isRecording(): boolean;
    get unorderedGroups(): Map<string, boolean[]>;
    getRuleStack(): number[];
    finalize(): void;
}
export interface ParserOptions {
    rule?: string;
}
export declare class LangiumParser extends AbstractLangiumParser {
    private readonly linker;
    private readonly converter;
    private readonly astReflection;
    private readonly nodeBuilder;
    private lexerResult?;
    private stack;
    private assignmentMap;
    private get current();
    constructor(services: LangiumCoreServices);
    rule(rule: ParserRule, impl: RuleImpl): RuleResult;
    private computeRuleType;
    parse<T extends AstNode = AstNode>(input: string, options?: ParserOptions): ParseResult<T>;
    private startImplementation;
    private extractHiddenTokens;
    consume(idx: number, tokenType: TokenType, feature: AbstractElement): void;
    /**
     * Most consumed parser tokens are valid. However, there are two cases in which they are not valid:
     *
     * 1. They were inserted during error recovery by the parser. These tokens don't really exist and should not be further processed
     * 2. They contain invalid token ranges. This might include the special EOF token, or other tokens produced by invalid token builders.
     */
    private isValidToken;
    subrule(idx: number, rule: RuleResult, fragment: boolean, feature: AbstractElement, args: Args): void;
    private performSubruleAssignment;
    action($type: string, action: Action): void;
    construct(): unknown;
    private getAssignment;
    private assign;
    private assignWithoutOverride;
    get definitionErrors(): IParserDefinitionError[];
}
export interface IParserDefinitionError {
    message: string;
    type: number;
    ruleName?: string;
}
export declare abstract class AbstractParserErrorMessageProvider implements IParserErrorMessageProvider {
    buildMismatchTokenMessage(options: {
        expected: TokenType;
        actual: IToken;
        previous: IToken;
        ruleName: string;
    }): string;
    buildNotAllInputParsedMessage(options: {
        firstRedundant: IToken;
        ruleName: string;
    }): string;
    buildNoViableAltMessage(options: {
        expectedPathsPerAlt: TokenType[][][];
        actual: IToken[];
        previous: IToken;
        customUserDescription: string;
        ruleName: string;
    }): string;
    buildEarlyExitMessage(options: {
        expectedIterationPaths: TokenType[][];
        actual: IToken[];
        previous: IToken;
        customUserDescription: string;
        ruleName: string;
    }): string;
}
export declare class LangiumParserErrorMessageProvider extends AbstractParserErrorMessageProvider {
    buildMismatchTokenMessage({ expected, actual }: {
        expected: TokenType;
        actual: IToken;
        previous: IToken;
        ruleName: string;
    }): string;
    buildNotAllInputParsedMessage({ firstRedundant }: {
        firstRedundant: IToken;
        ruleName: string;
    }): string;
}
export interface CompletionParserResult {
    tokens: IToken[];
    elementStack: AbstractElement[];
    tokenIndex: number;
}
export declare class LangiumCompletionParser extends AbstractLangiumParser {
    private tokens;
    private elementStack;
    private lastElementStack;
    private nextTokenIndex;
    private stackSize;
    action(): void;
    construct(): unknown;
    parse(input: string): CompletionParserResult;
    rule(rule: ParserRule, impl: RuleImpl): RuleResult;
    private resetState;
    private startImplementation;
    private removeUnexpectedElements;
    keepStackSize(): number;
    resetStackSize(size: number): void;
    consume(idx: number, tokenType: TokenType, feature: AbstractElement): void;
    subrule(idx: number, rule: RuleResult, fragment: boolean, feature: AbstractElement, args: Args): void;
    before(element: AbstractElement): void;
    after(element: AbstractElement): void;
    get currIdx(): number;
}
/**
 * This class wraps the embedded actions parser of chevrotain and exposes protected methods.
 * This way, we can build the `LangiumParser` as a composition.
 */
declare class ChevrotainWrapper extends EmbeddedActionsParser {
    definitionErrors: IParserDefinitionError[];
    constructor(tokens: TokenVocabulary, config: IParserConfig);
    get IS_RECORDING(): boolean;
    DEFINE_RULE(name: string, impl: RuleImpl): RuleResult;
    wrapSelfAnalysis(): void;
    wrapConsume(idx: number, tokenType: TokenType): IToken;
    wrapSubrule(idx: number, rule: RuleResult, args: Args): unknown;
    wrapOr(idx: number, choices: Array<IOrAlt<any>>): void;
    wrapOption(idx: number, callback: DSLMethodOpts<unknown>): void;
    wrapMany(idx: number, callback: DSLMethodOpts<unknown>): void;
    wrapAtLeastOne(idx: number, callback: DSLMethodOpts<unknown>): void;
}
export {};
//# sourceMappingURL=langium-parser.d.ts.map
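The `ParserOptions.rule` escape hatch above lets callers parse against any named rule rather than the grammar's entry rule. A brief sketch (the rule name 'Expression' and the input are assumptions for illustration):

```ts
// `parser` is a finalized LangiumParser (see createLangiumParser above).
const result = parser.parse('1 + 2', { rule: 'Expression' });
if (result.lexerErrors.length === 0 && result.parserErrors.length === 0) {
    // result.value is the AST node produced by the 'Expression' rule
    console.log(result.value.$type);
}
```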
1
node_modules/langium/lib/parser/langium-parser.d.ts.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
481
node_modules/langium/lib/parser/langium-parser.js
generated
vendored
Normal file
@@ -0,0 +1,481 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { defaultParserErrorProvider, EmbeddedActionsParser, LLkLookaheadStrategy } from 'chevrotain';
import { LLStarLookaheadStrategy } from 'chevrotain-allstar';
import { isAssignment, isCrossReference, isKeyword } from '../languages/generated/ast.js';
import { getExplicitRuleType, isDataTypeRule } from '../utils/grammar-utils.js';
import { assignMandatoryProperties, getContainerOfType, linkContentToContainer } from '../utils/ast-utils.js';
import { CstNodeBuilder } from './cst-node-builder.js';
export const DatatypeSymbol = Symbol('Datatype');
function isDataTypeNode(node) {
    return node.$type === DatatypeSymbol;
}
const ruleSuffix = '\u200B';
const withRuleSuffix = (name) => name.endsWith(ruleSuffix) ? name : name + ruleSuffix;
export class AbstractLangiumParser {
    constructor(services) {
        this._unorderedGroups = new Map();
        this.allRules = new Map();
        this.lexer = services.parser.Lexer;
        const tokens = this.lexer.definition;
        const production = services.LanguageMetaData.mode === 'production';
        this.wrapper = new ChevrotainWrapper(tokens, Object.assign(Object.assign({}, services.parser.ParserConfig), { skipValidations: production, errorMessageProvider: services.parser.ParserErrorMessageProvider }));
    }
    alternatives(idx, choices) {
        this.wrapper.wrapOr(idx, choices);
    }
    optional(idx, callback) {
        this.wrapper.wrapOption(idx, callback);
    }
    many(idx, callback) {
        this.wrapper.wrapMany(idx, callback);
    }
    atLeastOne(idx, callback) {
        this.wrapper.wrapAtLeastOne(idx, callback);
    }
    getRule(name) {
        return this.allRules.get(name);
    }
    isRecording() {
        return this.wrapper.IS_RECORDING;
    }
    get unorderedGroups() {
        return this._unorderedGroups;
    }
    getRuleStack() {
        return this.wrapper.RULE_STACK;
    }
    finalize() {
        this.wrapper.wrapSelfAnalysis();
    }
}
export class LangiumParser extends AbstractLangiumParser {
    get current() {
        return this.stack[this.stack.length - 1];
    }
    constructor(services) {
        super(services);
        this.nodeBuilder = new CstNodeBuilder();
        this.stack = [];
        this.assignmentMap = new Map();
        this.linker = services.references.Linker;
        this.converter = services.parser.ValueConverter;
        this.astReflection = services.shared.AstReflection;
    }
    rule(rule, impl) {
        const type = this.computeRuleType(rule);
        const ruleMethod = this.wrapper.DEFINE_RULE(withRuleSuffix(rule.name), this.startImplementation(type, impl).bind(this));
        this.allRules.set(rule.name, ruleMethod);
        if (rule.entry) {
            this.mainRule = ruleMethod;
        }
        return ruleMethod;
    }
    computeRuleType(rule) {
        if (rule.fragment) {
            return undefined;
        }
        else if (isDataTypeRule(rule)) {
            return DatatypeSymbol;
        }
        else {
            const explicit = getExplicitRuleType(rule);
            return explicit !== null && explicit !== void 0 ? explicit : rule.name;
        }
    }
    parse(input, options = {}) {
        this.nodeBuilder.buildRootNode(input);
        const lexerResult = this.lexerResult = this.lexer.tokenize(input);
        this.wrapper.input = lexerResult.tokens;
        const ruleMethod = options.rule ? this.allRules.get(options.rule) : this.mainRule;
        if (!ruleMethod) {
            throw new Error(options.rule ? `No rule found with name '${options.rule}'` : 'No main rule available.');
        }
        const result = ruleMethod.call(this.wrapper, {});
        this.nodeBuilder.addHiddenNodes(lexerResult.hidden);
        this.unorderedGroups.clear();
        this.lexerResult = undefined;
        return {
            value: result,
            lexerErrors: lexerResult.errors,
            lexerReport: lexerResult.report,
            parserErrors: this.wrapper.errors
        };
    }
    startImplementation($type, implementation) {
        return (args) => {
            // Only create a new AST node in case the calling rule is not a fragment rule
            const createNode = !this.isRecording() && $type !== undefined;
            if (createNode) {
                const node = { $type };
                this.stack.push(node);
                if ($type === DatatypeSymbol) {
                    node.value = '';
                }
            }
            let result;
            try {
                result = implementation(args);
            }
            catch (err) {
                result = undefined;
            }
            if (result === undefined && createNode) {
                result = this.construct();
            }
            return result;
        };
    }
    extractHiddenTokens(token) {
        const hiddenTokens = this.lexerResult.hidden;
        if (!hiddenTokens.length) {
            return [];
        }
        const offset = token.startOffset;
        for (let i = 0; i < hiddenTokens.length; i++) {
            const token = hiddenTokens[i];
            if (token.startOffset > offset) {
                return hiddenTokens.splice(0, i);
            }
        }
        return hiddenTokens.splice(0, hiddenTokens.length);
    }
    consume(idx, tokenType, feature) {
        const token = this.wrapper.wrapConsume(idx, tokenType);
        if (!this.isRecording() && this.isValidToken(token)) {
            const hiddenTokens = this.extractHiddenTokens(token);
            this.nodeBuilder.addHiddenNodes(hiddenTokens);
            const leafNode = this.nodeBuilder.buildLeafNode(token, feature);
            const { assignment, isCrossRef } = this.getAssignment(feature);
            const current = this.current;
            if (assignment) {
                const convertedValue = isKeyword(feature) ? token.image : this.converter.convert(token.image, leafNode);
                this.assign(assignment.operator, assignment.feature, convertedValue, leafNode, isCrossRef);
            }
            else if (isDataTypeNode(current)) {
                let text = token.image;
                if (!isKeyword(feature)) {
                    text = this.converter.convert(text, leafNode).toString();
                }
                current.value += text;
            }
        }
    }
    /**
     * Most consumed parser tokens are valid. However, there are two cases in which they are not valid:
     *
     * 1. They were inserted during error recovery by the parser. These tokens don't really exist and should not be further processed
     * 2. They contain invalid token ranges. This might include the special EOF token, or other tokens produced by invalid token builders.
     */
    isValidToken(token) {
        return !token.isInsertedInRecovery && !isNaN(token.startOffset) && typeof token.endOffset === 'number' && !isNaN(token.endOffset);
    }
    subrule(idx, rule, fragment, feature, args) {
        let cstNode;
        if (!this.isRecording() && !fragment) {
            // We only want to create a new CST node if the subrule actually creates a new AST node.
            // In other cases like calls of fragment rules the current CST/AST is populated further.
            // Note that skipping this initialization and leaving cstNode unassigned also skips the subrule assignment later on.
            // This is intended, as fragment rules only enrich the current AST node
            cstNode = this.nodeBuilder.buildCompositeNode(feature);
        }
        const subruleResult = this.wrapper.wrapSubrule(idx, rule, args);
        if (!this.isRecording() && cstNode && cstNode.length > 0) {
            this.performSubruleAssignment(subruleResult, feature, cstNode);
        }
    }
    performSubruleAssignment(result, feature, cstNode) {
        const { assignment, isCrossRef } = this.getAssignment(feature);
        if (assignment) {
            this.assign(assignment.operator, assignment.feature, result, cstNode, isCrossRef);
        }
        else if (!assignment) {
            // If we call a subrule without an assignment we either:
            // 1. append the result of the subrule (data type rule)
            // 2. override the current object with the newly parsed object
            // If the current element is an AST node and the result of the subrule
            // is a data type rule, we can safely discard the results.
            const current = this.current;
            if (isDataTypeNode(current)) {
                current.value += result.toString();
            }
            else if (typeof result === 'object' && result) {
                const object = this.assignWithoutOverride(result, current);
                const newItem = object;
                this.stack.pop();
                this.stack.push(newItem);
            }
        }
    }
    action($type, action) {
        if (!this.isRecording()) {
            let last = this.current;
            if (action.feature && action.operator) {
                last = this.construct();
                this.nodeBuilder.removeNode(last.$cstNode);
                const node = this.nodeBuilder.buildCompositeNode(action);
                node.content.push(last.$cstNode);
                const newItem = { $type };
                this.stack.push(newItem);
                this.assign(action.operator, action.feature, last, last.$cstNode, false);
            }
            else {
                last.$type = $type;
            }
        }
    }
    construct() {
        if (this.isRecording()) {
            return undefined;
        }
        const obj = this.current;
        linkContentToContainer(obj);
        this.nodeBuilder.construct(obj);
        this.stack.pop();
        if (isDataTypeNode(obj)) {
            return this.converter.convert(obj.value, obj.$cstNode);
        }
        else {
            assignMandatoryProperties(this.astReflection, obj);
        }
        return obj;
    }
    getAssignment(feature) {
        if (!this.assignmentMap.has(feature)) {
            const assignment = getContainerOfType(feature, isAssignment);
            this.assignmentMap.set(feature, {
                assignment: assignment,
                isCrossRef: assignment ? isCrossReference(assignment.terminal) : false
            });
        }
        return this.assignmentMap.get(feature);
    }
    assign(operator, feature, value, cstNode, isCrossRef) {
        const obj = this.current;
        let item;
        if (isCrossRef && typeof value === 'string') {
            item = this.linker.buildReference(obj, feature, cstNode, value);
        }
        else {
            item = value;
        }
        switch (operator) {
            case '=': {
                obj[feature] = item;
                break;
            }
            case '?=': {
                obj[feature] = true;
                break;
            }
            case '+=': {
                if (!Array.isArray(obj[feature])) {
                    obj[feature] = [];
                }
                obj[feature].push(item);
            }
        }
    }
    assignWithoutOverride(target, source) {
        for (const [name, existingValue] of Object.entries(source)) {
            const newValue = target[name];
            if (newValue === undefined) {
                target[name] = existingValue;
            }
            else if (Array.isArray(newValue) && Array.isArray(existingValue)) {
                existingValue.push(...newValue);
                target[name] = existingValue;
            }
        }
        // The target was parsed from an unassigned subrule.
        // After the subrule construction, it received a CST node.
        // This CST node will later be overridden by the CST node builder.
        // To prevent references to stale AST nodes in the CST,
        // we need to remove the reference here
        const targetCstNode = target.$cstNode;
        if (targetCstNode) {
            targetCstNode.astNode = undefined;
            target.$cstNode = undefined;
        }
        return target;
    }
    get definitionErrors() {
        return this.wrapper.definitionErrors;
    }
}
export class AbstractParserErrorMessageProvider {
    buildMismatchTokenMessage(options) {
        return defaultParserErrorProvider.buildMismatchTokenMessage(options);
    }
    buildNotAllInputParsedMessage(options) {
        return defaultParserErrorProvider.buildNotAllInputParsedMessage(options);
    }
    buildNoViableAltMessage(options) {
        return defaultParserErrorProvider.buildNoViableAltMessage(options);
    }
    buildEarlyExitMessage(options) {
        return defaultParserErrorProvider.buildEarlyExitMessage(options);
    }
}
export class LangiumParserErrorMessageProvider extends AbstractParserErrorMessageProvider {
    buildMismatchTokenMessage({ expected, actual }) {
        const expectedMsg = expected.LABEL
            ? '`' + expected.LABEL + '`'
            : expected.name.endsWith(':KW')
                ? `keyword '${expected.name.substring(0, expected.name.length - 3)}'`
                : `token of type '${expected.name}'`;
        return `Expecting ${expectedMsg} but found \`${actual.image}\`.`;
    }
    buildNotAllInputParsedMessage({ firstRedundant }) {
        return `Expecting end of file but found \`${firstRedundant.image}\`.`;
    }
}
export class LangiumCompletionParser extends AbstractLangiumParser {
    constructor() {
        super(...arguments);
        this.tokens = [];
        this.elementStack = [];
        this.lastElementStack = [];
        this.nextTokenIndex = 0;
        this.stackSize = 0;
    }
    action() {
        // NOOP
    }
    construct() {
        // NOOP
        return undefined;
    }
    parse(input) {
        this.resetState();
        const tokens = this.lexer.tokenize(input, { mode: 'partial' });
        this.tokens = tokens.tokens;
        this.wrapper.input = [...this.tokens];
        this.mainRule.call(this.wrapper, {});
        this.unorderedGroups.clear();
        return {
            tokens: this.tokens,
            elementStack: [...this.lastElementStack],
            tokenIndex: this.nextTokenIndex
        };
    }
    rule(rule, impl) {
        const ruleMethod = this.wrapper.DEFINE_RULE(withRuleSuffix(rule.name), this.startImplementation(impl).bind(this));
        this.allRules.set(rule.name, ruleMethod);
        if (rule.entry) {
            this.mainRule = ruleMethod;
        }
        return ruleMethod;
    }
    resetState() {
        this.elementStack = [];
        this.lastElementStack = [];
        this.nextTokenIndex = 0;
        this.stackSize = 0;
    }
    startImplementation(implementation) {
        return (args) => {
            const size = this.keepStackSize();
            try {
                implementation(args);
            }
            finally {
                this.resetStackSize(size);
            }
        };
    }
    removeUnexpectedElements() {
        this.elementStack.splice(this.stackSize);
    }
    keepStackSize() {
        const size = this.elementStack.length;
        this.stackSize = size;
        return size;
    }
    resetStackSize(size) {
        this.removeUnexpectedElements();
        this.stackSize = size;
    }
    consume(idx, tokenType, feature) {
        this.wrapper.wrapConsume(idx, tokenType);
        if (!this.isRecording()) {
            this.lastElementStack = [...this.elementStack, feature];
            this.nextTokenIndex = this.currIdx + 1;
        }
    }
    subrule(idx, rule, fragment, feature, args) {
        this.before(feature);
        this.wrapper.wrapSubrule(idx, rule, args);
        this.after(feature);
    }
    before(element) {
        if (!this.isRecording()) {
            this.elementStack.push(element);
        }
    }
    after(element) {
        if (!this.isRecording()) {
            const index = this.elementStack.lastIndexOf(element);
            if (index >= 0) {
                this.elementStack.splice(index);
            }
        }
    }
    get currIdx() {
        return this.wrapper.currIdx;
    }
}
const defaultConfig = {
    recoveryEnabled: true,
    nodeLocationTracking: 'full',
    skipValidations: true,
    errorMessageProvider: new LangiumParserErrorMessageProvider()
};
/**
 * This class wraps the embedded actions parser of chevrotain and exposes protected methods.
 * This way, we can build the `LangiumParser` as a composition.
 */
class ChevrotainWrapper extends EmbeddedActionsParser {
    constructor(tokens, config) {
        const useDefaultLookahead = config && 'maxLookahead' in config;
        super(tokens, Object.assign(Object.assign(Object.assign({}, defaultConfig), { lookaheadStrategy: useDefaultLookahead
                ? new LLkLookaheadStrategy({ maxLookahead: config.maxLookahead })
                : new LLStarLookaheadStrategy({
                    // If validations are skipped, don't log the lookahead warnings
                    logging: config.skipValidations ? () => { } : undefined
                }) }), config));
    }
    get IS_RECORDING() {
        return this.RECORDING_PHASE;
    }
    DEFINE_RULE(name, impl) {
        return this.RULE(name, impl);
    }
    wrapSelfAnalysis() {
        this.performSelfAnalysis();
    }
    wrapConsume(idx, tokenType) {
        return this.consume(idx, tokenType);
    }
    wrapSubrule(idx, rule, args) {
        return this.subrule(idx, rule, {
            ARGS: [args]
        });
    }
    wrapOr(idx, choices) {
        this.or(idx, choices);
    }
    wrapOption(idx, callback) {
        this.option(idx, callback);
    }
    wrapMany(idx, callback) {
        this.many(idx, callback);
    }
    wrapAtLeastOne(idx, callback) {
        this.atLeastOne(idx, callback);
    }
}
//# sourceMappingURL=langium-parser.js.map
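Since LangiumParserErrorMessageProvider only specializes two of the four messages, a language can refine the others by subclassing and registering the result under the `ParserErrorMessageProvider` service (the service name appears in the AbstractLangiumParser constructor above). A sketch; the message wording is an assumption:

```ts
import type { IToken, TokenType } from 'chevrotain';
import { LangiumParserErrorMessageProvider } from 'langium';

class VerboseMessages extends LangiumParserErrorMessageProvider {
    override buildMismatchTokenMessage({ expected, actual }: { expected: TokenType, actual: IToken, previous: IToken, ruleName: string }): string {
        return `Expected ${expected.name} here, not '${actual.image}'.`;
    }
}

// Wired into the module:
// parser: { ParserErrorMessageProvider: () => new VerboseMessages() }
```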
1
node_modules/langium/lib/parser/langium-parser.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
60
node_modules/langium/lib/parser/lexer.d.ts
generated
vendored
Normal file
@@ -0,0 +1,60 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { ILexerErrorMessageProvider, ILexingError, IMultiModeLexerDefinition, IToken, TokenType, TokenTypeDictionary, TokenVocabulary } from 'chevrotain';
import type { LangiumCoreServices } from '../services.js';
import { Lexer as ChevrotainLexer } from 'chevrotain';
import type { LexingReport, TokenBuilder } from './token-builder.js';
export declare class DefaultLexerErrorMessageProvider implements ILexerErrorMessageProvider {
    buildUnexpectedCharactersMessage(fullText: string, startOffset: number, length: number, line?: number, column?: number): string;
    buildUnableToPopLexerModeMessage(token: IToken): string;
}
export interface LexerResult {
    /**
     * A list of all tokens that were lexed from the input.
     *
     * Note that Langium requires the optional properties
     * `startLine`, `startColumn`, `endOffset`, `endLine` and `endColumn` to be set on each token.
     */
    tokens: IToken[];
    /**
     * Contains hidden tokens, usually comments.
     */
    hidden: IToken[];
    errors: ILexingError[];
    report?: LexingReport;
}
export type TokenizeMode = 'full' | 'partial';
export interface TokenizeOptions {
    mode?: TokenizeMode;
}
export declare const DEFAULT_TOKENIZE_OPTIONS: TokenizeOptions;
export interface Lexer {
    readonly definition: TokenTypeDictionary;
    tokenize(text: string, options?: TokenizeOptions): LexerResult;
}
export declare class DefaultLexer implements Lexer {
    protected readonly tokenBuilder: TokenBuilder;
    protected readonly errorMessageProvider: ILexerErrorMessageProvider;
    protected tokenTypes: TokenTypeDictionary;
    protected chevrotainLexer: ChevrotainLexer;
    constructor(services: LangiumCoreServices);
    get definition(): TokenTypeDictionary;
    tokenize(text: string, _options?: TokenizeOptions): LexerResult;
    protected toTokenTypeDictionary(buildTokens: TokenVocabulary): TokenTypeDictionary;
}
/**
 * Returns whether the given TokenVocabulary is a TokenType array
 */
export declare function isTokenTypeArray(tokenVocabulary: TokenVocabulary): tokenVocabulary is TokenType[];
/**
 * Returns whether the given TokenVocabulary is an IMultiModeLexerDefinition
 */
export declare function isIMultiModeLexerDefinition(tokenVocabulary: TokenVocabulary): tokenVocabulary is IMultiModeLexerDefinition;
/**
 * Returns whether the given TokenVocabulary is a TokenTypeDictionary
 */
export declare function isTokenTypeDictionary(tokenVocabulary: TokenVocabulary): tokenVocabulary is TokenTypeDictionary;
//# sourceMappingURL=lexer.d.ts.map
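The 'full'/'partial' distinction matters for lexers with end-of-input bookkeeping: LangiumCompletionParser.parse (above) tokenizes with `{ mode: 'partial' }`, and IndentationAwareLexer only appends its remaining dedents in 'full' mode. A small sketch of calling both modes (assumes a `lexer` service instance; the helper name is hypothetical):

```ts
import type { Lexer } from 'langium';

function tokenizeBoth(lexer: Lexer, text: string) {
    const full = lexer.tokenize(text); // equivalent to { mode: 'full' }
    const partial = lexer.tokenize(text, { mode: 'partial' }); // as used for completion
    return { full, partial };
}
```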
1
node_modules/langium/lib/parser/lexer.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"lexer.d.ts","sourceRoot":"","sources":["../../src/parser/lexer.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,0BAA0B,EAAE,YAAY,EAAE,yBAAyB,EAAE,MAAM,EAAE,SAAS,EAAE,mBAAmB,EAAE,eAAe,EAAE,MAAM,YAAY,CAAC;AAC/J,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAC1D,OAAO,EAAE,KAAK,IAAI,eAAe,EAA6B,MAAM,YAAY,CAAC;AACjF,OAAO,KAAK,EAAE,YAAY,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAErE,qBAAa,gCAAiC,YAAW,0BAA0B;IAE/E,gCAAgC,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM;IAI/H,gCAAgC,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM;CAG1D;AAED,MAAM,WAAW,WAAW;IACxB;;;;;OAKG;IACH,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB;;OAEG;IACH,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,MAAM,EAAE,YAAY,EAAE,CAAC;IACvB,MAAM,CAAC,EAAE,YAAY,CAAC;CACzB;AAED,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG,SAAS,CAAC;AAE9C,MAAM,WAAW,eAAe;IAC5B,IAAI,CAAC,EAAE,YAAY,CAAC;CACvB;AAED,eAAO,MAAM,wBAAwB,EAAE,eAAkC,CAAC;AAE1E,MAAM,WAAW,KAAK;IAClB,QAAQ,CAAC,UAAU,EAAE,mBAAmB,CAAC;IACzC,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,WAAW,CAAC;CAClE;AAED,qBAAa,YAAa,YAAW,KAAK;IAEtC,SAAS,CAAC,QAAQ,CAAC,YAAY,EAAE,YAAY,CAAC;IAC9C,SAAS,CAAC,QAAQ,CAAC,oBAAoB,EAAE,0BAA0B,CAAC;IACpE,SAAS,CAAC,UAAU,EAAE,mBAAmB,CAAC;IAC1C,SAAS,CAAC,eAAe,EAAE,eAAe,CAAC;gBAE/B,QAAQ,EAAE,mBAAmB;IAgBzC,IAAI,UAAU,IAAI,mBAAmB,CAEpC;IAED,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,QAAQ,GAAE,eAA0C,GAAG,WAAW;IAUzF,SAAS,CAAC,qBAAqB,CAAC,WAAW,EAAE,eAAe,GAAG,mBAAmB;CAOrF;AAED;;GAEG;AACH,wBAAgB,gBAAgB,CAAC,eAAe,EAAE,eAAe,GAAG,eAAe,IAAI,SAAS,EAAE,CAEjG;AAED;;GAEG;AACH,wBAAgB,2BAA2B,CAAC,eAAe,EAAE,eAAe,GAAG,eAAe,IAAI,yBAAyB,CAE1H;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,eAAe,EAAE,eAAe,GAAG,eAAe,IAAI,mBAAmB,CAE9G"}
72
node_modules/langium/lib/parser/lexer.js
generated
vendored
Normal file
@@ -0,0 +1,72 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { Lexer as ChevrotainLexer, defaultLexerErrorProvider } from 'chevrotain';
export class DefaultLexerErrorMessageProvider {
    buildUnexpectedCharactersMessage(fullText, startOffset, length, line, column) {
        return defaultLexerErrorProvider.buildUnexpectedCharactersMessage(fullText, startOffset, length, line, column);
    }
    buildUnableToPopLexerModeMessage(token) {
        return defaultLexerErrorProvider.buildUnableToPopLexerModeMessage(token);
    }
}
export const DEFAULT_TOKENIZE_OPTIONS = { mode: 'full' };
export class DefaultLexer {
    constructor(services) {
        this.errorMessageProvider = services.parser.LexerErrorMessageProvider;
        this.tokenBuilder = services.parser.TokenBuilder;
        const tokens = this.tokenBuilder.buildTokens(services.Grammar, {
            caseInsensitive: services.LanguageMetaData.caseInsensitive
        });
        this.tokenTypes = this.toTokenTypeDictionary(tokens);
        const lexerTokens = isTokenTypeDictionary(tokens) ? Object.values(tokens) : tokens;
        const production = services.LanguageMetaData.mode === 'production';
        this.chevrotainLexer = new ChevrotainLexer(lexerTokens, {
            positionTracking: 'full',
            skipValidations: production,
            errorMessageProvider: this.errorMessageProvider
        });
    }
    get definition() {
        return this.tokenTypes;
    }
    tokenize(text, _options = DEFAULT_TOKENIZE_OPTIONS) {
        var _a, _b, _c;
        const chevrotainResult = this.chevrotainLexer.tokenize(text);
        return {
            tokens: chevrotainResult.tokens,
            errors: chevrotainResult.errors,
            hidden: (_a = chevrotainResult.groups.hidden) !== null && _a !== void 0 ? _a : [],
            report: (_c = (_b = this.tokenBuilder).flushLexingReport) === null || _c === void 0 ? void 0 : _c.call(_b, text)
        };
    }
    toTokenTypeDictionary(buildTokens) {
        if (isTokenTypeDictionary(buildTokens))
            return buildTokens;
        const tokens = isIMultiModeLexerDefinition(buildTokens) ? Object.values(buildTokens.modes).flat() : buildTokens;
        const res = {};
        tokens.forEach(token => res[token.name] = token);
        return res;
    }
}
/**
 * Returns whether the given TokenVocabulary is a TokenType array
 */
export function isTokenTypeArray(tokenVocabulary) {
    return Array.isArray(tokenVocabulary) && (tokenVocabulary.length === 0 || 'name' in tokenVocabulary[0]);
}
/**
 * Returns whether the given TokenVocabulary is an IMultiModeLexerDefinition
 */
export function isIMultiModeLexerDefinition(tokenVocabulary) {
    return tokenVocabulary && 'modes' in tokenVocabulary && 'defaultMode' in tokenVocabulary;
}
/**
 * Returns whether the given TokenVocabulary is a TokenTypeDictionary
 */
export function isTokenTypeDictionary(tokenVocabulary) {
    return !isTokenTypeArray(tokenVocabulary) && !isIMultiModeLexerDefinition(tokenVocabulary);
}
//# sourceMappingURL=lexer.js.map
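The three guards form an exhaustive case split over TokenVocabulary, mirroring how toTokenTypeDictionary normalizes its input. A small usage sketch (the helper name is hypothetical):

```ts
import type { TokenVocabulary } from 'chevrotain';
import { isTokenTypeArray, isIMultiModeLexerDefinition } from 'langium';

// Count the token types in any of the three vocabulary shapes.
function countTokenTypes(vocab: TokenVocabulary): number {
    if (isTokenTypeArray(vocab)) {
        return vocab.length;
    }
    if (isIMultiModeLexerDefinition(vocab)) {
        return Object.values(vocab.modes).flat().length;
    }
    return Object.keys(vocab).length; // TokenTypeDictionary
}
```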
1
node_modules/langium/lib/parser/lexer.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"lexer.js","sourceRoot":"","sources":["../../src/parser/lexer.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAIhF,OAAO,EAAE,KAAK,IAAI,eAAe,EAAE,yBAAyB,EAAE,MAAM,YAAY,CAAC;AAGjF,MAAM,OAAO,gCAAgC;IAEzC,gCAAgC,CAAC,QAAgB,EAAE,WAAmB,EAAE,MAAc,EAAE,IAAa,EAAE,MAAe;QAClH,OAAO,yBAAyB,CAAC,gCAAgC,CAAC,QAAQ,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,CAAC,CAAC;IACnH,CAAC;IAED,gCAAgC,CAAC,KAAa;QAC1C,OAAO,yBAAyB,CAAC,gCAAgC,CAAC,KAAK,CAAC,CAAC;IAC7E,CAAC;CACJ;AAwBD,MAAM,CAAC,MAAM,wBAAwB,GAAoB,EAAE,IAAI,EAAE,MAAM,EAAE,CAAC;AAO1E,MAAM,OAAO,YAAY;IAOrB,YAAY,QAA6B;QACrC,IAAI,CAAC,oBAAoB,GAAG,QAAQ,CAAC,MAAM,CAAC,yBAAyB,CAAC;QACtE,IAAI,CAAC,YAAY,GAAG,QAAQ,CAAC,MAAM,CAAC,YAAY,CAAC;QACjD,MAAM,MAAM,GAAG,IAAI,CAAC,YAAY,CAAC,WAAW,CAAC,QAAQ,CAAC,OAAO,EAAE;YAC3D,eAAe,EAAE,QAAQ,CAAC,gBAAgB,CAAC,eAAe;SAC7D,CAAC,CAAC;QACH,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,qBAAqB,CAAC,MAAM,CAAC,CAAC;QACrD,MAAM,WAAW,GAAG,qBAAqB,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC;QACnF,MAAM,UAAU,GAAG,QAAQ,CAAC,gBAAgB,CAAC,IAAI,KAAK,YAAY,CAAC;QACnE,IAAI,CAAC,eAAe,GAAG,IAAI,eAAe,CAAC,WAAW,EAAE;YACpD,gBAAgB,EAAE,MAAM;YACxB,eAAe,EAAE,UAAU;YAC3B,oBAAoB,EAAE,IAAI,CAAC,oBAAoB;SAClD,CAAC,CAAC;IACP,CAAC;IAED,IAAI,UAAU;QACV,OAAO,IAAI,CAAC,UAAU,CAAC;IAC3B,CAAC;IAED,QAAQ,CAAC,IAAY,EAAE,WAA4B,wBAAwB;;QACvE,MAAM,gBAAgB,GAAG,IAAI,CAAC,eAAe,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC;QAC7D,OAAO;YACH,MAAM,EAAE,gBAAgB,CAAC,MAAM;YAC/B,MAAM,EAAE,gBAAgB,CAAC,MAAM;YAC/B,MAAM,EAAE,MAAA,gBAAgB,CAAC,MAAM,CAAC,MAAM,mCAAI,EAAE;YAC5C,MAAM,EAAE,MAAA,MAAA,IAAI,CAAC,YAAY,EAAC,iBAAiB,mDAAG,IAAI,CAAC;SACtD,CAAC;IACN,CAAC;IAES,qBAAqB,CAAC,WAA4B;QACxD,IAAI,qBAAqB,CAAC,WAAW,CAAC;YAAE,OAAO,WAAW,CAAC;QAC3D,MAAM,MAAM,GAAG,2BAA2B,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,CAAC,WAAW,CAAC;QAChH,MAAM,GAAG,GAAwB,EAAE,CAAC;QACpC,MAAM,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC;QACjD,OAAO,GAAG,CAAC;IACf,CAAC;CACJ;AAED;;GAEG;AACH,MAAM,UAAU,gBAAgB,CAAC,eAAgC;IAC7D,OAAO,KAAK,CAAC,OAAO,CAAC,eAAe,CAAC,IAAI,CAAC,eAAe,CAAC,MAAM,KAAK,CAAC,IAAI,MAAM,IAAI,eAAe,CAAC,CAAC,CAAC,CAAC,CAAC;AAC5G,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,2BAA2B,CAAC,eAAgC;IACxE,OAAO,eAAe,IAAI,OAAO,IAAI,eAAe,IAAI,aAAa,IAAI,eAAe,CAAC;AAC7F,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,qBAAqB,CAAC,eAAgC;IAClE,OAAO,CAAC,gBAAgB,CAAC,eAAe,CAAC,IAAI,CAAC,2BAA2B,CAAC,eAAe,CAAC,CAAC;AAC/F,CAAC"}
10
node_modules/langium/lib/parser/parser-builder-base.d.ts
generated
vendored
Normal file
@@ -0,0 +1,10 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { TokenTypeDictionary } from 'chevrotain';
import type { Grammar } from '../languages/generated/ast.js';
import type { BaseParser } from './langium-parser.js';
export declare function createParser<T extends BaseParser>(grammar: Grammar, parser: T, tokens: TokenTypeDictionary): T;
//# sourceMappingURL=parser-builder-base.d.ts.map
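createParser is the shared core behind both builder modules: langium-parser-builder.js (above) instantiates it with a LangiumParser, and the completion parser builder presumably follows the same shape. A sketch under that assumption (the helper name is hypothetical):

```ts
import { LangiumCompletionParser, createParser } from 'langium';
import type { LangiumCoreServices } from 'langium';

// Hypothetical mirror of langium-parser-builder.js for the completion parser.
function buildCompletionParser(services: LangiumCoreServices) {
    const parser = new LangiumCompletionParser(services);
    createParser(services.Grammar, parser, services.parser.Lexer.definition);
    parser.finalize();
    return parser;
}
```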
1
node_modules/langium/lib/parser/parser-builder-base.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"parser-builder-base.d.ts","sourceRoot":"","sources":["../../src/parser/parser-builder-base.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAqB,mBAAmB,EAAE,MAAM,YAAY,CAAC;AACzE,OAAO,KAAK,EAAoE,OAAO,EAAuE,MAAM,+BAA+B,CAAC;AACpM,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,qBAAqB,CAAC;AA+BtD,wBAAgB,YAAY,CAAC,CAAC,SAAS,UAAU,EAAE,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,EAAE,mBAAmB,GAAG,CAAC,CAQ9G"}
369
node_modules/langium/lib/parser/parser-builder-base.js
generated
vendored
Normal file
@@ -0,0 +1,369 @@
/******************************************************************************
 * Copyright 2022 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { EMPTY_ALT, EOF } from 'chevrotain';
import { isAction, isAlternatives, isEndOfFile, isAssignment, isConjunction, isCrossReference, isDisjunction, isGroup, isKeyword, isNegation, isParameterReference, isParserRule, isRuleCall, isTerminalRule, isUnorderedGroup, isBooleanLiteral } from '../languages/generated/ast.js';
import { assertUnreachable, ErrorWithLocation } from '../utils/errors.js';
import { stream } from '../utils/stream.js';
import { findNameAssignment, getAllReachableRules, getTypeName } from '../utils/grammar-utils.js';
export function createParser(grammar, parser, tokens) {
    const parserContext = {
        parser,
        tokens,
        ruleNames: new Map()
    };
    buildRules(parserContext, grammar);
    return parser;
}
function buildRules(parserContext, grammar) {
    const reachable = getAllReachableRules(grammar, false);
    const parserRules = stream(grammar.rules).filter(isParserRule).filter(rule => reachable.has(rule));
    for (const rule of parserRules) {
        const ctx = Object.assign(Object.assign({}, parserContext), { consume: 1, optional: 1, subrule: 1, many: 1, or: 1 });
        parserContext.parser.rule(rule, buildElement(ctx, rule.definition));
    }
}
function buildElement(ctx, element, ignoreGuard = false) {
    let method;
    if (isKeyword(element)) {
        method = buildKeyword(ctx, element);
    }
    else if (isAction(element)) {
        method = buildAction(ctx, element);
    }
    else if (isAssignment(element)) {
        method = buildElement(ctx, element.terminal);
    }
    else if (isCrossReference(element)) {
        method = buildCrossReference(ctx, element);
    }
    else if (isRuleCall(element)) {
        method = buildRuleCall(ctx, element);
    }
    else if (isAlternatives(element)) {
        method = buildAlternatives(ctx, element);
    }
    else if (isUnorderedGroup(element)) {
        method = buildUnorderedGroup(ctx, element);
    }
    else if (isGroup(element)) {
        method = buildGroup(ctx, element);
    }
    else if (isEndOfFile(element)) {
        const idx = ctx.consume++;
        method = () => ctx.parser.consume(idx, EOF, element);
    }
    else {
        throw new ErrorWithLocation(element.$cstNode, `Unexpected element type: ${element.$type}`);
    }
    return wrap(ctx, ignoreGuard ? undefined : getGuardCondition(element), method, element.cardinality);
}
function buildAction(ctx, action) {
    const actionType = getTypeName(action);
    return () => ctx.parser.action(actionType, action);
}
function buildRuleCall(ctx, ruleCall) {
    const rule = ruleCall.rule.ref;
    if (isParserRule(rule)) {
        const idx = ctx.subrule++;
        const fragment = rule.fragment;
        const predicate = ruleCall.arguments.length > 0 ? buildRuleCallPredicate(rule, ruleCall.arguments) : () => ({});
        return (args) => ctx.parser.subrule(idx, getRule(ctx, rule), fragment, ruleCall, predicate(args));
    }
    else if (isTerminalRule(rule)) {
        const idx = ctx.consume++;
        const method = getToken(ctx, rule.name);
        return () => ctx.parser.consume(idx, method, ruleCall);
    }
    else if (!rule) {
        throw new ErrorWithLocation(ruleCall.$cstNode, `Undefined rule: ${ruleCall.rule.$refText}`);
    }
    else {
        assertUnreachable(rule);
    }
}
function buildRuleCallPredicate(rule, namedArgs) {
    const predicates = namedArgs.map(e => buildPredicate(e.value));
    return (args) => {
        const ruleArgs = {};
        for (let i = 0; i < predicates.length; i++) {
            const ruleTarget = rule.parameters[i];
            const predicate = predicates[i];
            ruleArgs[ruleTarget.name] = predicate(args);
        }
        return ruleArgs;
    };
}
function buildPredicate(condition) {
    if (isDisjunction(condition)) {
        const left = buildPredicate(condition.left);
        const right = buildPredicate(condition.right);
        return (args) => (left(args) || right(args));
    }
    else if (isConjunction(condition)) {
        const left = buildPredicate(condition.left);
        const right = buildPredicate(condition.right);
        return (args) => (left(args) && right(args));
    }
    else if (isNegation(condition)) {
        const value = buildPredicate(condition.value);
        return (args) => !value(args);
    }
    else if (isParameterReference(condition)) {
        const name = condition.parameter.ref.name;
        return (args) => args !== undefined && args[name] === true;
    }
    else if (isBooleanLiteral(condition)) {
        const value = Boolean(condition.true);
        return () => value;
    }
    assertUnreachable(condition);
}
function buildAlternatives(ctx, alternatives) {
    if (alternatives.elements.length === 1) {
        return buildElement(ctx, alternatives.elements[0]);
    }
    else {
        const methods = [];
        for (const element of alternatives.elements) {
            const predicatedMethod = {
                // Since we handle the guard condition in the alternative already
                // We can ignore the group guard condition inside
                ALT: buildElement(ctx, element, true)
            };
            const guard = getGuardCondition(element);
            if (guard) {
                predicatedMethod.GATE = buildPredicate(guard);
            }
            methods.push(predicatedMethod);
        }
        const idx = ctx.or++;
        return (args) => ctx.parser.alternatives(idx, methods.map(method => {
            const alt = {
                ALT: () => method.ALT(args)
            };
            const gate = method.GATE;
            if (gate) {
                alt.GATE = () => gate(args);
            }
            return alt;
        }));
    }
}
function buildUnorderedGroup(ctx, group) {
    if (group.elements.length === 1) {
        return buildElement(ctx, group.elements[0]);
    }
    const methods = [];
    for (const element of group.elements) {
        const predicatedMethod = {
            // Since we handle the guard condition in the alternative already
            // We can ignore the group guard condition inside
            ALT: buildElement(ctx, element, true)
        };
        const guard = getGuardCondition(element);
        if (guard) {
            predicatedMethod.GATE = buildPredicate(guard);
        }
        methods.push(predicatedMethod);
    }
    const orIdx = ctx.or++;
    const idFunc = (groupIdx, lParser) => {
        const stackId = lParser.getRuleStack().join('-');
        return `uGroup_${groupIdx}_${stackId}`;
    };
    const alternatives = (args) => ctx.parser.alternatives(orIdx, methods.map((method, idx) => {
        const alt = { ALT: () => true };
        const parser = ctx.parser;
        alt.ALT = () => {
            method.ALT(args);
            if (!parser.isRecording()) {
                const key = idFunc(orIdx, parser);
                if (!parser.unorderedGroups.get(key)) {
                    // init after clear state
                    parser.unorderedGroups.set(key, []);
                }
                const groupState = parser.unorderedGroups.get(key);
                if (typeof (groupState === null || groupState === void 0 ? void 0 : groupState[idx]) === 'undefined') {
                    // Not accessed yet
                    groupState[idx] = true;
                }
            }
        };
        const gate = method.GATE;
        if (gate) {
            alt.GATE = () => gate(args);
        }
        else {
            alt.GATE = () => {
                const trackedAlternatives = parser.unorderedGroups.get(idFunc(orIdx, parser));
                const allow = !(trackedAlternatives === null || trackedAlternatives === void 0 ? void 0 : trackedAlternatives[idx]);
                return allow;
            };
        }
        return alt;
    }));
    const wrapped = wrap(ctx, getGuardCondition(group), alternatives, '*');
    return (args) => {
        wrapped(args);
        if (!ctx.parser.isRecording()) {
            ctx.parser.unorderedGroups.delete(idFunc(orIdx, ctx.parser));
        }
    };
}
function buildGroup(ctx, group) {
    const methods = group.elements.map(e => buildElement(ctx, e));
    return (args) => methods.forEach(method => method(args));
}
function getGuardCondition(element) {
    if (isGroup(element)) {
        return element.guardCondition;
    }
    return undefined;
}
function buildCrossReference(ctx, crossRef, terminal = crossRef.terminal) {
    if (!terminal) {
        if (!crossRef.type.ref) {
            throw new Error('Could not resolve reference to type: ' + crossRef.type.$refText);
        }
        const assignment = findNameAssignment(crossRef.type.ref);
        const assignTerminal = assignment === null || assignment === void 0 ? void 0 : assignment.terminal;
        if (!assignTerminal) {
            throw new Error('Could not find name assignment for type: ' + getTypeName(crossRef.type.ref));
        }
        return buildCrossReference(ctx, crossRef, assignTerminal);
    }
    else if (isRuleCall(terminal) && isParserRule(terminal.rule.ref)) {
        // The terminal is a data type rule here. Everything else will result in a validation error.
        const rule = terminal.rule.ref;
        const idx = ctx.subrule++;
        return (args) => ctx.parser.subrule(idx, getRule(ctx, rule), false, crossRef, args);
    }
    else if (isRuleCall(terminal) && isTerminalRule(terminal.rule.ref)) {
        const idx = ctx.consume++;
        const terminalRule = getToken(ctx, terminal.rule.ref.name);
        return () => ctx.parser.consume(idx, terminalRule, crossRef);
    }
    else if (isKeyword(terminal)) {
        const idx = ctx.consume++;
        const keyword = getToken(ctx, terminal.value);
        return () => ctx.parser.consume(idx, keyword, crossRef);
    }
    else {
        throw new Error('Could not build cross reference parser');
    }
}
function buildKeyword(ctx, keyword) {
    const idx = ctx.consume++;
    const token = ctx.tokens[keyword.value];
    if (!token) {
        throw new Error('Could not find token for keyword: ' + keyword.value);
    }
    return () => ctx.parser.consume(idx, token, keyword);
}
function wrap(ctx, guard, method, cardinality) {
    const gate = guard && buildPredicate(guard);
    if (!cardinality) {
        if (gate) {
            const idx = ctx.or++;
            return (args) => ctx.parser.alternatives(idx, [
                {
                    ALT: () => method(args),
                    GATE: () => gate(args)
                },
                {
                    ALT: EMPTY_ALT(),
                    GATE: () => !gate(args)
                }
            ]);
        }
        else {
            return method;
        }
    }
    if (cardinality === '*') {
        const idx = ctx.many++;
        return (args) => ctx.parser.many(idx, {
            DEF: () => method(args),
            GATE: gate ? () => gate(args) : undefined
        });
    }
    else if (cardinality === '+') {
        const idx = ctx.many++;
        if (gate) {
            const orIdx = ctx.or++;
            // In the case of a guard condition for the `+` group
            // We combine it with an empty alternative
            // If the condition returns true, it needs to parse at least a single iteration
            // If its false, it is not allowed to parse anything
            return (args) => ctx.parser.alternatives(orIdx, [
                {
                    ALT: () => ctx.parser.atLeastOne(idx, {
                        DEF: () => method(args)
                    }),
                    GATE: () => gate(args)
                },
                {
                    ALT: EMPTY_ALT(),
                    GATE: () => !gate(args)
                }
            ]);
        }
        else {
            return (args) => ctx.parser.atLeastOne(idx, {
                DEF: () => method(args),
            });
        }
    }
    else if (cardinality === '?') {
        const idx = ctx.optional++;
        return (args) => ctx.parser.optional(idx, {
            DEF: () => method(args),
            GATE: gate ? () => gate(args) : undefined
        });
    }
    else {
        assertUnreachable(cardinality);
    }
}
function getRule(ctx, element) {
    const name = getRuleName(ctx, element);
    const rule = ctx.parser.getRule(name);
    if (!rule)
        throw new Error(`Rule "${name}" not found."`);
    return rule;
}
function getRuleName(ctx, element) {
    if (isParserRule(element)) {
        return element.name;
    }
    else if (ctx.ruleNames.has(element)) {
        return ctx.ruleNames.get(element);
    }
    else {
        let item = element;
        let parent = item.$container;
        let ruleName = element.$type;
        while (!isParserRule(parent)) {
            if (isGroup(parent) || isAlternatives(parent) || isUnorderedGroup(parent)) {
                const index = parent.elements.indexOf(item);
                ruleName = index.toString() + ':' + ruleName;
            }
            item = parent;
            parent = parent.$container;
        }
        const rule = parent;
        ruleName = rule.name + ':' + ruleName;
        ctx.ruleNames.set(element, ruleName);
        return ruleName;
    }
}
function getToken(ctx, name) {
    const token = ctx.tokens[name];
    if (!token)
        throw new Error(`Token "${name}" not found."`);
    return token;
}
//# sourceMappingURL=parser-builder-base.js.map
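A minimal, self-contained TypeScript sketch of what the guard machinery above produces: grammar conditions such as `<A & !B>` reduce to plain predicates over the named-argument record. The `Condition` shape below is a hypothetical stand-in for Langium's Disjunction/Conjunction/Negation/ParameterReference AST nodes, not the vendored types:

    // Hypothetical condition shape; the real buildPredicate above walks AST nodes.
    type Args = Record<string, boolean>;
    type Condition =
        | { kind: 'or' | 'and'; left: Condition; right: Condition }
        | { kind: 'not'; value: Condition }
        | { kind: 'param'; name: string };

    function toPredicate(condition: Condition): (args: Args) => boolean {
        switch (condition.kind) {
            case 'or': {
                const left = toPredicate(condition.left);
                const right = toPredicate(condition.right);
                return args => left(args) || right(args);
            }
            case 'and': {
                const left = toPredicate(condition.left);
                const right = toPredicate(condition.right);
                return args => left(args) && right(args);
            }
            case 'not': {
                const value = toPredicate(condition.value);
                return args => !value(args);
            }
            case 'param':
                return args => args[condition.name] === true;
        }
    }

    // `<A & !B>`: the gated element is parseable only when A is set and B is not.
    const gate = toPredicate({
        kind: 'and',
        left: { kind: 'param', name: 'A' },
        right: { kind: 'not', value: { kind: 'param', name: 'B' } }
    });
    console.log(gate({ A: true, B: false })); // true
    console.log(gate({ A: true, B: true }));  // false

Like `buildPredicate`, the sketch compiles the condition tree once and closes over the result, so evaluating a gate during parsing is just function calls, with no repeated tree walking.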
1
node_modules/langium/lib/parser/parser-builder-base.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
7
node_modules/langium/lib/parser/parser-config.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
export type { IParserConfig } from 'chevrotain';
//# sourceMappingURL=parser-config.d.ts.map
1
node_modules/langium/lib/parser/parser-config.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"parser-config.d.ts","sourceRoot":"","sources":["../../src/parser/parser-config.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,YAAY,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC"}
7
node_modules/langium/lib/parser/parser-config.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
export {};
//# sourceMappingURL=parser-config.js.map
1
node_modules/langium/lib/parser/parser-config.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"parser-config.js","sourceRoot":"","sources":["../../src/parser/parser-config.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF"}
49
node_modules/langium/lib/parser/token-builder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,49 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { CustomPatternMatcherFunc, ILexingError, TokenPattern, TokenType, TokenVocabulary } from 'chevrotain';
import type { AbstractRule, Grammar, Keyword, TerminalRule } from '../languages/generated/ast.js';
import type { Stream } from '../utils/stream.js';
export interface TokenBuilderOptions {
    caseInsensitive?: boolean;
}
export interface TokenBuilder {
    buildTokens(grammar: Grammar, options?: TokenBuilderOptions): TokenVocabulary;
    /**
     * Produces a lexing report for the given text that was just tokenized using the tokens provided by this builder.
     *
     * @param text The text that was tokenized.
     */
    flushLexingReport?(text: string): LexingReport;
}
/**
 * A custom lexing report that can be produced by the token builder during the lexing process.
 * Adopters need to ensure that the any custom fields are serializable so they can be sent across worker threads.
 */
export interface LexingReport {
    diagnostics: LexingDiagnostic[];
}
export type LexingDiagnosticSeverity = 'error' | 'warning' | 'info' | 'hint';
export interface LexingDiagnostic extends ILexingError {
    severity?: LexingDiagnosticSeverity;
}
export declare class DefaultTokenBuilder implements TokenBuilder {
    /**
     * The list of diagnostics stored during the lexing process of a single text.
     */
    protected diagnostics: LexingDiagnostic[];
    buildTokens(grammar: Grammar, options?: TokenBuilderOptions): TokenVocabulary;
    flushLexingReport(text: string): LexingReport;
    protected popDiagnostics(): LexingDiagnostic[];
    protected buildTerminalTokens(rules: Stream<AbstractRule>): TokenType[];
    protected buildTerminalToken(terminal: TerminalRule): TokenType;
    protected requiresCustomPattern(regex: RegExp): boolean;
    protected regexPatternFunction(regex: RegExp): CustomPatternMatcherFunc;
    protected buildKeywordTokens(rules: Stream<AbstractRule>, terminalTokens: TokenType[], options?: TokenBuilderOptions): TokenType[];
    protected buildKeywordToken(keyword: Keyword, terminalTokens: TokenType[], caseInsensitive: boolean): TokenType;
    protected buildKeywordPattern(keyword: Keyword, caseInsensitive: boolean): TokenPattern;
    protected findLongerAlt(keyword: Keyword, terminalTokens: TokenType[]): TokenType[];
}
//# sourceMappingURL=token-builder.d.ts.map
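`DefaultTokenBuilder` is the usual extension seam for lexer customization. A hedged sketch, assuming the langium package re-exports `DefaultTokenBuilder`, `TokenBuilderOptions`, and the `GrammarAST` namespace from its root (as current versions do): a builder that forces the `caseInsensitive` option, so every keyword token is built with the case-insensitive pattern:

    import { DefaultTokenBuilder } from 'langium';
    import type { GrammarAST, TokenBuilderOptions } from 'langium';
    import type { TokenVocabulary } from 'chevrotain';

    export class CaseInsensitiveKeywordTokenBuilder extends DefaultTokenBuilder {
        override buildTokens(grammar: GrammarAST.Grammar, options?: TokenBuilderOptions): TokenVocabulary {
            // Force caseInsensitive regardless of what the language module passed;
            // buildKeywordPattern then emits a case-insensitive regex per keyword.
            return super.buildTokens(grammar, { ...options, caseInsensitive: true });
        }
    }

Registering such a subclass in a language's service module (in place of the default `parser.TokenBuilder`) is the intended hook; the terminal tokens, keyword sorting, and LONGER_ALT wiring are all inherited unchanged.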
1
node_modules/langium/lib/parser/token-builder.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"token-builder.d.ts","sourceRoot":"","sources":["../../src/parser/token-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAE,wBAAwB,EAAE,YAAY,EAAE,YAAY,EAAE,SAAS,EAAE,eAAe,EAAE,MAAM,YAAY,CAAC;AACnH,OAAO,KAAK,EAAE,YAAY,EAAE,OAAO,EAAE,OAAO,EAAE,YAAY,EAAE,MAAM,+BAA+B,CAAC;AAClG,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,oBAAoB,CAAC;AAQjD,MAAM,WAAW,mBAAmB;IAChC,eAAe,CAAC,EAAE,OAAO,CAAA;CAC5B;AAED,MAAM,WAAW,YAAY;IACzB,WAAW,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,mBAAmB,GAAG,eAAe,CAAC;IAC9E;;;;OAIG;IACH,iBAAiB,CAAC,CAAC,IAAI,EAAE,MAAM,GAAG,YAAY,CAAC;CAClD;AAED;;;GAGG;AACH,MAAM,WAAW,YAAY;IACzB,WAAW,EAAE,gBAAgB,EAAE,CAAC;CACnC;AAED,MAAM,MAAM,wBAAwB,GAAG,OAAO,GAAG,SAAS,GAAG,MAAM,GAAG,MAAM,CAAC;AAE7E,MAAM,WAAW,gBAAiB,SAAQ,YAAY;IAClD,QAAQ,CAAC,EAAE,wBAAwB,CAAC;CACvC;AAED,qBAAa,mBAAoB,YAAW,YAAY;IACpD;;OAEG;IACH,SAAS,CAAC,WAAW,EAAE,gBAAgB,EAAE,CAAM;IAE/C,WAAW,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,mBAAmB,GAAG,eAAe;IAmB7E,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,YAAY;IAI7C,SAAS,CAAC,cAAc,IAAI,gBAAgB,EAAE;IAM9C,SAAS,CAAC,mBAAmB,CAAC,KAAK,EAAE,MAAM,CAAC,YAAY,CAAC,GAAG,SAAS,EAAE;IAKvE,SAAS,CAAC,kBAAkB,CAAC,QAAQ,EAAE,YAAY,GAAG,SAAS;IAiB/D,SAAS,CAAC,qBAAqB,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO;IAYvD,SAAS,CAAC,oBAAoB,CAAC,KAAK,EAAE,MAAM,GAAG,wBAAwB;IASvE,SAAS,CAAC,kBAAkB,CAAC,KAAK,EAAE,MAAM,CAAC,YAAY,CAAC,EAAE,cAAc,EAAE,SAAS,EAAE,EAAE,OAAO,CAAC,EAAE,mBAAmB,GAAG,SAAS,EAAE;IAWlI,SAAS,CAAC,iBAAiB,CAAC,OAAO,EAAE,OAAO,EAAE,cAAc,EAAE,SAAS,EAAE,EAAE,eAAe,EAAE,OAAO,GAAG,SAAS;IAe/G,SAAS,CAAC,mBAAmB,CAAC,OAAO,EAAE,OAAO,EAAE,eAAe,EAAE,OAAO,GAAG,YAAY;IAMvF,SAAS,CAAC,aAAa,CAAC,OAAO,EAAE,OAAO,EAAE,cAAc,EAAE,SAAS,EAAE,GAAG,SAAS,EAAE;CAStF"}
123
node_modules/langium/lib/parser/token-builder.js
generated
vendored
Normal file
@@ -0,0 +1,123 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { Lexer } from 'chevrotain';
import { isKeyword, isParserRule, isTerminalRule } from '../languages/generated/ast.js';
import { streamAllContents } from '../utils/ast-utils.js';
import { getAllReachableRules, terminalRegex } from '../utils/grammar-utils.js';
import { getCaseInsensitivePattern, isWhitespace, partialMatches } from '../utils/regexp-utils.js';
import { stream } from '../utils/stream.js';
export class DefaultTokenBuilder {
    constructor() {
        /**
         * The list of diagnostics stored during the lexing process of a single text.
         */
        this.diagnostics = [];
    }
    buildTokens(grammar, options) {
        const reachableRules = stream(getAllReachableRules(grammar, false));
        const terminalTokens = this.buildTerminalTokens(reachableRules);
        const tokens = this.buildKeywordTokens(reachableRules, terminalTokens, options);
        terminalTokens.forEach(terminalToken => {
            const pattern = terminalToken.PATTERN;
            if (typeof pattern === 'object' && pattern && 'test' in pattern && isWhitespace(pattern)) {
                tokens.unshift(terminalToken);
            }
            else {
                tokens.push(terminalToken);
            }
        });
        // We don't need to add the EOF token explicitly.
        // It is automatically available at the end of the token stream.
        return tokens;
    }
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    flushLexingReport(text) {
        return { diagnostics: this.popDiagnostics() };
    }
    popDiagnostics() {
        const diagnostics = [...this.diagnostics];
        this.diagnostics = [];
        return diagnostics;
    }
    buildTerminalTokens(rules) {
        return rules.filter(isTerminalRule).filter(e => !e.fragment)
            .map(terminal => this.buildTerminalToken(terminal)).toArray();
    }
    buildTerminalToken(terminal) {
        const regex = terminalRegex(terminal);
        const pattern = this.requiresCustomPattern(regex) ? this.regexPatternFunction(regex) : regex;
        const tokenType = {
            name: terminal.name,
            PATTERN: pattern,
        };
        if (typeof pattern === 'function') {
            tokenType.LINE_BREAKS = true;
        }
        if (terminal.hidden) {
            // Only skip tokens that are able to accept whitespace
            tokenType.GROUP = isWhitespace(regex) ? Lexer.SKIPPED : 'hidden';
        }
        return tokenType;
    }
    requiresCustomPattern(regex) {
        if (regex.flags.includes('u') || regex.flags.includes('s')) {
            // Unicode and dotall regexes are not supported by Chevrotain.
            return true;
        }
        else if (regex.source.includes('?<=') || regex.source.includes('?<!')) {
            // Negative and positive lookbehind are not supported by Chevrotain yet.
            return true;
        }
        else {
            return false;
        }
    }
    regexPatternFunction(regex) {
        const stickyRegex = new RegExp(regex, regex.flags + 'y');
        return (text, offset) => {
            stickyRegex.lastIndex = offset;
            const execResult = stickyRegex.exec(text);
            return execResult;
        };
    }
    buildKeywordTokens(rules, terminalTokens, options) {
        return rules
            // We filter by parser rules, since keywords in terminal rules get transformed into regex and are not actual tokens
            .filter(isParserRule)
            .flatMap(rule => streamAllContents(rule).filter(isKeyword))
            .distinct(e => e.value).toArray()
            // Sort keywords by descending length
            .sort((a, b) => b.value.length - a.value.length)
            .map(keyword => this.buildKeywordToken(keyword, terminalTokens, Boolean(options === null || options === void 0 ? void 0 : options.caseInsensitive)));
    }
    buildKeywordToken(keyword, terminalTokens, caseInsensitive) {
        const keywordPattern = this.buildKeywordPattern(keyword, caseInsensitive);
        const tokenType = {
            name: keyword.value,
            PATTERN: keywordPattern,
            LONGER_ALT: this.findLongerAlt(keyword, terminalTokens)
        };
        if (typeof keywordPattern === 'function') {
            tokenType.LINE_BREAKS = true;
        }
        return tokenType;
    }
    buildKeywordPattern(keyword, caseInsensitive) {
        return caseInsensitive ?
            new RegExp(getCaseInsensitivePattern(keyword.value)) :
            keyword.value;
    }
    findLongerAlt(keyword, terminalTokens) {
        return terminalTokens.reduce((longerAlts, token) => {
            const pattern = token === null || token === void 0 ? void 0 : token.PATTERN;
            if ((pattern === null || pattern === void 0 ? void 0 : pattern.source) && partialMatches('^' + pattern.source + '$', keyword.value)) {
                longerAlts.push(token);
            }
            return longerAlts;
        }, []);
    }
}
//# sourceMappingURL=token-builder.js.map
1
node_modules/langium/lib/parser/token-builder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"token-builder.js","sourceRoot":"","sources":["../../src/parser/token-builder.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAKhF,OAAO,EAAE,KAAK,EAAE,MAAM,YAAY,CAAC;AACnC,OAAO,EAAE,SAAS,EAAE,YAAY,EAAE,cAAc,EAAE,MAAM,+BAA+B,CAAC;AACxF,OAAO,EAAE,iBAAiB,EAAE,MAAM,uBAAuB,CAAC;AAC1D,OAAO,EAAE,oBAAoB,EAAE,aAAa,EAAE,MAAM,2BAA2B,CAAC;AAChF,OAAO,EAAE,yBAAyB,EAAE,YAAY,EAAE,cAAc,EAAE,MAAM,0BAA0B,CAAC;AACnG,OAAO,EAAE,MAAM,EAAE,MAAM,oBAAoB,CAAC;AA8B5C,MAAM,OAAO,mBAAmB;IAAhC;QACI;;WAEG;QACO,gBAAW,GAAuB,EAAE,CAAC;IAmHnD,CAAC;IAjHG,WAAW,CAAC,OAAgB,EAAE,OAA6B;QACvD,MAAM,cAAc,GAAG,MAAM,CAAC,oBAAoB,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACpE,MAAM,cAAc,GAAgB,IAAI,CAAC,mBAAmB,CAAC,cAAc,CAAC,CAAC;QAC7E,MAAM,MAAM,GAAgB,IAAI,CAAC,kBAAkB,CAAC,cAAc,EAAE,cAAc,EAAE,OAAO,CAAC,CAAC;QAE7F,cAAc,CAAC,OAAO,CAAC,aAAa,CAAC,EAAE;YACnC,MAAM,OAAO,GAAG,aAAa,CAAC,OAAO,CAAC;YACtC,IAAI,OAAO,OAAO,KAAK,QAAQ,IAAI,OAAO,IAAI,MAAM,IAAI,OAAO,IAAI,YAAY,CAAC,OAAO,CAAC,EAAE,CAAC;gBACvF,MAAM,CAAC,OAAO,CAAC,aAAa,CAAC,CAAC;YAClC,CAAC;iBAAM,CAAC;gBACJ,MAAM,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;YAC/B,CAAC;QACL,CAAC,CAAC,CAAC;QACH,iDAAiD;QACjD,gEAAgE;QAChE,OAAO,MAAM,CAAC;IAClB,CAAC;IAED,6DAA6D;IAC7D,iBAAiB,CAAC,IAAY;QAC1B,OAAO,EAAE,WAAW,EAAE,IAAI,CAAC,cAAc,EAAE,EAAE,CAAC;IAClD,CAAC;IAES,cAAc;QACpB,MAAM,WAAW,GAAG,CAAC,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC;QAC1C,IAAI,CAAC,WAAW,GAAG,EAAE,CAAC;QACtB,OAAO,WAAW,CAAC;IACvB,CAAC;IAES,mBAAmB,CAAC,KAA2B;QACrD,OAAO,KAAK,CAAC,MAAM,CAAC,cAAc,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC;aACvD,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,CAAC,CAAC,OAAO,EAAE,CAAC;IACtE,CAAC;IAES,kBAAkB,CAAC,QAAsB;QAC/C,MAAM,KAAK,GAAG,aAAa,CAAC,QAAQ,CAAC,CAAC;QACtC,MAAM,OAAO,GAAG,IAAI,CAAC,qBAAqB,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,oBAAoB,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QAC7F,MAAM,SAAS,GAAc;YACzB,IAAI,EAAE,QAAQ,CAAC,IAAI;YACnB,OAAO,EAAE,OAAO;SACnB,CAAC;QACF,IAAI,OAAO,OAAO,KAAK,UAAU,EAAE,CAAC;YAChC,SAAS,CAAC,WAAW,GAAG,IAAI,CAAC;QACjC,CAAC;QACD,IAAI,QAAQ,CAAC,MAAM,EAAE,CAAC;YAClB,sDAAsD;YACtD,SAAS,CAAC,KAAK,GAAG,YAAY,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,QAAQ,CAAC;QACrE,CAAC;QACD,OAAO,SAAS,CAAC;IACrB,CAAC;IAES,qBAAqB,CAAC,KAAa;QACzC,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;YACzD,8DAA8D;YAC9D,OAAO,IAAI,CAAC;QAChB,CAAC;aAAM,IAAI,KAAK,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,IAAI,KAAK,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE,CAAC;YACtE,wEAAwE;YACxE,OAAO,IAAI,CAAC;QAChB,CAAC;aAAM,CAAC;YACJ,OAAO,KAAK,CAAC;QACjB,CAAC;IACL,CAAC;IAES,oBAAoB,CAAC,KAAa;QACxC,MAAM,WAAW,GAAG,IAAI,MAAM,CAAC,KAAK,EAAE,KAAK,CAAC,KAAK,GAAG,GAAG,CAAC,CAAC;QACzD,OAAO,CAAC,IAAI,EAAE,MAAM,EAAE,EAAE;YACpB,WAAW,CAAC,SAAS,GAAG,MAAM,CAAC;YAC/B,MAAM,UAAU,GAAG,WAAW,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC1C,OAAO,UAAU,CAAC;QACtB,CAAC,CAAC;IACN,CAAC;IAES,kBAAkB,CAAC,KAA2B,EAAE,cAA2B,EAAE,OAA6B;QAChH,OAAO,KAAK;YACR,mHAAmH;aAClH,MAAM,CAAC,YAAY,CAAC;aACpB,OAAO,CAAC,IAAI,CAAC,EAAE,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;aAC1D,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,OAAO,EAAE;YACjC,qCAAqC;aACpC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC;aAC/C,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC,IAAI,CAAC,iBAAiB,CAAC,OAAO,EAAE,cAAc,EAAE,OAAO,CAAC,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,eAAe,CAAC,CAAC,CAAC,CAAC;IAC5G,CAAC;IAES,iBAAiB,CAAC,OAAgB,EAAE,cAA2B,EAAE,eAAwB;QAC/F,MAAM,cAAc,GAAG,IAAI,CAAC,mBAAmB,CAAC,OAAO,EAAE,eAAe,CAAC,CAAC;QAC1E,MAAM,SAAS,GAAc;YACzB,IAAI,EAAE,OAAO,CAAC,KAAK;YACnB,OAAO,EAAE,cAAc;YACvB,U
AAU,EAAE,IAAI,CAAC,aAAa,CAAC,OAAO,EAAE,cAAc,CAAC;SAC1D,CAAC;QAEF,IAAI,OAAO,cAAc,KAAK,UAAU,EAAE,CAAC;YACvC,SAAS,CAAC,WAAW,GAAG,IAAI,CAAC;QACjC,CAAC;QAED,OAAO,SAAS,CAAC;IACrB,CAAC;IAES,mBAAmB,CAAC,OAAgB,EAAE,eAAwB;QACpE,OAAO,eAAe,CAAC,CAAC;YACpB,IAAI,MAAM,CAAC,yBAAyB,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;YACtD,OAAO,CAAC,KAAK,CAAC;IACtB,CAAC;IAES,aAAa,CAAC,OAAgB,EAAE,cAA2B;QACjE,OAAO,cAAc,CAAC,MAAM,CAAC,CAAC,UAAuB,EAAE,KAAK,EAAE,EAAE;YAC5D,MAAM,OAAO,GAAG,KAAK,aAAL,KAAK,uBAAL,KAAK,CAAE,OAAiB,CAAC;YACzC,IAAI,CAAA,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,MAAM,KAAI,cAAc,CAAC,GAAG,GAAG,OAAO,CAAC,MAAM,GAAG,GAAG,EAAE,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC;gBAC/E,UAAU,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;YAC3B,CAAC;YACD,OAAO,UAAU,CAAC;QACtB,CAAC,EAAE,EAAE,CAAC,CAAC;IACX,CAAC;CACJ"}
31
node_modules/langium/lib/parser/value-converter.d.ts
generated
vendored
Normal file
@@ -0,0 +1,31 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import type { AbstractRule } from '../languages/generated/ast.js';
import type { CstNode } from '../syntax-tree.js';
/**
 * Language-specific service for converting string values from the source text format into a value to be held in the AST.
 */
export interface ValueConverter {
    /**
     * Converts a string value from the source text format into a value to be held in the AST.
     */
    convert(input: string, cstNode: CstNode): ValueType;
}
export type ValueType = string | number | boolean | bigint | Date;
export declare class DefaultValueConverter implements ValueConverter {
    convert(input: string, cstNode: CstNode): ValueType;
    protected runConverter(rule: AbstractRule, input: string, cstNode: CstNode): ValueType;
}
export declare namespace ValueConverter {
    function convertString(input: string): string;
    function convertID(input: string): string;
    function convertInt(input: string): number;
    function convertBigint(input: string): bigint;
    function convertDate(input: string): Date;
    function convertNumber(input: string): number;
    function convertBoolean(input: string): boolean;
}
//# sourceMappingURL=value-converter.d.ts.map
1
node_modules/langium/lib/parser/value-converter.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"value-converter.d.ts","sourceRoot":"","sources":["../../src/parser/value-converter.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAEhF,OAAO,KAAK,EAAmB,YAAY,EAAE,MAAM,+BAA+B,CAAC;AACnF,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAIjD;;GAEG;AACH,MAAM,WAAW,cAAc;IAC3B;;OAEG;IACH,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,SAAS,CAAC;CACvD;AAED,MAAM,MAAM,SAAS,GAAG,MAAM,GAAG,MAAM,GAAG,OAAO,GAAG,MAAM,GAAG,IAAI,CAAC;AAElE,qBAAa,qBAAsB,YAAW,cAAc;IAExD,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,SAAS;IAgBnD,SAAS,CAAC,YAAY,CAAC,IAAI,EAAE,YAAY,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,GAAG,SAAS;CAczF;AAED,yBAAiB,cAAc,CAAC;IAE5B,SAAgB,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAYnD;IAeD,SAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAM/C;IAED,SAAgB,UAAU,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAEhD;IAED,SAAgB,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAEnD;IAED,SAAgB,WAAW,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI,CAE/C;IAED,SAAgB,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAEnD;IAED,SAAgB,cAAc,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAErD;CAEJ"}
99
node_modules/langium/lib/parser/value-converter.js
generated
vendored
Normal file
@@ -0,0 +1,99 @@
/******************************************************************************
 * Copyright 2021 TypeFox GmbH
 * This program and the accompanying materials are made available under the
 * terms of the MIT License, which is available in the project root.
 ******************************************************************************/
import { isCrossReference, isRuleCall } from '../languages/generated/ast.js';
import { getCrossReferenceTerminal, getRuleType } from '../utils/grammar-utils.js';
export class DefaultValueConverter {
    convert(input, cstNode) {
        let feature = cstNode.grammarSource;
        if (isCrossReference(feature)) {
            feature = getCrossReferenceTerminal(feature);
        }
        if (isRuleCall(feature)) {
            const rule = feature.rule.ref;
            if (!rule) {
                throw new Error('This cst node was not parsed by a rule.');
            }
            return this.runConverter(rule, input, cstNode);
        }
        return input;
    }
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    runConverter(rule, input, cstNode) {
        var _a;
        switch (rule.name.toUpperCase()) {
            case 'INT': return ValueConverter.convertInt(input);
            case 'STRING': return ValueConverter.convertString(input);
            case 'ID': return ValueConverter.convertID(input);
        }
        switch ((_a = getRuleType(rule)) === null || _a === void 0 ? void 0 : _a.toLowerCase()) {
            case 'number': return ValueConverter.convertNumber(input);
            case 'boolean': return ValueConverter.convertBoolean(input);
            case 'bigint': return ValueConverter.convertBigint(input);
            case 'date': return ValueConverter.convertDate(input);
            default: return input;
        }
    }
}
export var ValueConverter;
(function (ValueConverter) {
    function convertString(input) {
        let result = '';
        for (let i = 1; i < input.length - 1; i++) {
            const c = input.charAt(i);
            if (c === '\\') {
                const c1 = input.charAt(++i);
                result += convertEscapeCharacter(c1);
            }
            else {
                result += c;
            }
        }
        return result;
    }
    ValueConverter.convertString = convertString;
    function convertEscapeCharacter(char) {
        switch (char) {
            case 'b': return '\b';
            case 'f': return '\f';
            case 'n': return '\n';
            case 'r': return '\r';
            case 't': return '\t';
            case 'v': return '\v';
            case '0': return '\0';
            default: return char;
        }
    }
    function convertID(input) {
        if (input.charAt(0) === '^') {
            return input.substring(1);
        }
        else {
            return input;
        }
    }
    ValueConverter.convertID = convertID;
    function convertInt(input) {
        return parseInt(input);
    }
    ValueConverter.convertInt = convertInt;
    function convertBigint(input) {
        return BigInt(input);
    }
    ValueConverter.convertBigint = convertBigint;
    function convertDate(input) {
        return new Date(input);
    }
    ValueConverter.convertDate = convertDate;
    function convertNumber(input) {
        return Number(input);
    }
    ValueConverter.convertNumber = convertNumber;
    function convertBoolean(input) {
        return input.toLowerCase() === 'true';
    }
    ValueConverter.convertBoolean = convertBoolean;
})(ValueConverter || (ValueConverter = {}));
//# sourceMappingURL=value-converter.js.map
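Because the `ValueConverter` namespace is plain functions, the primitive converters can be exercised directly. A small usage sketch, assuming the namespace is re-exported from the langium package root (as in current versions):

    import { ValueConverter } from 'langium';

    // convertString strips the surrounding quotes and decodes escape sequences.
    console.log(ValueConverter.convertString('"a\\nb"')); // 'a' + newline + 'b'
    // A leading ^ marks an escaped identifier; convertID drops it.
    console.log(ValueConverter.convertID('^type'));       // 'type'
    // convertBoolean compares case-insensitively against 'true'.
    console.log(ValueConverter.convertBoolean('TRUE'));   // true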
1
node_modules/langium/lib/parser/value-converter.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"value-converter.js","sourceRoot":"","sources":["../../src/parser/value-converter.ts"],"names":[],"mappings":"AAAA;;;;gFAIgF;AAIhF,OAAO,EAAE,gBAAgB,EAAE,UAAU,EAAE,MAAM,+BAA+B,CAAC;AAC7E,OAAO,EAAE,yBAAyB,EAAE,WAAW,EAAE,MAAM,2BAA2B,CAAC;AAcnF,MAAM,OAAO,qBAAqB;IAE9B,OAAO,CAAC,KAAa,EAAE,OAAgB;QACnC,IAAI,OAAO,GAAgC,OAAO,CAAC,aAAa,CAAC;QACjE,IAAI,gBAAgB,CAAC,OAAO,CAAC,EAAE,CAAC;YAC5B,OAAO,GAAG,yBAAyB,CAAC,OAAO,CAAC,CAAC;QACjD,CAAC;QACD,IAAI,UAAU,CAAC,OAAO,CAAC,EAAE,CAAC;YACtB,MAAM,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC;YAC9B,IAAI,CAAC,IAAI,EAAE,CAAC;gBACR,MAAM,IAAI,KAAK,CAAC,yCAAyC,CAAC,CAAC;YAC/D,CAAC;YACD,OAAO,IAAI,CAAC,YAAY,CAAC,IAAI,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;QACnD,CAAC;QACD,OAAO,KAAK,CAAC;IACjB,CAAC;IAED,6DAA6D;IACnD,YAAY,CAAC,IAAkB,EAAE,KAAa,EAAE,OAAgB;;QACtE,QAAQ,IAAI,CAAC,IAAI,CAAC,WAAW,EAAE,EAAE,CAAC;YAC9B,KAAK,KAAK,CAAC,CAAC,OAAO,cAAc,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;YACpD,KAAK,QAAQ,CAAC,CAAC,OAAO,cAAc,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;YAC1D,KAAK,IAAI,CAAC,CAAC,OAAO,cAAc,CAAC,SAAS,CAAC,KAAK,CAAC,CAAC;QACtD,CAAC;QACD,QAAQ,MAAA,WAAW,CAAC,IAAI,CAAC,0CAAE,WAAW,EAAE,EAAE,CAAC;YACvC,KAAK,QAAQ,CAAC,CAAC,OAAO,cAAc,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;YAC1D,KAAK,SAAS,CAAC,CAAC,OAAO,cAAc,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC;YAC5D,KAAK,QAAQ,CAAC,CAAC,OAAO,cAAc,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;YAC1D,KAAK,MAAM,CAAC,CAAC,OAAO,cAAc,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;YACtD,OAAO,CAAC,CAAC,OAAO,KAAK,CAAC;QAC1B,CAAC;IACL,CAAC;CACJ;AAED,MAAM,KAAW,cAAc,CAyD9B;AAzDD,WAAiB,cAAc;IAE3B,SAAgB,aAAa,CAAC,KAAa;QACvC,IAAI,MAAM,GAAG,EAAE,CAAC;QAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,KAAK,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YACxC,MAAM,CAAC,GAAG,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;YAC1B,IAAI,CAAC,KAAK,IAAI,EAAE,CAAC;gBACb,MAAM,EAAE,GAAG,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC;gBAC7B,MAAM,IAAI,sBAAsB,CAAC,EAAE,CAAC,CAAC;YACzC,CAAC;iBAAM,CAAC;gBACJ,MAAM,IAAI,CAAC,CAAC;YAChB,CAAC;QACL,CAAC;QACD,OAAO,MAAM,CAAC;IAClB,CAAC;IAZe,4BAAa,gBAY5B,CAAA;IAED,SAAS,sBAAsB,CAAC,IAAY;QACxC,QAAQ,IAAI,EAAE,CAAC;YACX,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,KAAK,GAAG,CAAC,CAAC,OAAO,IAAI,CAAC;YACtB,OAAO,CAAC,CAAC,OAAO,IAAI,CAAC;QACzB,CAAC;IACL,CAAC;IAED,SAAgB,SAAS,CAAC,KAAa;QACnC,IAAI,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE,CAAC;YAC1B,OAAO,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC;QAC9B,CAAC;aAAM,CAAC;YACJ,OAAO,KAAK,CAAC;QACjB,CAAC;IACL,CAAC;IANe,wBAAS,YAMxB,CAAA;IAED,SAAgB,UAAU,CAAC,KAAa;QACpC,OAAO,QAAQ,CAAC,KAAK,CAAC,CAAC;IAC3B,CAAC;IAFe,yBAAU,aAEzB,CAAA;IAED,SAAgB,aAAa,CAAC,KAAa;QACvC,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC;IACzB,CAAC;IAFe,4BAAa,gBAE5B,CAAA;IAED,SAAgB,WAAW,CAAC,KAAa;QACrC,OAAO,IAAI,IAAI,CAAC,KAAK,CAAC,CAAC;IAC3B,CAAC;IAFe,0BAAW,cAE1B,CAAA;IAED,SAAgB,aAAa,CAAC,KAAa;QACvC,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC;IACzB,CAAC;IAFe,4BAAa,gBAE5B,CAAA;IAED,SAAgB,cAAc,CAAC,KAAa;QACxC,OAAO,KAAK,CAAC,WAAW,EAAE,KAAK,MAAM,CAAC;IAC1C,CAAC;IAFe,6BAAc,iBAE7B,CAAA;AAEL,CAAC,EAzDgB,cAAc,KAAd,cAAc,QAyD9B"}