nik
2025-10-03 22:27:28 +03:00
parent 829fad0e17
commit 871cf7e792
16520 changed files with 2967597 additions and 3 deletions

completion-provider.d.ts

@@ -0,0 +1,175 @@
/******************************************************************************
* Copyright 2021 TypeFox GmbH
* This program and the accompanying materials are made available under the
* terms of the MIT License, which is available in the project root.
******************************************************************************/
import type { CompletionItem, CompletionParams, TextEdit } from 'vscode-languageserver-protocol';
import type { LangiumCompletionParser } from '../../parser/langium-parser.js';
import type { NameProvider } from '../../references/name-provider.js';
import type { ScopeProvider } from '../../references/scope-provider.js';
import type { LangiumServices } from '../lsp-services.js';
import type { AstNode, AstNodeDescription, AstReflection, CstNode, ReferenceInfo } from '../../syntax-tree.js';
import type { CancellationToken } from '../../utils/cancellation.js';
import type { MaybePromise } from '../../utils/promise-utils.js';
import type { LangiumDocument, TextDocument } from '../../workspace/documents.js';
import type { NextFeature } from './follow-element-computation.js';
import type { NodeKindProvider } from '../node-kind-provider.js';
import type { FuzzyMatcher } from '../fuzzy-matcher.js';
import type { GrammarConfig } from '../../languages/grammar-config.js';
import type { Lexer } from '../../parser/lexer.js';
import type { DocumentationProvider } from '../../documentation/documentation-provider.js';
import type { MarkupContent } from 'vscode-languageserver';
import { CompletionItemKind, CompletionList, Position } from 'vscode-languageserver';
import * as ast from '../../languages/generated/ast.js';
import { type Stream } from '../../utils/stream.js';
export type CompletionAcceptor = (context: CompletionContext, value: CompletionValueItem) => void;
export type CompletionValueItem = ({
label?: string;
} | {
node: AstNode;
} | {
nodeDescription: AstNodeDescription;
}) & Partial<CompletionItem>;
export interface CompletionContext {
node?: AstNode;
document: LangiumDocument;
textDocument: TextDocument;
features: NextFeature[];
/**
* Index at the start of the token related to this context.
* If the context performs completion for a token that doesn't exist yet, it is equal to the `offset`.
*/
tokenOffset: number;
/**
* Index at the end of the token related to this context, even if it ends after the cursor position.
* Points at the first character after the last token.
* If the context performs completion for a token that doesn't exist yet, it is equal to the `offset`.
*/
tokenEndOffset: number;
/**
* Index of the requested completion position.
*/
offset: number;
position: Position;
}
export interface CompletionProviderOptions {
/**
* Most tools trigger completion requests automatically, without the user explicitly
* requesting completion via a keyboard shortcut (e.g. Ctrl+Space). Typically they do so
* when the user starts to type an identifier. For example, if the user types `c` in a
* JavaScript file, code completion will automatically pop up and present `console`,
* among other items. Characters that make up identifiers don't need to be listed here.
*
* If code completion should automatically be triggered on characters that are not valid
* inside an identifier (for example `.` in JavaScript), list them in `triggerCharacters`.
*/
triggerCharacters?: string[];
/**
* The list of all possible characters that commit a completion. This field can be used
* if clients don't support individual commit characters per completion item.
*
* If a server provides both `allCommitCharacters` and commit characters on an individual
* completion item, the ones on the completion item win.
*/
allCommitCharacters?: string[];
}
export interface CompletionBacktrackingInformation {
previousTokenStart?: number;
previousTokenEnd?: number;
nextTokenStart: number;
nextTokenEnd: number;
}
export declare function mergeCompletionProviderOptions(options: Array<CompletionProviderOptions | undefined>): CompletionProviderOptions;
/**
* Language-specific service for handling completion requests.
*/
export interface CompletionProvider {
/**
* Handle a completion request.
*
* @param document - the document for which the completion request was triggered
* @param params - the completion parameters
* @param cancelToken - a token that can be used to cancel the request
*
* @throws `OperationCancelled` if cancellation is detected during execution
* @throws `ResponseError` if an error is detected that should be sent as response to the client
*/
getCompletion(document: LangiumDocument, params: CompletionParams, cancelToken?: CancellationToken): MaybePromise<CompletionList | undefined>;
/**
* Contains the completion options for this completion provider.
*
* If multiple languages return different options, they are merged before being sent to the language client.
*/
readonly completionOptions?: CompletionProviderOptions;
}
export declare class DefaultCompletionProvider implements CompletionProvider {
protected readonly completionParser: LangiumCompletionParser;
protected readonly documentationProvider: DocumentationProvider;
protected readonly scopeProvider: ScopeProvider;
protected readonly grammar: ast.Grammar;
protected readonly nameProvider: NameProvider;
protected readonly lexer: Lexer;
protected readonly nodeKindProvider: NodeKindProvider;
protected readonly fuzzyMatcher: FuzzyMatcher;
protected readonly grammarConfig: GrammarConfig;
protected readonly astReflection: AstReflection;
readonly completionOptions?: CompletionProviderOptions;
constructor(services: LangiumServices);
getCompletion(document: LangiumDocument, params: CompletionParams, _cancelToken?: CancellationToken): Promise<CompletionList | undefined>;
/**
* The completion algorithm could yield the same reference/keyword multiple times.
*
* This method deduplicates these items before returning them to the client.
* Unique items are identified as a combination of `kind`, `label` and `detail`.
*/
protected deduplicateItems(items: CompletionItem[]): CompletionItem[];
protected findFeaturesAt(document: TextDocument, offset: number): NextFeature[];
protected buildContexts(document: LangiumDocument, position: Position): IterableIterator<CompletionContext>;
protected performNextTokenCompletion(document: LangiumDocument, text: string, _offset: number, _end: number): boolean;
protected findDataTypeRuleStart(cst: CstNode, offset: number): [number, number] | undefined;
/**
* Indicates whether the completion should continue to process the next completion context.
*
* The default implementation continues the completion only if there are currently no proposed completion items.
*/
protected continueCompletion(items: CompletionItem[]): boolean;
/**
* This method returns two sets of token offset information.
*
* The `nextToken*` offsets are related to the token at the cursor position.
* If there is none, both offsets are simply set to `offset`.
*
* The `previousToken*` offsets are related to the last token before the current token at the cursor position.
* They are `undefined` if there is no token before the cursor position.
*/
protected backtrackToAnyToken(text: string, offset: number): CompletionBacktrackingInformation;
protected completionFor(context: CompletionContext, next: NextFeature, acceptor: CompletionAcceptor): MaybePromise<void>;
protected completionForCrossReference(context: CompletionContext, next: NextFeature<ast.CrossReference>, acceptor: CompletionAcceptor): MaybePromise<void>;
/**
* Override this method to change how the stream of candidates is determined for a reference.
* This allows completion-specific modifications and refinements, e.g. filtering, to be
* applied to the proposal computation beyond the rules implemented in the scope provider.
*
* @param refInfo Information about the reference for which the candidates are requested.
* @param _context Information about the completion request including document, cursor position, token under cursor, etc.
* @returns A stream of all elements that are valid for the given reference.
*/
protected getReferenceCandidates(refInfo: ReferenceInfo, _context: CompletionContext): Stream<AstNodeDescription>;
/**
* Override this method to change how reference completion items are created.
*
* To change the `kind` of a completion item, override the `NodeKindProvider` service instead.
* To change the `documentation`, override the `DocumentationProvider` service instead.
*
* @param nodeDescription The description of a reference candidate
* @returns A partial completion item
*/
protected createReferenceCompletionItem(nodeDescription: AstNodeDescription): CompletionValueItem;
protected getReferenceDocumentation(nodeDescription: AstNodeDescription): MarkupContent | string | undefined;
protected completionForKeyword(context: CompletionContext, keyword: ast.Keyword, acceptor: CompletionAcceptor): MaybePromise<void>;
protected getKeywordCompletionItemKind(_keyword: ast.Keyword): CompletionItemKind;
protected filterKeyword(context: CompletionContext, keyword: ast.Keyword): boolean;
protected fillCompletionItem(context: CompletionContext, item: CompletionValueItem): CompletionItem | undefined;
protected buildCompletionTextEdit(context: CompletionContext, label: string, newText: string): TextEdit | undefined;
}
//# sourceMappingURL=completion-provider.d.ts.map
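
A minimal usage sketch, assuming the published `langium` and `langium/lsp` entry points re-export the symbols declared above; `MyDslCompletionProvider` and its same-document filter are hypothetical:

import type { AstNodeDescription, ReferenceInfo, Stream } from 'langium';
import { DefaultCompletionProvider, type CompletionContext } from 'langium/lsp';

// Hypothetical provider that only proposes cross-reference targets
// declared in the same document as the completion request.
export class MyDslCompletionProvider extends DefaultCompletionProvider {
    protected override getReferenceCandidates(refInfo: ReferenceInfo, context: CompletionContext): Stream<AstNodeDescription> {
        // Narrow the default scope; the scope provider itself stays untouched,
        // so linking behavior is unaffected.
        return super.getReferenceCandidates(refInfo, context)
            .filter(candidate => candidate.documentUri.toString() === context.document.uri.toString());
    }
}

Such a subclass would then be bound in the language module, e.g. `lsp: { CompletionProvider: services => new MyDslCompletionProvider(services) }`.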

completion-provider.d.ts.map (file diff suppressed because one or more lines are too long)

completion-provider.js

@@ -0,0 +1,421 @@
/******************************************************************************
* Copyright 2021 TypeFox GmbH
* This program and the accompanying materials are made available under the
* terms of the MIT License, which is available in the project root.
******************************************************************************/
import { CompletionItemKind, CompletionList, Position } from 'vscode-languageserver';
import * as ast from '../../languages/generated/ast.js';
import { assignMandatoryProperties, getContainerOfType } from '../../utils/ast-utils.js';
import { findDeclarationNodeAtOffset, findLeafNodeBeforeOffset } from '../../utils/cst-utils.js';
import { getEntryRule, getExplicitRuleType } from '../../utils/grammar-utils.js';
import { stream } from '../../utils/stream.js';
import { findFirstFeatures, findNextFeatures } from './follow-element-computation.js';
export function mergeCompletionProviderOptions(options) {
const triggerCharacters = Array.from(new Set(options.flatMap(option => { var _a; return (_a = option === null || option === void 0 ? void 0 : option.triggerCharacters) !== null && _a !== void 0 ? _a : []; })));
const allCommitCharacters = Array.from(new Set(options.flatMap(option => { var _a; return (_a = option === null || option === void 0 ? void 0 : option.allCommitCharacters) !== null && _a !== void 0 ? _a : []; })));
return {
triggerCharacters: triggerCharacters.length > 0 ? triggerCharacters : undefined,
allCommitCharacters: allCommitCharacters.length > 0 ? allCommitCharacters : undefined
};
}
export class DefaultCompletionProvider {
constructor(services) {
this.scopeProvider = services.references.ScopeProvider;
this.grammar = services.Grammar;
this.completionParser = services.parser.CompletionParser;
this.nameProvider = services.references.NameProvider;
this.lexer = services.parser.Lexer;
this.nodeKindProvider = services.shared.lsp.NodeKindProvider;
this.fuzzyMatcher = services.shared.lsp.FuzzyMatcher;
this.grammarConfig = services.parser.GrammarConfig;
this.astReflection = services.shared.AstReflection;
this.documentationProvider = services.documentation.DocumentationProvider;
}
async getCompletion(document, params, _cancelToken) {
const items = [];
const contexts = this.buildContexts(document, params.position);
const acceptor = (context, value) => {
const completionItem = this.fillCompletionItem(context, value);
if (completionItem) {
items.push(completionItem);
}
};
const distinctionFunction = (element) => {
if (ast.isKeyword(element.feature)) {
return element.feature.value;
}
else {
return element.feature;
}
};
const completedFeatures = [];
for (const context of contexts) {
await Promise.all(stream(context.features)
.distinct(distinctionFunction)
.exclude(completedFeatures)
.map(e => this.completionFor(context, e, acceptor)));
// Do not try to complete the same feature multiple times
completedFeatures.push(...context.features);
// We might want to stop computing completion results
if (!this.continueCompletion(items)) {
break;
}
}
return CompletionList.create(this.deduplicateItems(items), true);
}
/**
* The completion algorithm could yield the same reference/keyword multiple times.
*
* This method deduplicates these items before returning them to the client.
* Unique items are identified as a combination of `kind`, `label` and `detail`.
*/
deduplicateItems(items) {
return stream(items).distinct(item => `${item.kind}_${item.label}_${item.detail}`).toArray();
}
findFeaturesAt(document, offset) {
const text = document.getText({
start: Position.create(0, 0),
end: document.positionAt(offset)
});
const parserResult = this.completionParser.parse(text);
const tokens = parserResult.tokens;
// If the parser didn't parse any tokens, return the next features of the entry rule
if (parserResult.tokenIndex === 0) {
const parserRule = getEntryRule(this.grammar);
const firstFeatures = findFirstFeatures({
feature: parserRule.definition,
type: getExplicitRuleType(parserRule)
});
if (tokens.length > 0) {
// We have to skip the first token
// The interpreter will only look at the next features, which requires every token after the first
tokens.shift();
return findNextFeatures(firstFeatures.map(e => [e]), tokens);
}
else {
return firstFeatures;
}
}
const leftoverTokens = [...tokens].splice(parserResult.tokenIndex);
const features = findNextFeatures([parserResult.elementStack.map(feature => ({ feature }))], leftoverTokens);
return features;
}
*buildContexts(document, position) {
var _a, _b;
const cst = document.parseResult.value.$cstNode;
if (!cst) {
return;
}
const textDocument = document.textDocument;
const text = textDocument.getText();
const offset = textDocument.offsetAt(position);
const partialContext = {
document,
textDocument,
offset,
position
};
// Data type rules need special handling, as their tokens are irrelevant for completion purposes.
// If we encounter a data type rule at the current offset, we jump to the start of the data type rule.
const dataTypeRuleOffsets = this.findDataTypeRuleStart(cst, offset);
if (dataTypeRuleOffsets) {
const [ruleStart, ruleEnd] = dataTypeRuleOffsets;
const parentNode = (_a = findLeafNodeBeforeOffset(cst, ruleStart)) === null || _a === void 0 ? void 0 : _a.astNode;
yield Object.assign(Object.assign({}, partialContext), { node: parentNode, tokenOffset: ruleStart, tokenEndOffset: ruleEnd, features: this.findFeaturesAt(textDocument, ruleStart) });
}
// For all other purposes, it's enough to jump to the start of the current/previous token
const { nextTokenStart, nextTokenEnd, previousTokenStart, previousTokenEnd } = this.backtrackToAnyToken(text, offset);
let astNodeOffset = nextTokenStart;
if (offset <= nextTokenStart && previousTokenStart !== undefined) {
// This check indicates that the cursor is still before the next token, so we should use the previous AST node (if it exists)
astNodeOffset = previousTokenStart;
}
const astNode = (_b = findLeafNodeBeforeOffset(cst, astNodeOffset)) === null || _b === void 0 ? void 0 : _b.astNode;
let performNextCompletion = true;
if (previousTokenStart !== undefined && previousTokenEnd !== undefined && previousTokenEnd === offset) {
// This context aims to complete the current feature
yield Object.assign(Object.assign({}, partialContext), { node: astNode, tokenOffset: previousTokenStart, tokenEndOffset: previousTokenEnd, features: this.findFeaturesAt(textDocument, previousTokenStart) });
// The completion after the current token should be prevented in case we find out that the current token definitely isn't completed yet
// This is usually the case when the current token ends on a letter.
performNextCompletion = this.performNextTokenCompletion(document, text.substring(previousTokenStart, previousTokenEnd), previousTokenStart, previousTokenEnd);
if (performNextCompletion) {
// This context aims to complete the immediate next feature (if one exists at the current cursor position)
// It uses the previous token's end offset for that.
yield Object.assign(Object.assign({}, partialContext), { node: astNode, tokenOffset: previousTokenEnd, tokenEndOffset: previousTokenEnd, features: this.findFeaturesAt(textDocument, previousTokenEnd) });
}
}
if (!astNode) {
const parserRule = getEntryRule(this.grammar);
if (!parserRule) {
throw new Error('Missing entry parser rule');
}
// This context aims to perform completion for the grammar start (usually when the document is empty)
yield Object.assign(Object.assign({}, partialContext), { tokenOffset: nextTokenStart, tokenEndOffset: nextTokenEnd, features: findFirstFeatures(parserRule.definition) });
}
else if (performNextCompletion) {
// This context aims to complete the next feature, using the next cst start/end
yield Object.assign(Object.assign({}, partialContext), { node: astNode, tokenOffset: nextTokenStart, tokenEndOffset: nextTokenEnd, features: this.findFeaturesAt(textDocument, nextTokenStart) });
}
}
performNextTokenCompletion(document, text, _offset, _end) {
// This regex returns false if the text ends with a letter.
// We don't want to complete new text immediately after a keyword, ID etc.
// We only care about the last character in the text, so we use $ here.
// The \P{L} used here is a Unicode category that matches any character that is not a letter
return /\P{L}$/u.test(text);
}
findDataTypeRuleStart(cst, offset) {
var _a, _b;
let containerNode = findDeclarationNodeAtOffset(cst, offset, this.grammarConfig.nameRegexp);
// Identify whether the element was parsed as part of a data type rule
let isDataTypeNode = Boolean((_a = getContainerOfType(containerNode === null || containerNode === void 0 ? void 0 : containerNode.grammarSource, ast.isParserRule)) === null || _a === void 0 ? void 0 : _a.dataType);
if (isDataTypeNode) {
while (isDataTypeNode) {
// Use the container to find the correct parent element
containerNode = containerNode === null || containerNode === void 0 ? void 0 : containerNode.container;
isDataTypeNode = Boolean((_b = getContainerOfType(containerNode === null || containerNode === void 0 ? void 0 : containerNode.grammarSource, ast.isParserRule)) === null || _b === void 0 ? void 0 : _b.dataType);
}
if (containerNode) {
return [containerNode.offset, containerNode.end];
}
}
return undefined;
}
/**
* Indicates whether the completion should continue to process the next completion context.
*
* The default implementation continues the completion only if there are currently no proposed completion items.
*/
continueCompletion(items) {
return items.length === 0;
}
/**
* This method returns two sets of token offset information.
*
* The `nextToken*` offsets are related to the token at the cursor position.
* If there is none, both offsets are simply set to `offset`.
*
* The `previousToken*` offsets are related to the last token before the current token at the cursor position.
* They are `undefined` if there is no token before the cursor position.
*/
backtrackToAnyToken(text, offset) {
const tokens = this.lexer.tokenize(text).tokens;
if (tokens.length === 0) {
// If we don't have any tokens in our document, just return the offset position
return {
nextTokenStart: offset,
nextTokenEnd: offset
};
}
let previousToken;
for (const token of tokens) {
if (token.startOffset >= offset) {
// We are between two tokens
// Return the current offset as the next token index
return {
nextTokenStart: offset,
nextTokenEnd: offset,
previousTokenStart: previousToken ? previousToken.startOffset : undefined,
previousTokenEnd: previousToken ? previousToken.endOffset + 1 : undefined
};
}
if (token.endOffset >= offset) {
// We are within a token
// Return the current and previous token offsets as normal
return {
nextTokenStart: token.startOffset,
nextTokenEnd: token.endOffset + 1,
previousTokenStart: previousToken ? previousToken.startOffset : undefined,
previousTokenEnd: previousToken ? previousToken.endOffset + 1 : undefined
};
}
previousToken = token;
}
// We have run into the end of the file
// Return the current offset as the next token index
return {
nextTokenStart: offset,
nextTokenEnd: offset,
previousTokenStart: previousToken ? previousToken.startOffset : undefined,
previousTokenEnd: previousToken ? previousToken.endOffset + 1 : undefined
};
}
completionFor(context, next, acceptor) {
if (ast.isKeyword(next.feature)) {
return this.completionForKeyword(context, next.feature, acceptor);
}
else if (ast.isCrossReference(next.feature) && context.node) {
return this.completionForCrossReference(context, next, acceptor);
}
// Don't offer any completion for other elements (i.e. terminals, data type rules).
// At the framework level, we cannot reasonably assume their contents.
// Adopters can override `completionFor` if they want to complete them anyway.
}
completionForCrossReference(context, next, acceptor) {
const assignment = getContainerOfType(next.feature, ast.isAssignment);
let node = context.node;
if (assignment && node) {
if (next.type) {
// When `type` is set, it indicates that we have just entered a new parser rule.
// The cross reference that we're trying to complete is on a new element that doesn't exist yet.
// So we create a new synthetic element with the correct type information.
node = {
$type: next.type,
$container: node,
$containerProperty: next.property
};
assignMandatoryProperties(this.astReflection, node);
}
const refInfo = {
reference: {
$refText: ''
},
container: node,
property: assignment.feature
};
try {
for (const candidate of this.getReferenceCandidates(refInfo, context)) {
acceptor(context, this.createReferenceCompletionItem(candidate));
}
}
catch (err) {
console.error(err);
}
}
}
/**
* Override this method to change how the stream of candidates is determined for a reference.
* This allows completion-specific modifications and refinements, e.g. filtering, to be
* applied to the proposal computation beyond the rules implemented in the scope provider.
*
* @param refInfo Information about the reference for which the candidates are requested.
* @param _context Information about the completion request including document, cursor position, token under cursor, etc.
* @returns A stream of all elements that are valid for the given reference.
*/
getReferenceCandidates(refInfo, _context) {
return this.scopeProvider.getScope(refInfo).getAllElements();
}
/**
* Override this method to change how reference completion items are created.
*
* To change the `kind` of a completion item, override the `NodeKindProvider` service instead.
* To change the `documentation`, override the `DocumentationProvider` service instead.
*
* @param nodeDescription The description of a reference candidate
* @returns A partial completion item
*/
createReferenceCompletionItem(nodeDescription) {
const kind = this.nodeKindProvider.getCompletionItemKind(nodeDescription);
const documentation = this.getReferenceDocumentation(nodeDescription);
return {
nodeDescription,
kind,
documentation,
detail: nodeDescription.type,
sortText: '0'
};
}
getReferenceDocumentation(nodeDescription) {
if (!nodeDescription.node) {
return undefined;
}
const documentationText = this.documentationProvider.getDocumentation(nodeDescription.node);
if (!documentationText) {
return undefined;
}
return { kind: 'markdown', value: documentationText };
}
completionForKeyword(context, keyword, acceptor) {
if (!this.filterKeyword(context, keyword)) {
return;
}
acceptor(context, {
label: keyword.value,
kind: this.getKeywordCompletionItemKind(keyword),
detail: 'Keyword',
sortText: '1'
});
}
getKeywordCompletionItemKind(_keyword) {
return CompletionItemKind.Keyword;
}
filterKeyword(context, keyword) {
// Filter out keywords that do not contain any letters
return /\p{L}/u.test(keyword.value);
}
fillCompletionItem(context, item) {
var _a, _b;
let label;
if (typeof item.label === 'string') {
label = item.label;
}
else if ('node' in item) {
const name = this.nameProvider.getName(item.node);
if (!name) {
return undefined;
}
label = name;
}
else if ('nodeDescription' in item) {
label = item.nodeDescription.name;
}
else {
return undefined;
}
let insertText;
if (typeof ((_a = item.textEdit) === null || _a === void 0 ? void 0 : _a.newText) === 'string') {
insertText = item.textEdit.newText;
}
else if (typeof item.insertText === 'string') {
insertText = item.insertText;
}
else {
insertText = label;
}
const textEdit = (_b = item.textEdit) !== null && _b !== void 0 ? _b : this.buildCompletionTextEdit(context, label, insertText);
if (!textEdit) {
return undefined;
}
// Copy all valid properties of `CompletionItem`
const completionItem = {
additionalTextEdits: item.additionalTextEdits,
command: item.command,
commitCharacters: item.commitCharacters,
data: item.data,
detail: item.detail,
documentation: item.documentation,
filterText: item.filterText,
insertText: item.insertText,
insertTextFormat: item.insertTextFormat,
insertTextMode: item.insertTextMode,
kind: item.kind,
labelDetails: item.labelDetails,
preselect: item.preselect,
sortText: item.sortText,
tags: item.tags,
textEditText: item.textEditText,
textEdit,
label
};
return completionItem;
}
buildCompletionTextEdit(context, label, newText) {
const content = context.textDocument.getText();
const identifier = content.substring(context.tokenOffset, context.offset);
if (this.fuzzyMatcher.match(identifier, label)) {
const start = context.textDocument.positionAt(context.tokenOffset);
const end = context.position;
return {
newText,
range: {
start,
end
}
};
}
else {
return undefined;
}
}
}
//# sourceMappingURL=completion-provider.js.map
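
To make the merge semantics concrete, a small sketch; the relative import mirrors this file's location and is otherwise an assumption:

import { mergeCompletionProviderOptions } from './completion-provider.js';

const merged = mergeCompletionProviderOptions([
    { triggerCharacters: ['.', ':'] },
    undefined, // languages without completion options are tolerated
    { triggerCharacters: [':'], allCommitCharacters: [] }
]);
// merged.triggerCharacters   -> ['.', ':']  (set-based union, first-occurrence order)
// merged.allCommitCharacters -> undefined   (empty unions collapse to undefined)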

completion-provider.js.map (file diff suppressed because one or more lines are too long)

follow-element-computation.d.ts

@@ -0,0 +1,35 @@
/******************************************************************************
* Copyright 2021 TypeFox GmbH
* This program and the accompanying materials are made available under the
* terms of the MIT License, which is available in the project root.
******************************************************************************/
import type { IToken } from 'chevrotain';
import * as ast from '../../languages/generated/ast.js';
export interface NextFeature<T extends ast.AbstractElement = ast.AbstractElement> {
/**
* A feature that could appear during completion.
*/
feature: T;
/**
* The type that carries this `feature`. Only set if we encounter a new type.
*/
type?: string;
/**
* The container property for the new `type`
*/
property?: string;
}
/**
* Calculates any features that can follow the given feature stack.
* This also includes features following optional features and features from previously called rules that could follow the last feature.
* @param featureStack A stack of features starting at the entry rule and ending at the feature of the current cursor position.
* @param unparsedTokens All tokens which haven't been parsed successfully yet. This is the case when we call this function inside an alternative.
* @returns Any `AbstractElement` that could follow the given feature stack.
*/
export declare function findNextFeatures(featureStack: NextFeature[][], unparsedTokens: IToken[]): NextFeature[];
/**
* Calculates the first child features of any `AbstractElement`.
* @param next The `AbstractElement` whose first child features should be calculated.
*/
export declare function findFirstFeatures(next: ast.AbstractElement | NextFeature): NextFeature[];
//# sourceMappingURL=follow-element-computation.d.ts.map
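
As a concrete illustration of the shape of these results, consider a hypothetical grammar rule and how the two functions would walk it:

// Hypothetical grammar rule:    Person: 'person' name=ID;
//
// findFirstFeatures(rule.definition) yields the leading keyword:
//     [{ feature: Keyword('person') }]
//
// Once the 'person' token has been consumed, findNextFeatures advances
// each stack to the assignment's terminal and records the container property:
//     [{ feature: RuleCall(ID), property: 'name' }]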

follow-element-computation.d.ts.map (file diff suppressed because one or more lines are too long)

follow-element-computation.js

@@ -0,0 +1,279 @@
/******************************************************************************
* Copyright 2021 TypeFox GmbH
* This program and the accompanying materials are made available under the
* terms of the MIT License, which is available in the project root.
******************************************************************************/
import * as ast from '../../languages/generated/ast.js';
import { isAstNode } from '../../syntax-tree.js';
import { getContainerOfType } from '../../utils/ast-utils.js';
import { getCrossReferenceTerminal, getExplicitRuleType, getTypeName, isArrayCardinality, isOptionalCardinality, terminalRegex } from '../../utils/grammar-utils.js';
/**
* Calculates any features that can follow the given feature stack.
* This also includes features following optional features and features from previously called rules that could follow the last feature.
* @param featureStack A stack of features starting at the entry rule and ending at the feature of the current cursor position.
* @param unparsedTokens All tokens which haven't been parsed successfully yet. This is the case when we call this function inside an alternative.
* @returns Any `AbstractElement` that could follow the given feature stack.
*/
export function findNextFeatures(featureStack, unparsedTokens) {
const context = {
stacks: featureStack,
tokens: unparsedTokens
};
interpretTokens(context);
// Reset the container property
context.stacks.flat().forEach(feature => { feature.property = undefined; });
const nextStacks = findNextFeatureStacks(context.stacks);
// We only need the last element of each stack
return nextStacks.map(e => e[e.length - 1]);
}
function findNextFeaturesInternal(options) {
const { next, cardinalities, visited, plus } = options;
const features = [];
const feature = next.feature;
if (visited.has(feature)) {
return [];
}
else if (!ast.isGroup(feature)) {
// Do not add the feature to the list if it is a group
// `findFirstFeaturesInternal` will take care of this
visited.add(feature);
}
let parent;
let item = feature;
while (item.$container) {
if (ast.isGroup(item.$container)) {
parent = item.$container;
break;
}
else if (ast.isAbstractElement(item.$container)) {
item = item.$container;
}
else {
break;
}
}
// First try to iterate the same element again
if (isArrayCardinality(item.cardinality)) {
const repeatingFeatures = findFirstFeaturesInternal({
next: {
feature: item,
type: next.type
},
cardinalities,
visited,
plus
});
for (const repeatingFeature of repeatingFeatures) {
plus.add(repeatingFeature.feature);
}
features.push(...repeatingFeatures);
}
if (parent) {
const ownIndex = parent.elements.indexOf(item);
// Find next elements of the same group
if (ownIndex !== undefined && ownIndex < parent.elements.length - 1) {
features.push(...findNextFeaturesInGroup({
feature: parent,
type: next.type
}, ownIndex + 1, cardinalities, visited, plus));
}
// Try to find the next elements of the parent
// Only do this if every following element is either optional or has been parsed as +
if (features.every(e => isOptionalCardinality(e.feature.cardinality, e.feature) || isOptionalCardinality(cardinalities.get(e.feature)) || plus.has(e.feature))) {
features.push(...findNextFeaturesInternal({
next: {
feature: parent,
type: next.type
},
cardinalities,
visited,
plus
}));
}
}
return features;
}
/**
* Calculates the first child features of any `AbstractElement`.
* @param next The `AbstractElement` whose first child features should be calculated.
*/
export function findFirstFeatures(next) {
if (isAstNode(next)) {
next = { feature: next };
}
return findFirstFeaturesInternal({ next, cardinalities: new Map(), visited: new Set(), plus: new Set() });
}
function findFirstFeaturesInternal(options) {
var _a, _b, _c;
const { next, cardinalities, visited, plus } = options;
if (next === undefined) {
return [];
}
const { feature, type } = next;
if (ast.isGroup(feature)) {
if (visited.has(feature)) {
return [];
}
else {
visited.add(feature);
}
return findNextFeaturesInGroup(next, 0, cardinalities, visited, plus)
.map(e => modifyCardinality(e, feature.cardinality, cardinalities));
}
else if (ast.isAlternatives(feature) || ast.isUnorderedGroup(feature)) {
return feature.elements.flatMap(e => findFirstFeaturesInternal({
next: {
feature: e,
type,
property: next.property
},
cardinalities,
visited,
plus
}))
.map(e => modifyCardinality(e, feature.cardinality, cardinalities));
}
else if (ast.isAssignment(feature)) {
const assignmentNext = {
feature: feature.terminal,
type,
property: (_a = next.property) !== null && _a !== void 0 ? _a : feature.feature
};
return findFirstFeaturesInternal({ next: assignmentNext, cardinalities, visited, plus })
.map(e => modifyCardinality(e, feature.cardinality, cardinalities));
}
else if (ast.isAction(feature)) {
return findNextFeaturesInternal({
next: {
feature,
type: getTypeName(feature),
property: (_b = next.property) !== null && _b !== void 0 ? _b : feature.feature
},
cardinalities,
visited,
plus
});
}
else if (ast.isRuleCall(feature) && ast.isParserRule(feature.rule.ref)) {
const rule = feature.rule.ref;
const ruleCallNext = {
feature: rule.definition,
type: rule.fragment || rule.dataType ? undefined : ((_c = getExplicitRuleType(rule)) !== null && _c !== void 0 ? _c : rule.name),
property: next.property
};
return findFirstFeaturesInternal({ next: ruleCallNext, cardinalities, visited, plus })
.map(e => modifyCardinality(e, feature.cardinality, cardinalities));
}
else {
return [next];
}
}
/**
* Modifying the cardinality is necessary to identify which features are coming from an optional feature.
* Those features should be optional as well.
* @param next The next feature that could be made optional.
* @param cardinality The cardinality of the calling (parent) object.
* @returns A new feature that may now be optional (`?` or `*`).
*/
function modifyCardinality(next, cardinality, cardinalities) {
cardinalities.set(next.feature, cardinality);
return next;
}
function findNextFeaturesInGroup(next, index, cardinalities, visited, plus) {
var _a;
const features = [];
let firstFeature;
while (index < next.feature.elements.length) {
const feature = next.feature.elements[index++];
firstFeature = {
feature,
type: next.type
};
features.push(...findFirstFeaturesInternal({
next: firstFeature,
cardinalities,
visited,
plus
}));
if (!isOptionalCardinality((_a = firstFeature.feature.cardinality) !== null && _a !== void 0 ? _a : cardinalities.get(firstFeature.feature), firstFeature.feature)) {
break;
}
}
return features;
}
function interpretTokens(context) {
for (const token of context.tokens) {
const nextFeatureStacks = findNextFeatureStacks(context.stacks, token);
context.stacks = nextFeatureStacks;
}
}
function findNextFeatureStacks(stacks, token) {
const newStacks = [];
for (const stack of stacks) {
newStacks.push(...interpretStackToken(stack, token));
}
return newStacks;
}
function interpretStackToken(stack, token) {
const cardinalities = new Map();
const plus = new Set(stack.map(e => e.feature).filter(isPlusFeature));
const newStacks = [];
while (stack.length > 0) {
const top = stack.pop();
const allNextFeatures = findNextFeaturesInternal({
next: top,
cardinalities,
plus,
visited: new Set()
}).filter(next => token ? featureMatches(next.feature, token) : true);
for (const nextFeature of allNextFeatures) {
newStacks.push([...stack, nextFeature]);
}
if (!allNextFeatures.every(e => isOptionalCardinality(e.feature.cardinality, e.feature) || isOptionalCardinality(cardinalities.get(e.feature)))) {
break;
}
}
return newStacks;
}
function isPlusFeature(feature) {
if (feature.cardinality === '+') {
return true;
}
const assignment = getContainerOfType(feature, ast.isAssignment);
if (assignment && assignment.cardinality === '+') {
return true;
}
return false;
}
function featureMatches(feature, token) {
if (ast.isKeyword(feature)) {
const content = feature.value;
return content === token.image;
}
else if (ast.isRuleCall(feature)) {
return ruleMatches(feature.rule.ref, token);
}
else if (ast.isCrossReference(feature)) {
const crossRefTerminal = getCrossReferenceTerminal(feature);
if (crossRefTerminal) {
return featureMatches(crossRefTerminal, token);
}
}
return false;
}
function ruleMatches(rule, token) {
if (ast.isParserRule(rule)) {
const ruleFeatures = findFirstFeatures(rule.definition);
return ruleFeatures.some(e => featureMatches(e.feature, token));
}
else if (ast.isTerminalRule(rule)) {
// We have to take keywords into account,
// e.g. most keywords are valid IDs as well.
// TODO: Only return 'true' if this terminal does not also match a keyword.
return terminalRegex(rule).test(token.image);
}
else {
return false;
}
}
//# sourceMappingURL=follow-element-computation.js.map
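
Putting both entry points together, a minimal sketch of the lookahead flow that `DefaultCompletionProvider.findFeaturesAt` performs (see completion-provider.js above); the helper name is hypothetical and the relative imports mirror this file's location:

import type { IToken } from 'chevrotain';
import * as ast from '../../languages/generated/ast.js';
import { getEntryRule, getExplicitRuleType } from '../../utils/grammar-utils.js';
import { findFirstFeatures, findNextFeatures } from './follow-element-computation.js';

// Compute which grammar elements may appear after the given tokens,
// starting the interpretation at the grammar's entry rule.
export function elementsAfter(grammar: ast.Grammar, unparsedTokens: IToken[]) {
    const entry = getEntryRule(grammar);
    if (!entry) {
        throw new Error('Missing entry parser rule');
    }
    // Seed one stack per first feature of the entry rule...
    const first = findFirstFeatures({
        feature: entry.definition,
        type: getExplicitRuleType(entry)
    });
    // ...then advance every stack through the not-yet-parsed tokens.
    return findNextFeatures(first.map(f => [f]), unparsedTokens);
}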

follow-element-computation.js.map (file diff suppressed because one or more lines are too long)