import {
    Parser,
    Lexer,
    Token,
    CharStreams,
    CommonTokenStream,
    CharStream,
    ParserRuleContext,
    ParseTreeWalker,
    ParseTreeListener,
} from 'antlr4ng';
import { CandidatesCollection, CodeCompletionCore } from 'antlr4-c3';
import { findCaretTokenIndex } from './utils/findCaretTokenIndex';
import {
    CaretPosition,
    Suggestions,
    SyntaxSuggestion,
    WordRange,
    TextSlice,
} from './basic-parser-types';
import ParseErrorListener, { ParseError, ErrorListener } from './parseErrorListener';
import { ErrorStrategy } from './errorStrategy';

interface IParser<IParserRuleContext extends ParserRuleContext> extends Parser {
    // Customized in our parser
    program(): IParserRuleContext;
}

interface SplitListener extends ParseTreeListener {
    statementsContext: ParserRuleContext[];
}
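
// A minimal sketch of what a SplitListener implementation might look like.
// `StatementContext`, `enterStatement`, and `MySqlSplitListener` are hypothetical
// names; a real implementation hooks the statement rule of its own grammar and
// pushes each matched context into `statementsContext`:
//
//   class MySqlSplitListener implements SplitListener {
//       statementsContext: ParserRuleContext[] = [];
//       enterStatement = (ctx: StatementContext) => {
//           this.statementsContext.push(ctx);
//       };
//   }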

/**
 * Custom Parser class; concrete subclasses need to extend it.
 */
export default abstract class BasicParser<
    L extends Lexer = Lexer,
    PRC extends ParserRuleContext = ParserRuleContext,
    P extends IParser<PRC> = IParser<PRC>,
> {
    /** members for cache start */
    protected _charStreams: CharStream;
    protected _lexer: L;
    protected _tokenStream: CommonTokenStream;
    protected _parser: P;
    protected _parseTree: PRC;
    protected _parsedInput: string = null;
    protected _parseErrors: ParseError[] = [];
    /** members for cache end */

    private _errorListener: ErrorListener<any> = (error) => {
        this._parseErrors.push(error);
    };

    /**
     * PreferredRules for antlr4-c3
     */
    protected abstract preferredRules: Set<number>;

    /**
     * Create an antlr4 Lexer instance.
     * @param charStreams CharStream of the source string
     */
    protected abstract createLexerFromCharStream(charStreams: CharStream): L;

    /**
     * Create a Parser from a CommonTokenStream.
     * @param tokenStream CommonTokenStream
     */
    protected abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;

    /**
     * Convert candidates to suggestions.
     * @param candidates candidate list
     * @param allTokens all tokens from input
     * @param caretTokenIndex tokenIndex of caretPosition
     * @param tokenIndexOffset offset of the tokenIndex in the candidates compared to the tokenIndex in allTokens
     */
    protected abstract processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number,
        tokenIndexOffset: number
    ): Suggestions<Token>;

    /**
     * Get a splitListener instance.
     */
    protected abstract get splitListener(): SplitListener;

    /**
     * Create an antlr4 lexer from input.
     * @param input source string
     */
    public createLexer(input: string, errorListener?: ErrorListener<any>) {
        const charStreams = CharStreams.fromString(input);
        const lexer = this.createLexerFromCharStream(charStreams);
        if (errorListener) {
            lexer.removeErrorListeners();
            lexer.addErrorListener(new ParseErrorListener(errorListener));
        }
        return lexer;
    }
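
    // Usage sketch, assuming a hypothetical concrete subclass `MySqlParser`
    // that implements the abstract members above:
    //
    //   const mysql = new MySqlParser();
    //   const lexer = mysql.createLexer('SELECT 1;', (err) => console.error(err));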

    /**
     * Create an antlr4 parser from input.
     * @param input source string
     */
    public createParser(input: string, errorListener?: ErrorListener<any>) {
        const lexer = this.createLexer(input, errorListener);
        const tokenStream = new CommonTokenStream(lexer);
        const parser = this.createParserFromTokenStream(tokenStream);

        if (errorListener) {
            parser.removeErrorListeners();
            parser.addErrorListener(new ParseErrorListener(errorListener));
        }

        return parser;
    }
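
    // Usage sketch (same hypothetical `MySqlParser` subclass): the returned
    // parser is fresh and uncached, so each call re-tokenizes the input.
    //
    //   const parser = mysql.createParser('SELECT 1;');
    //   const tree = parser.program();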

    /**
     * Parse the input string and return the parseTree.
     * @param input source string
     * @param errorListener listens for parser errors and lexer errors.
     * @returns parseTree
     */
    public parse(input: string, errorListener?: ErrorListener<any>) {
        const parser = this.createParser(input, errorListener);
        parser.buildParseTrees = true;
        parser.errorHandler = new ErrorStrategy();

        return parser.program();
    }
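
    // Usage sketch: collect errors while parsing (hypothetical `MySqlParser`):
    //
    //   const errors: ParseError[] = [];
    //   const tree = mysql.parse('SELECT FROM t;', (err) => errors.push(err));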

    /**
     * Create an antlr4 parser from input,
     * and cache the instances.
     * @param input source string
     */
    private createParserWithCache(input: string): P {
        this._parseTree = null;
        this._charStreams = CharStreams.fromString(input);
        this._lexer = this.createLexerFromCharStream(this._charStreams);

        this._lexer.removeErrorListeners();
        this._lexer.addErrorListener(new ParseErrorListener(this._errorListener));

        this._tokenStream = new CommonTokenStream(this._lexer);
        /**
         * All tokens are generated in advance.
         * This can cause performance degradation, but it seems necessary for now,
         * because the tokens will be used multiple times.
         */
        this._tokenStream.fill();

        this._parser = this.createParserFromTokenStream(this._tokenStream);
        this._parser.buildParseTrees = true;
        this._parser.errorHandler = new ErrorStrategy();

        return this._parser;
    }

    /**
     * If it is invoked multiple times in a row with the same input,
     * this method returns the cached parsing result directly,
     * unless the errorListener parameter is passed.
     * @param input source string
     * @param errorListener listens for errors
     * @returns parseTree
     */
    private parseWithCache(input: string, errorListener?: ErrorListener<any>) {
        // Avoid parsing the same input repeatedly.
        if (this._parsedInput === input && !errorListener) {
            return this._parseTree;
        }
        this._parseErrors = [];
        const parser = this.createParserWithCache(input);
        this._parsedInput = input;

        parser.removeErrorListeners();
        parser.addErrorListener(new ParseErrorListener(this._errorListener));

        this._parseTree = parser.program();

        return this._parseTree;
    }

    /**
     * Validate the input string and return syntax errors, if any.
     * @param input source string
     * @returns syntax errors
     */
    public validate(input: string): ParseError[] {
        this.parseWithCache(input);
        return this._parseErrors;
    }
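
    // Usage sketch (hypothetical `MySqlParser`): an empty array means the
    // input parsed cleanly.
    //
    //   const errors = mysql.validate('SELECT FROM;');
    //   if (errors.length) console.log(errors);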

    /**
     * Get all tokens of the input string; '<EOF>' is not included.
     * @param input source string
     * @returns Token[]
     */
    public getAllTokens(input: string): Token[] {
        this.parseWithCache(input);
        let allTokens = this._tokenStream.getTokens();
        if (allTokens[allTokens.length - 1].text === '<EOF>') {
            allTokens = allTokens.slice(0, -1);
        }
        return allTokens;
    }
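
    // Usage sketch: dump token text, EOF excluded (hypothetical `MySqlParser`):
    //
    //   const tokens = mysql.getAllTokens('SELECT id FROM t;');
    //   console.log(tokens.map((t) => t.text));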

    /**
     * Walk the parseTree with the given listener.
     * @param listener listener instance extending ParseTreeListener
     * @param parseTree parse tree
     */
    public listen<PTL extends ParseTreeListener = ParseTreeListener>(
        listener: PTL,
        parseTree: PRC
    ) {
        ParseTreeWalker.DEFAULT.walk(listener, parseTree);
    }
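
    // Usage sketch: walk a parse tree with a custom listener. `MyListener` is
    // hypothetical; it only needs to satisfy ParseTreeListener.
    //
    //   const tree = mysql.parse('SELECT 1;');
    //   mysql.listen(new MyListener(), tree);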

    /**
     * Split the input into statements.
     * If a syntax error exists, it returns null.
     * @param input source string
     */
    public splitSQLByStatement(input: string): TextSlice[] {
        const errors = this.validate(input);
        if (errors.length) {
            return null;
        }
        const splitListener = this.splitListener;
        // TODO: add splitListener to all sqlParser implementations and remove the following if
        if (!splitListener) return null;

        this.listen(splitListener, this._parseTree);

        const res = splitListener.statementsContext.map((context) => {
            const { start, stop } = context;
            return {
                startIndex: start.start,
                endIndex: stop.stop,
                startLine: start.line,
                endLine: stop.line,
                startColumn: start.column + 1,
                endColumn: stop.column + 1 + stop.text.length,
                text: this._parsedInput.slice(start.start, stop.stop + 1),
            };
        });

        return res;
    }
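
    // Usage sketch: each TextSlice carries 0-based character indexes and
    // 1-based line/column positions (columns are antlr's 0-based column + 1).
    //
    //   const slices = mysql.splitSQLByStatement('SELECT 1; SELECT 2;');
    //   // -> [{ text: 'SELECT 1;', startIndex: 0, ... }, { text: 'SELECT 2;', ... }]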

    /**
     * Get suggestions of syntax and token at caretPosition.
     * @param input source string
     * @param caretPosition caret position, such as cursor position
     * @returns suggestions
     */
    public getSuggestionAtCaretPosition(
        input: string,
        caretPosition: CaretPosition
    ): Suggestions | null {
        const splitListener = this.splitListener;
        // TODO: add splitListener to all sqlParser implementations and remove the following if
        if (!splitListener) return null;

        this.parseWithCache(input);
        let sqlParserIns = this._parser;
        const allTokens = this.getAllTokens(input);
        let caretTokenIndex = findCaretTokenIndex(caretPosition, allTokens);
        let c3Context: ParserRuleContext = this._parseTree;
        let tokenIndexOffset: number = 0;

        if (!caretTokenIndex && caretTokenIndex !== 0) return null;

        /**
         * Split the sql by statement
         * and try to collect candidates in as small a range as possible.
         */
        this.listen(splitListener, this._parseTree);
        const statementCount = splitListener.statementsContext?.length;
        const statementsContext = splitListener.statementsContext;

        // If there are multiple statements.
        if (statementCount > 1) {
            /**
             * Find a minimum valid range, reparse the fragment, and provide a new parse tree to C3.
             * The boundaries of this range must be statements with no syntax errors,
             * which ensures stable performance of C3.
             */
            let startStatement: ParserRuleContext;
            let stopStatement: ParserRuleContext;

            for (let index = 0; index < statementCount; index++) {
                const ctx = statementsContext[index];
                const isCurrentCtxValid = !ctx.exception;
                if (!isCurrentCtxValid) continue;

                /**
                 * Ensure that the statementContext before the left boundary
                 * and the statementContext after the right boundary are valid SQL statements.
                 */
                const isPrevCtxValid = index === 0 || !statementsContext[index - 1]?.exception;
                const isNextCtxValid =
                    index === statementCount - 1 || !statementsContext[index + 1]?.exception;

                if (ctx.stop.tokenIndex < caretTokenIndex && isPrevCtxValid) {
                    startStatement = ctx;
                }

                if (!stopStatement && ctx.start.tokenIndex > caretTokenIndex && isNextCtxValid) {
                    stopStatement = ctx;
                    break;
                }
            }

            // A boundary expressed as character indexes into the input.
            const startIndex = startStatement?.start?.start ?? 0;
            const stopIndex = stopStatement?.stop?.stop ?? input.length - 1;

            /**
             * Save the offset of the tokenIndex in the sliced input
             * compared to the tokenIndex in the whole input.
             */
            tokenIndexOffset = startStatement?.start?.tokenIndex ?? 0;
            caretTokenIndex = caretTokenIndex - tokenIndexOffset;

            /**
             * Reparse the input fragment;
             * c3 will collect candidates in the newly generated parseTree.
             */
            const inputSlice = input.slice(startIndex, stopIndex);

            const lexer = this.createLexer(inputSlice);
            lexer.removeErrorListeners();
            const tokenStream = new CommonTokenStream(lexer);
            tokenStream.fill();

            const parser = this.createParserFromTokenStream(tokenStream);
            parser.removeErrorListeners();
            parser.buildParseTrees = true;
            parser.errorHandler = new ErrorStrategy();

            sqlParserIns = parser;
            c3Context = parser.program();
        }

        const core = new CodeCompletionCore(sqlParserIns);
        core.preferredRules = this.preferredRules;

        const candidates = core.collectCandidates(caretTokenIndex, c3Context);
        const originalSuggestions = this.processCandidates(
            candidates,
            allTokens,
            caretTokenIndex,
            tokenIndexOffset
        );

        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax.map(
            (syntaxCtx) => {
                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map((token) => {
                    return {
                        text: this._parsedInput.slice(token.start, token.stop + 1),
                        startIndex: token.start,
                        endIndex: token.stop,
                        line: token.line,
                        startColumn: token.column + 1,
                        stopColumn: token.column + 1 + token.text.length,
                    };
                });
                return {
                    syntaxContextType: syntaxCtx.syntaxContextType,
                    wordRanges,
                };
            }
        );
        return {
            syntax: syntaxSuggestions,
            keywords: originalSuggestions.keywords,
        };
    }
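
    // Usage sketch, assuming CaretPosition carries 1-based `lineNumber` and
    // `column` fields (matching what findCaretTokenIndex expects):
    //
    //   const pos: CaretPosition = { lineNumber: 1, column: 8 };
    //   const suggestions = mysql.getSuggestionAtCaretPosition('SELECT  FROM t;', pos);
    //   console.log(suggestions?.keywords);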
}
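
// A hedged sketch of a concrete subclass. The grammar artifacts used here
// (MySqlLexer, MySqlGeneratedParser, MySqlSplitListener, RULE_tableName) are
// hypothetical stand-ins for antlr4ng-generated classes:
//
//   export class MySqlParser extends BasicParser<MySqlLexer, ParserRuleContext, any> {
//       protected preferredRules = new Set([MySqlGeneratedParser.RULE_tableName]);
//       protected createLexerFromCharStream(charStreams: CharStream) {
//           return new MySqlLexer(charStreams);
//       }
//       protected createParserFromTokenStream(tokenStream: CommonTokenStream) {
//           return new MySqlGeneratedParser(tokenStream);
//       }
//       protected processCandidates(candidates, allTokens, caretTokenIndex, tokenIndexOffset) {
//           return { syntax: [], keywords: [] };
//       }
//       protected get splitListener() {
//           return new MySqlSplitListener();
//       }
//   }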
|