chore: devops (#180)

* ci: add dependencies about lint tool

* ci: replace eslint with prettier

* ci: add husky, cz and commitlint

* style: lint fix via prettier

* ci: add prettier and check-types to github workflow

Author: Hayden
Date: 2023-10-13 11:16:36 +08:00
Committed by: GitHub
Parent: 4d1dfa676f
Commit: 7de192d486
105 changed files with 2615 additions and 1823 deletions
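
The tooling files this commit introduces (prettier, husky, cz/commitizen, commitlint) are not among the diffs excerpted below, which only show the resulting reformatting of source files. As a rough sketch of what such a setup usually looks like — file names and option values here are assumptions, not taken from the commit:

// prettier.config.js — hypothetical values; the commit's actual options may differ
module.exports = {
    printWidth: 100,
    tabWidth: 4,
    singleQuote: true,
};

// commitlint.config.js — hypothetical; enforces conventional messages like the "ci:" entries above
module.exports = {
    extends: ['@commitlint/config-conventional'],
};

Husky would then typically run commitlint from a commit-msg hook and prettier from a pre-commit hook.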

View File

@@ -78,4 +78,4 @@ export interface TextSlice {
     startColumn: number;
     endColumn: number;
     text: string;
-}
+}

View File

@@ -1,21 +1,21 @@
-import {
-    Parser,
-    Lexer,
+import {
+    Parser,
+    Lexer,
     Token,
-    CharStreams,
-    CommonTokenStream,
+    CharStreams,
+    CommonTokenStream,
     CodePointCharStream,
-    ParserRuleContext
+    ParserRuleContext,
 } from 'antlr4ts';
 import { ParseTreeWalker, ParseTreeListener } from 'antlr4ts/tree';
 import { CandidatesCollection, CodeCompletionCore } from 'antlr4-c3';
 import { findCaretTokenIndex } from '../../utils/findCaretTokenIndex';
-import {
+import {
     CaretPosition,
     Suggestions,
     SyntaxSuggestion,
     WordRange,
-    TextSlice
+    TextSlice,
 } from './basic-parser-types';
 import ParserErrorListener, {
     ParserError,
@@ -36,10 +36,10 @@ interface SplitListener extends ParseTreeListener {
  * Custom Parser class, subclass needs extends it.
  */
 export default abstract class BasicParser<
-    L extends Lexer = Lexer,
+    L extends Lexer = Lexer,
     PRC extends ParserRuleContext = ParserRuleContext,
-    P extends IParser<PRC> = IParser<PRC>
-> {
+    P extends IParser<PRC> = IParser<PRC>,
+> {
     protected _charStreams: CodePointCharStream;
     protected _lexer: L;
     protected _tokenStream: CommonTokenStream;
@@ -52,7 +52,7 @@ export default abstract class BasicParser<
      * PreferredRules for antlr4-c3
      */
     protected abstract preferredRules: Set<number>;
-
+
     /**
      * Create a antrl4 Lexer instance
      * @param input source string
@@ -64,26 +64,26 @@ export default abstract class BasicParser<
      * @param tokenStream CommonTokenStream
      */
     protected abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;
 
     /**
      * Convert candidates to suggestions
      * @param candidates candidate list
      * @param allTokens all tokens from input
      * @param caretTokenIndex tokenIndex of caretPosition
-     * @param tokenIndexOffset offset of the tokenIndex in the candidates
+     * @param tokenIndexOffset offset of the tokenIndex in the candidates
      * compared to the tokenIndex in allTokens
      */
     protected abstract processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
+        candidates: CandidatesCollection,
+        allTokens: Token[],
         caretTokenIndex: number,
-        tokenIndexOffset: number,
+        tokenIndexOffset: number
     ): Suggestions<Token>;
 
     /**
      * Get splitListener instance.
      */
-    protected abstract get splitListener (): SplitListener;
+    protected abstract get splitListener(): SplitListener;
 
     /**
      * Create an anltr4 lexer from input.
@@ -92,8 +92,8 @@ export default abstract class BasicParser<
 
     public createLexer(input: string) {
         const charStreams = CharStreams.fromString(input.toUpperCase());
         const lexer = this.createLexerFormCharStream(charStreams);
         return lexer;
     }
-
+
     /**
@@ -104,7 +104,7 @@ export default abstract class BasicParser<
         const lexer = this.createLexer(input);
         const tokenStream = new CommonTokenStream(lexer);
         const parser = this.createParserFromTokenStream(tokenStream);
-        return parser;
+        return parser;
     }
 
     /**
@@ -119,11 +119,11 @@ export default abstract class BasicParser<
 
         this._tokenStream = new CommonTokenStream(this._lexer);
         this._tokenStream.fill();
 
         this._parser = this.createParserFromTokenStream(this._tokenStream);
         this._parser.buildParseTree = true;
 
-        return this._parser
+        return this._parser;
     }
 
     /**
@@ -134,13 +134,10 @@ export default abstract class BasicParser<
      * @param errorListener listen errors
      * @returns parserTree
      */
-    public parse(
-        input: string,
-        errorListener?: ErrorHandler<any>
-    ) {
+    public parse(input: string, errorListener?: ErrorHandler<any>) {
         // Avoid parsing the same input repeatedly.
-        if(this._parsedInput === input && !errorListener) {
-            return;
+        if (this._parsedInput === input && !errorListener) {
+            return this._parserTree;
         }
 
         const parser = this.createParserWithCache(input);
@@ -150,12 +147,12 @@ export default abstract class BasicParser<
         this._errorCollector.clear();
 
         parser.addErrorListener(this._errorCollector);
-        if(errorListener) {
+        if (errorListener) {
             parser.addErrorListener(new ParserErrorListener(errorListener));
         }
 
         this._parserTree = parser.program();
 
         return this._parserTree;
     }
@@ -178,11 +175,11 @@ export default abstract class BasicParser<
     public getAllTokens(input: string): Token[] {
         this.parse(input);
         let allTokens = this._tokenStream.getTokens();
-        if(allTokens[allTokens.length - 1].text === '<EOF>') {
-            allTokens = allTokens.slice(0, -1)
+        if (allTokens[allTokens.length - 1].text === '<EOF>') {
+            allTokens = allTokens.slice(0, -1);
         }
-        return allTokens
-    };
+        return allTokens;
+    }
     /**
      * It convert tree to string, it's convenient to use in unit test.
      * @param string input
@@ -204,7 +201,10 @@ export default abstract class BasicParser<
      * @param listener Listener instance extends ParserListener
      * @param parserTree parser Tree
      */
-    public listen<PTL extends ParseTreeListener = ParseTreeListener>(listener: PTL, parserTree: PRC) {
+    public listen<PTL extends ParseTreeListener = ParseTreeListener>(
+        listener: PTL,
+        parserTree: PRC
+    ) {
         ParseTreeWalker.DEFAULT.walk(listener, parserTree);
     }
 
@@ -217,8 +217,8 @@ export default abstract class BasicParser<
         this.parse(input);
         const splitListener = this.splitListener;
         this.listen(splitListener, this._parserTree);
 
-        const res = splitListener.statementsContext.map(context => {
+        const res = splitListener.statementsContext.map((context) => {
             const { start, stop } = context;
             return {
                 startIndex: start.startIndex,
@@ -228,8 +228,8 @@ export default abstract class BasicParser<
                 startColumn: start.charPositionInLine + 1,
                 endColumn: stop.charPositionInLine + stop.text.length,
                 text: this._parsedInput.slice(start.startIndex, stop.stopIndex + 1),
-            }
-        })
+            };
+        });
 
         return res;
     }
@@ -240,10 +240,13 @@ export default abstract class BasicParser<
      * @param caretPosition caret position, such as cursor position
      * @returns suggestion
      */
-    public getSuggestionAtCaretPosition(input: string, caretPosition: CaretPosition): Suggestions | null {
+    public getSuggestionAtCaretPosition(
+        input: string,
+        caretPosition: CaretPosition
+    ): Suggestions | null {
         const splitListener = this.splitListener;
         // TODO: add splitListener to all sqlParser implements add remove following if
-        if(!splitListener) return null;
+        if (!splitListener) return null;
 
         this.parse(input);
         let sqlParserIns = this._parser;
@@ -252,8 +255,8 @@ export default abstract class BasicParser<
         let c3Context: ParserRuleContext = this._parserTree;
         let tokenIndexOffset: number = 0;
 
-        if(!caretTokenIndex && caretTokenIndex !== 0) return null;
+        if (!caretTokenIndex && caretTokenIndex !== 0) return null;
 
         /**
          * Split sql by statement.
          * Try to collect candidates from the caret statement only.
@@ -263,16 +266,19 @@ export default abstract class BasicParser<
         // If there are multiple statements.
         if (splitListener.statementsContext.length > 1) {
             // find statement rule context where caretPosition is located.
-            const caretStatementContext = splitListener?.statementsContext.find(ctx => {
-                return caretTokenIndex <= ctx.stop?.tokenIndex && caretTokenIndex >= ctx.start.tokenIndex;
+            const caretStatementContext = splitListener?.statementsContext.find((ctx) => {
+                return (
+                    caretTokenIndex <= ctx.stop?.tokenIndex &&
+                    caretTokenIndex >= ctx.start.tokenIndex
+                );
             });
 
-            if(caretStatementContext) {
-                c3Context = caretStatementContext
+            if (caretStatementContext) {
+                c3Context = caretStatementContext;
             } else {
-                const lastStatementToken= splitListener
-                    .statementsContext[splitListener?.statementsContext.length - 1]
-                    .start;
+                const lastStatementToken =
+                    splitListener.statementsContext[splitListener?.statementsContext.length - 1]
+                        .start;
                 /**
                  * If caretStatementContext is not found and it follows all statements.
                  * Reparses part of the input following the penultimate statement.
@@ -281,8 +287,8 @@ export default abstract class BasicParser<
                 if (caretTokenIndex > lastStatementToken?.tokenIndex) {
                     /**
                      * Save offset of the tokenIndex in the partInput
-                     * compared to the tokenIndex in the whole input
-                     */
+                     * compared to the tokenIndex in the whole input
+                     */
                     tokenIndexOffset = lastStatementToken?.tokenIndex;
                     // Correct caretTokenIndex
                     caretTokenIndex = caretTokenIndex - tokenIndexOffset;
@@ -304,28 +310,34 @@ export default abstract class BasicParser<
         core.preferredRules = this.preferredRules;
         const candidates = core.collectCandidates(caretTokenIndex, c3Context);
-        const originalSuggestions = this.processCandidates(candidates, allTokens, caretTokenIndex, tokenIndexOffset);
+        const originalSuggestions = this.processCandidates(
+            candidates,
+            allTokens,
+            caretTokenIndex,
+            tokenIndexOffset
+        );
 
-        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax
-            .map(syntaxCtx => {
-                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map(token => {
+        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax.map(
+            (syntaxCtx) => {
+                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map((token) => {
                     return {
                         text: this._parsedInput.slice(token.startIndex, token.stopIndex + 1),
                         startIndex: token.startIndex,
                         stopIndex: token.stopIndex,
                         line: token.line,
                         startColumn: token.charPositionInLine + 1,
-                        stopColumn: token.charPositionInLine + token.text.length
-                    }
-                })
+                        stopColumn: token.charPositionInLine + token.text.length,
+                    };
+                });
                 return {
                     syntaxContextType: syntaxCtx.syntaxContextType,
                     wordRanges,
-                }
-            })
+                };
+            }
+        );
 
         return {
             syntax: syntaxSuggestions,
-            keywords: originalSuggestions.keywords
-        }
+            keywords: originalSuggestions.keywords,
+        };
     }
 }
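
To make the reformatted BasicParser API above concrete, here is a hedged usage sketch through the FlinkSQL subclass shown later in this diff. The import path and the CaretPosition field names are assumptions inferred from the types referenced above, not verified against the package:

import FlinkSQL from '../parser/flinksql'; // hypothetical path

const parser = new FlinkSQL();

// parse() caches the tree per input and reports errors through the optional callback.
parser.parse('SELECT id FROM tb1;', (error) => {
    console.log(error.startLine, error.message);
});

// getAllTokens() returns the token stream with the trailing <EOF> token stripped.
const tokens = parser.getAllTokens('SELECT id FROM tb1;');

// Completion candidates at a caret position (assumed shape: { lineNumber, column }).
const suggestions = parser.getSuggestionAtCaretPosition('SELECT * FROM ', {
    lineNumber: 1,
    column: 15,
});
console.log(suggestions?.keywords);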

View File

@@ -1,5 +1,5 @@
 import { Token, Recognizer, ParserErrorListener, RecognitionException } from 'antlr4ts';
-import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator'
+import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator';
 
 export interface ParserError {
     startLine: number;
@@ -26,15 +26,15 @@ export class ParserErrorCollector implements ParserErrorListener {
     syntaxError(
         recognizer: Recognizer<Token, ATNSimulator>,
-        offendingSymbol: Token,
+        offendingSymbol: Token,
         line: number,
-        charPositionInLine: number,
-        msg: string,
-        e: RecognitionException,
+        charPositionInLine: number,
+        msg: string,
+        e: RecognitionException
     ) {
         let endCol = charPositionInLine + 1;
         if (offendingSymbol && offendingSymbol.text !== null) {
-            endCol = charPositionInLine + offendingSymbol.text.length;
+            endCol = charPositionInLine + (offendingSymbol.text?.length ?? 0);
         }
 
         this._parseErrors.push({
             startLine: line,
@@ -51,7 +51,7 @@ export class ParserErrorCollector implements ParserErrorListener {
             recognizer,
             offendingSymbol,
             charPositionInLine,
-        })
+        });
     }
 
     clear() {
@@ -59,12 +59,12 @@ export class ParserErrorCollector implements ParserErrorListener {
         this._syntaxErrors = [];
     }
 
-    get parserErrors () {
-        return this._parseErrors
+    get parserErrors() {
+        return this._parseErrors;
     }
 }
 
-export default class CustomParserErrorListener implements ParserErrorListener {
+export default class CustomParserErrorListener implements ParserErrorListener {
     private _errorHandler;
 
     constructor(errorListener: ErrorHandler<Token>) {
@@ -72,28 +72,35 @@ export default class CustomParserErrorListener implements ParserErrorListener {
     }
 
     syntaxError(
-        recognizer: Recognizer<Token, ATNSimulator>, offendingSymbol: Token, line: number,
-        charPositionInLine: number, msg: string, e: RecognitionException,
+        recognizer: Recognizer<Token, ATNSimulator>,
+        offendingSymbol: Token,
+        line: number,
+        charPositionInLine: number,
+        msg: string,
+        e: RecognitionException
     ) {
         let endCol = charPositionInLine + 1;
         if (offendingSymbol && offendingSymbol.text !== null) {
            endCol = charPositionInLine + offendingSymbol.text.length;
         }
         if (this._errorHandler) {
-            this._errorHandler({
-                startLine: line,
-                endLine: line,
-                startCol: charPositionInLine,
-                endCol: endCol,
-                message: msg,
-            }, {
-                e,
-                line,
-                msg,
-                recognizer,
-                offendingSymbol,
-                charPositionInLine,
-            });
+            this._errorHandler(
+                {
+                    startLine: line,
+                    endLine: line,
+                    startCol: charPositionInLine,
+                    endCol: endCol,
+                    message: msg,
+                },
+                {
+                    e,
+                    line,
+                    msg,
+                    recognizer,
+                    offendingSymbol,
+                    charPositionInLine,
+                }
+            );
         }
     }
 }

View File

@@ -5,7 +5,7 @@ import {
     FlinkSqlParser,
     ProgramContext,
     SqlStatementContext,
-    SqlStatementsContext
+    SqlStatementsContext,
 } from '../lib/flinksql/FlinkSqlParser';
 import { FlinkSqlParserListener } from '../lib/flinksql/FlinkSqlParserListener';
 import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';
@@ -34,7 +34,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
         FlinkSqlParser.RULE_functionNameCreate, // functionName that will be created
     ]);
 
-    protected get splitListener () {
+    protected get splitListener() {
         return new FlinkSqlSplitListener();
     }
 
@@ -50,7 +50,10 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
         for (let candidate of candidates.rules) {
             const [ruleType, candidateRule] = candidate;
             const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
-            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
+            const tokenRanges = allTokens.slice(
+                startTokenIndex,
+                caretTokenIndex + tokenIndexOffset + 1
+            );
 
             let syntaxContextType: SyntaxContextType;
             switch (ruleType) {
@@ -78,15 +81,15 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
                     syntaxContextType = SyntaxContextType.VIEW;
                     break;
                 }
-                case FlinkSqlParser.RULE_viewPathCreate : {
+                case FlinkSqlParser.RULE_viewPathCreate: {
                     syntaxContextType = SyntaxContextType.VIEW_CREATE;
                     break;
                 }
-                case FlinkSqlParser.RULE_functionName : {
+                case FlinkSqlParser.RULE_functionName: {
                     syntaxContextType = SyntaxContextType.FUNCTION;
                     break;
                 }
-                case FlinkSqlParser.RULE_functionNameCreate : {
+                case FlinkSqlParser.RULE_functionNameCreate: {
                     syntaxContextType = SyntaxContextType.FUNCTION_CREATE;
                     break;
                 }
@@ -97,25 +100,26 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
             if (syntaxContextType) {
                 originalSyntaxSuggestions.push({
                     syntaxContextType,
-                    wordRanges: tokenRanges
-                })
+                    wordRanges: tokenRanges,
+                });
             }
         }
 
         for (let candidate of candidates.tokens) {
             const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
             const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
-            if(symbolicName && symbolicName.startsWith('KW_')) {
-                const keyword = displayName.startsWith("'") && displayName.endsWith("'")
-                    ? displayName.slice(1, -1)
-                    : displayName
+            if (symbolicName && symbolicName.startsWith('KW_')) {
+                const keyword =
+                    displayName.startsWith("'") && displayName.endsWith("'")
+                        ? displayName.slice(1, -1)
+                        : displayName;
                 keywords.push(keyword);
             }
         }
 
         return {
             syntax: originalSyntaxSuggestions,
             keywords,
-        }
-    };
+        };
+    }
 }
@@ -124,12 +128,11 @@ export class FlinkSqlSplitListener implements FlinkSqlParserListener {
 
     exitSqlStatement = (ctx: SqlStatementContext) => {
         this._statementsContext.push(ctx);
-    }
+    };
 
-    enterSqlStatements = (ctx: SqlStatementsContext) => {
-    }
+    enterSqlStatements = (ctx: SqlStatementsContext) => {};
 
-    get statementsContext () {
+    get statementsContext() {
         return this._statementsContext;
     }
 }
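
The switch in processCandidates above maps antlr4-c3 rule hits onto SyntaxContextType values, and BasicParser then rewrites each token range into WordRange objects carrying text and positions. A hedged sketch of what a caller might get back when the caret sits on a table name — the enum value and result below are illustrative, not captured from a real run:

const flink = new FlinkSQL();
const suggestion = flink.getSuggestionAtCaretPosition('SELECT * FROM db.tb', {
    lineNumber: 1,
    column: 20,
});

// Plausible result shape, following Suggestions/SyntaxSuggestion<WordRange> above:
// {
//     syntax: [{ syntaxContextType: SyntaxContextType.TABLE, wordRanges: [{ text: 'db', ... }, ...] }],
//     keywords: [...]
// }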

View File

@@ -17,19 +17,18 @@ export default class GenericSQL extends BasicParser<SqlLexer, ProgramContext, Sq
 
     protected preferredRules: Set<number> = new Set();
 
-
-    protected get splitListener () {
+    protected get splitListener() {
         return null as any;
     }
 
     protected processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
+        candidates: CandidatesCollection,
+        allTokens: Token[],
         caretTokenIndex: number
     ): Suggestions<Token> {
         return {
             syntax: [],
-            keywords: []
-        }
+            keywords: [],
+        };
     }
 }

View File

@@ -1,12 +1,17 @@
 import { Token } from 'antlr4ts';
 import { CandidatesCollection } from 'antlr4-c3';
 import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
-import { HiveSqlParser, ProgramContext, StatementContext, ExplainStatementContext, ExecStatementContext } from '../lib/hive/HiveSqlParser';
+import {
+    HiveSqlParser,
+    ProgramContext,
+    StatementContext,
+    ExplainStatementContext,
+    ExecStatementContext,
+} from '../lib/hive/HiveSqlParser';
 import BasicParser from './common/basicParser';
 import { HiveSqlParserListener } from '../lib/hive/HiveSqlParserListener';
 import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';
 
-
 export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSqlParser> {
     protected createLexerFormCharStream(charStreams) {
         const lexer = new HiveSqlLexer(charStreams);
export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSqlParser> {
protected createLexerFormCharStream(charStreams) {
const lexer = new HiveSqlLexer(charStreams);
@@ -27,10 +32,9 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
         HiveSqlParser.RULE_functionNameForDDL, // function name
         HiveSqlParser.RULE_functionNameForInvoke, // function name
         HiveSqlParser.RULE_functionNameCreate, // function name that will be created
     ]);
 
-
-    protected get splitListener () {
+    protected get splitListener() {
         return new HiveSqlSplitListener();
     }
 
@@ -38,14 +42,17 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number,
-        tokenIndexOffset: number,
+        tokenIndexOffset: number
     ): Suggestions<Token> {
         const originalSyntaxSuggestions: SyntaxSuggestion<Token>[] = [];
         const keywords: string[] = [];
         for (let candidate of candidates.rules) {
             const [ruleType, candidateRule] = candidate;
             const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
-            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
+            const tokenRanges = allTokens.slice(
+                startTokenIndex,
+                caretTokenIndex + tokenIndexOffset + 1
+            );
 
             let syntaxContextType: SyntaxContextType;
             switch (ruleType) {
@@ -62,7 +69,7 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
                     break;
                 }
                 case HiveSqlParser.RULE_tableNameCreate: {
-                    syntaxContextType = SyntaxContextType.TABLE_CREATE
+                    syntaxContextType = SyntaxContextType.TABLE_CREATE;
                     break;
                 }
                 case HiveSqlParser.RULE_viewName: {
@@ -73,7 +80,7 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
                     syntaxContextType = SyntaxContextType.VIEW_CREATE;
                     break;
                 }
-                case HiveSqlParser.RULE_functionNameForDDL:
+                case HiveSqlParser.RULE_functionNameForDDL:
                 case HiveSqlParser.RULE_functionNameForInvoke: {
                     syntaxContextType = SyntaxContextType.FUNCTION;
                     break;
@@ -98,7 +105,10 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
             const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
             const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
             if (symbolicName && symbolicName.startsWith('KW_')) {
-                const keyword = displayName.startsWith("'") && displayName.endsWith("'") ? displayName.slice(1, -1) : displayName;
+                const keyword =
+                    displayName.startsWith("'") && displayName.endsWith("'")
+                        ? displayName.slice(1, -1)
+                        : displayName;
                 keywords.push(keyword);
             }
         }
@@ -111,16 +121,14 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
 
 export class HiveSqlSplitListener implements HiveSqlParserListener {
     private _statementContext: StatementContext[] = [];
 
     exitStatement = (ctx: StatementContext) => {
         this._statementContext.push(ctx);
-    }
+    };
 
-    enterStatement = (ctx: StatementContext) => {
-
-    }
+    enterStatement = (ctx: StatementContext) => {};
 
     get statementsContext() {
         return this._statementContext;
     }
 }

View File

@@ -5,7 +5,11 @@ import { PostgreSQLParser, ProgramContext } from '../lib/pgsql/PostgreSQLParser'
 import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';
 
-export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramContext, PostgreSQLParser> {
+export default class PostgresSQL extends BasicParser<
+    PostgreSQLLexer,
+    ProgramContext,
+    PostgreSQLParser
+> {
     protected createLexerFormCharStream(charStreams) {
         const lexer = new PostgreSQLLexer(charStreams);
         return lexer;
@@ -17,18 +21,18 @@ export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramCon
 
     protected preferredRules: Set<number> = new Set();
 
-    protected get splitListener () {
+    protected get splitListener() {
         return null as any;
     }
 
     protected processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
+        candidates: CandidatesCollection,
+        allTokens: Token[],
         caretTokenIndex: number
     ): Suggestions<Token> {
         return {
             syntax: [],
-            keywords: []
-        }
+            keywords: [],
+        };
     }
 }

View File

@@ -17,18 +17,18 @@ export default class PLSQL extends BasicParser<PlSqlLexer, ProgramContext, PlSql
 
     protected preferredRules: Set<number> = new Set();
 
-    protected get splitListener () {
+    protected get splitListener() {
         return null as any;
     }
 
     protected processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
+        candidates: CandidatesCollection,
+        allTokens: Token[],
         caretTokenIndex: number
     ): Suggestions<Token> {
         return {
             syntax: [],
-            keywords: []
-        }
+            keywords: [],
+        };
     }
 }

View File

@@ -1,7 +1,11 @@
 import { Token } from 'antlr4ts';
 import { CandidatesCollection } from 'antlr4-c3';
 import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
-import { SparkSqlParser, ProgramContext, SingleStatementContext } from '../lib/spark/SparkSqlParser';
+import {
+    SparkSqlParser,
+    ProgramContext,
+    SingleStatementContext,
+} from '../lib/spark/SparkSqlParser';
 import BasicParser from './common/basicParser';
 import { Suggestions, SyntaxContextType, SyntaxSuggestion } from './common/basic-parser-types';
 import { SparkSqlParserListener } from 'src/lib/spark/SparkSqlParserListener';
@@ -36,7 +40,7 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number,
-        tokenIndexOffset: number,
+        tokenIndexOffset: number
     ): Suggestions<Token> {
         const originalSyntaxSuggestions: SyntaxSuggestion<Token>[] = [];
         const keywords: string[] = [];
@@ -44,7 +48,10 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
         for (const candidate of candidates.rules) {
             const [ruleType, candidateRule] = candidate;
             const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
-            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
+            const tokenRanges = allTokens.slice(
+                startTokenIndex,
+                caretTokenIndex + tokenIndexOffset + 1
+            );
 
             let syntaxContextType: SyntaxContextType;
             switch (ruleType) {
@@ -96,7 +103,10 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
             const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
             const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
             if (symbolicName && symbolicName.startsWith('KW_')) {
-                const keyword = displayName.startsWith("'") && displayName.endsWith("'") ? displayName.slice(1, -1) : displayName;
+                const keyword =
+                    displayName.startsWith("'") && displayName.endsWith("'")
+                        ? displayName.slice(1, -1)
+                        : displayName;
                 keywords.push(keyword);
             }
         }
@@ -113,11 +123,10 @@ export class SparkSqlSplitListener implements SparkSqlParserListener {
 
     exitSingleStatement = (ctx: SingleStatementContext) => {
         this._statementsContext.push(ctx);
-    }
+    };
 
-    enterSingleStatement = (ctx: SingleStatementContext) => {
-    }
+    enterSingleStatement = (ctx: SingleStatementContext) => {};
 
     get statementsContext() {
         return this._statementsContext;
     }

View File

@@ -16,21 +16,20 @@ export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext,
         return parser;
     }
 
-
-    protected get splitListener () {
+    protected get splitListener() {
         return null as any;
     }
 
     protected preferredRules: Set<number> = new Set();
 
     protected processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
+        candidates: CandidatesCollection,
+        allTokens: Token[],
         caretTokenIndex: number
     ): Suggestions<Token> {
         return {
             syntax: [],
-            keywords: []
-        }
+            keywords: [],
+        };
     }
 }
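
GenericSQL, PostgresSQL, PLSQL, and TrinoSQL above all stub out splitListener and processCandidates, so they parse but do not produce suggestions yet. A minimal sketch of what a dialect must supply to enable completion, modeled on the FlinkSQL and HiveSQL implementations in this commit — every "My*" name below is hypothetical:

export default class MyDialectSQL extends BasicParser<MyLexer, ProgramContext, MyParser> {
    protected createLexerFormCharStream(charStreams) {
        return new MyLexer(charStreams);
    }

    protected createParserFromTokenStream(tokenStream: CommonTokenStream) {
        return new MyParser(tokenStream);
    }

    // Rules antlr4-c3 should surface as candidates.
    protected preferredRules: Set<number> = new Set([MyParser.RULE_tableName]);

    // Splits multi-statement input so completion can reparse only the caret statement.
    protected get splitListener() {
        return new MyDialectSplitListener();
    }

    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number,
        tokenIndexOffset: number
    ): Suggestions<Token> {
        // Map rule hits to SyntaxContextType entries, as FlinkSQL/HiveSQL do above.
        return { syntax: [], keywords: [] };
    }
}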