chore: devops (#180)
* ci: add dependencies for lint tools
* ci: replace eslint with prettier
* ci: add husky, cz and commitlint
* style: lint fix via prettier
* ci: add prettier and check-types to github workflow
@@ -16,6 +16,5 @@ export * from './lib/trinosql/TrinoSqlListener';
export * from './lib/trinosql/TrinoSqlVisitor';
export { SyntaxContextType } from './parser/common/basic-parser-types';


export type * from './parser/common/basic-parser-types';
export type { SyntaxError, ParserError } from './parser/common/parserErrorListener';
@@ -30,12 +30,10 @@ export default abstract class PostgreSQLLexerBase extends Lexer {
        return this._input;
    }

    checkLA( c) {
        // eslint-disable-next-line new-cap
        return this.getInputStream().LA(1) !== c;
    }

    charIsLetter() {
        // eslint-disable-next-line new-cap
        return isLetter(this.getInputStream().LA(-1));
    }
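A hedged aside on the lexer-base helpers above: in antlr4ts, getInputStream().LA(1) returns the code point of the next character without consuming it, and LA(-1) looks one character back, so checkLA(c) answers "is the next character different from c?". Illustrative shape only:

declare const lexerBase: { checkLA(c: number): boolean }; // shape from the diff
const nextIsDollar = !lexerBase.checkLA('$'.charCodeAt(0)); // true when LA(1) === '$'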
@@ -53,11 +51,9 @@ export default abstract class PostgreSQLLexerBase extends Lexer {
    }

    UnterminatedBlockCommentDebugAssert() {
        // Debug.Assert(InputStream.LA(1) == -1 /*EOF*/);
    }

    CheckIfUtf32Letter() {
        // eslint-disable-next-line new-cap
        let codePoint = this.getInputStream().LA(-2) << 8 + this.getInputStream().LA(-1);
        let c;
        if (codePoint < 0x10000) {
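One thing worth flagging in CheckIfUtf32Letter (the line is unchanged by this commit): in TypeScript, + binds tighter than <<, so a << 8 + b parses as a << (8 + b). A standalone illustration:

const hi = 0x12;
const lo = 0x34;
console.log(hi << 8 + lo);   // hi << (8 + lo), not the intended packing
console.log((hi << 8) + lo); // 0x1234, two units packed into one code point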
@@ -1,9 +1,7 @@
/* eslint-disable new-cap,camelcase */
import { CharStreams, CommonTokenStream, Parser } from 'antlr4ts';
import { PostgreSQLLexer } from './PostgreSQLLexer';
import { PostgreSQLParser } from './PostgreSQLParser';

// @ts-ignore
export default abstract class PostgreSQLParserBase extends Parser {
    constructor( input) {
        super(input);
@@ -32,16 +30,13 @@ export default abstract class PostgreSQLParserBase extends Parser {
            }
        }
        if (!lang) return;
        // eslint-disable-next-line camelcase
        let func_as = null;
        for (const a of _localctx.createfunc_opt_item()) {
            if (!a.func_as()) {
                // eslint-disable-next-line camelcase
                func_as = a;
                break;
            }
        }
        // eslint-disable-next-line camelcase
        if (!func_as) {
            const txt = this.GetRoutineBodyString(func_as.func_as().sconst(0));
            const line = func_as.func_as().sconst(0).start.getLine();
@@ -76,7 +71,6 @@ export default abstract class PostgreSQLParserBase extends Parser {

    GetRoutineBodyString( rule) {
        const anysconst = rule.anysconst();
        // eslint-disable-next-line new-cap
        const StringConstant = anysconst.StringConstant();
        if (null !== StringConstant) return this.unquote(this.TrimQuotes(StringConstant.getText()));
        const UnicodeEscapeStringConstant = anysconst.UnicodeEscapeStringConstant();
@@ -78,4 +78,4 @@ export interface TextSlice {
    startColumn: number;
    endColumn: number;
    text: string;
}
}
@@ -1,21 +1,21 @@
import {
    Parser,
    Lexer,
    Token,
    CharStreams,
    CommonTokenStream,
    CodePointCharStream,
    ParserRuleContext
    ParserRuleContext,
} from 'antlr4ts';
import { ParseTreeWalker, ParseTreeListener } from 'antlr4ts/tree';
import { CandidatesCollection, CodeCompletionCore } from 'antlr4-c3';
import { findCaretTokenIndex } from '../../utils/findCaretTokenIndex';
import {
    CaretPosition,
    Suggestions,
    SyntaxSuggestion,
    WordRange,
    TextSlice
    TextSlice,
} from './basic-parser-types';
import ParserErrorListener, {
    ParserError,
@@ -36,10 +36,10 @@ interface SplitListener extends ParseTreeListener {
 * Custom Parser class; subclasses need to extend it.
 */
export default abstract class BasicParser<
    L extends Lexer = Lexer,
    PRC extends ParserRuleContext = ParserRuleContext,
    P extends IParser<PRC> = IParser<PRC>
    P extends IParser<PRC> = IParser<PRC>,
> {
    protected _charStreams: CodePointCharStream;
    protected _lexer: L;
    protected _tokenStream: CommonTokenStream;
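The three type parameters make each SQL dialect a thin subclass; a hedged sketch of the shape the dialect files in this commit follow (MyLexer and MyParser are hypothetical stand-ins, bodies abbreviated):

class MyDialect extends BasicParser<MyLexer, ProgramContext, MyParser> {
    protected createLexerFormCharStream(charStreams) {
        return new MyLexer(charStreams);
    }
    protected createParserFromTokenStream(tokenStream: CommonTokenStream) {
        return new MyParser(tokenStream);
    }
    protected preferredRules: Set<number> = new Set();
    protected get splitListener() {
        return null as any; // dialects without statement splitting return null
    }
    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        return { syntax: [], keywords: [] };
    }
}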
@@ -52,7 +52,7 @@ export default abstract class BasicParser<
     * PreferredRules for antlr4-c3
     */
    protected abstract preferredRules: Set<number>;

    /**
     * Create an antlr4 Lexer instance
     * @param input source string
@@ -64,26 +64,26 @@ export default abstract class BasicParser<
     * @param tokenStream CommonTokenStream
     */
    protected abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;

    /**
     * Convert candidates to suggestions
     * @param candidates candidate list
     * @param allTokens all tokens from input
     * @param caretTokenIndex tokenIndex of caretPosition
     * @param tokenIndexOffset offset of the tokenIndex in the candidates
     * compared to the tokenIndex in allTokens
     */
    protected abstract processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number,
        tokenIndexOffset: number,
        tokenIndexOffset: number
    ): Suggestions<Token>;

    /**
     * Get splitListener instance.
     */
    protected abstract get splitListener (): SplitListener;
    protected abstract get splitListener(): SplitListener;

    /**
     * Create an antlr4 lexer from input.
@@ -92,8 +92,8 @@ export default abstract class BasicParser<
    public createLexer(input: string) {
        const charStreams = CharStreams.fromString(input.toUpperCase());
        const lexer = this.createLexerFormCharStream(charStreams);

        return lexer;
    }

    /**
@@ -104,7 +104,7 @@ export default abstract class BasicParser<
        const lexer = this.createLexer(input);
        const tokenStream = new CommonTokenStream(lexer);
        const parser = this.createParserFromTokenStream(tokenStream);
        return parser;
    }

    /**
@@ -119,11 +119,11 @@ export default abstract class BasicParser<

        this._tokenStream = new CommonTokenStream(this._lexer);
        this._tokenStream.fill();

        this._parser = this.createParserFromTokenStream(this._tokenStream);
        this._parser.buildParseTree = true;

        return this._parser
        return this._parser;
    }

    /**
@@ -134,13 +134,10 @@ export default abstract class BasicParser<
     * @param errorListener listen errors
     * @returns parserTree
     */
    public parse(
        input: string,
        errorListener?: ErrorHandler<any>
    ) {
    public parse(input: string, errorListener?: ErrorHandler<any>) {
        // Avoid parsing the same input repeatedly.
        if(this._parsedInput === input && !errorListener) {
            return;
        if (this._parsedInput === input && !errorListener) {
            return this._parserTree;
        }

        const parser = this.createParserWithCache(input);
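Beyond formatting, this hunk fixes a behavioral bug: a cache hit used to return undefined, and now it returns the cached tree. Hedged usage sketch (the FlinkSQL dialect class appears later in this commit; the import path is assumed):

import FlinkSQL from './parser/flinksql';

const parser = new FlinkSQL();
const tree1 = parser.parse('SELECT * FROM tb;');
const tree2 = parser.parse('SELECT * FROM tb;'); // cache hit: same tree, no reparse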
@@ -150,12 +147,12 @@ export default abstract class BasicParser<
        this._errorCollector.clear();

        parser.addErrorListener(this._errorCollector);
        if(errorListener) {
        if (errorListener) {
            parser.addErrorListener(new ParserErrorListener(errorListener));
        }

        this._parserTree = parser.program();

        return this._parserTree;
    }
@@ -178,11 +175,11 @@ export default abstract class BasicParser<
    public getAllTokens(input: string): Token[] {
        this.parse(input);
        let allTokens = this._tokenStream.getTokens();
        if(allTokens[allTokens.length - 1].text === '<EOF>') {
            allTokens = allTokens.slice(0, -1)
        if (allTokens[allTokens.length - 1].text === '<EOF>') {
            allTokens = allTokens.slice(0, -1);
        }
        return allTokens
    };
        return allTokens;
    }
    /**
     * It converts the tree to a string; convenient for unit tests.
     * @param string input
@@ -204,7 +201,10 @@ export default abstract class BasicParser<
     * @param listener Listener instance extends ParserListener
     * @param parserTree parser Tree
     */
    public listen<PTL extends ParseTreeListener = ParseTreeListener>(listener: PTL, parserTree: PRC) {
    public listen<PTL extends ParseTreeListener = ParseTreeListener>(
        listener: PTL,
        parserTree: PRC
    ) {
        ParseTreeWalker.DEFAULT.walk(listener, parserTree);
    }
@@ -217,8 +217,8 @@ export default abstract class BasicParser<
        this.parse(input);
        const splitListener = this.splitListener;
        this.listen(splitListener, this._parserTree);

        const res = splitListener.statementsContext.map(context => {
        const res = splitListener.statementsContext.map((context) => {
            const { start, stop } = context;
            return {
                startIndex: start.startIndex,
@@ -228,8 +228,8 @@ export default abstract class BasicParser<
                startColumn: start.charPositionInLine + 1,
                endColumn: stop.charPositionInLine + stop.text.length,
                text: this._parsedInput.slice(start.startIndex, stop.stopIndex + 1),
            }
        })
            };
        });

        return res;
    }
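The mapping above is what turns each statement context into a TextSlice (the interface amended earlier in this diff). Hedged usage sketch; the public method name splitSQLByStatement is an assumption, only its internals appear here:

const slices = parser.splitSQLByStatement('SELECT 1;\nSELECT 2;');
// Each slice carries startIndex/endIndex, 1-based line/column bounds, and the
// statement text, e.g. slices[1]?.text === 'SELECT 2;'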
@@ -240,10 +240,13 @@ export default abstract class BasicParser<
     * @param caretPosition caret position, such as cursor position
     * @returns suggestion
     */
    public getSuggestionAtCaretPosition(input: string, caretPosition: CaretPosition): Suggestions | null {
    public getSuggestionAtCaretPosition(
        input: string,
        caretPosition: CaretPosition
    ): Suggestions | null {
        const splitListener = this.splitListener;
        // TODO: add splitListener to all sqlParser implementations and remove the following if
        if(!splitListener) return null;
        if (!splitListener) return null;

        this.parse(input);
        let sqlParserIns = this._parser;
@@ -252,8 +255,8 @@ export default abstract class BasicParser<
        let c3Context: ParserRuleContext = this._parserTree;
        let tokenIndexOffset: number = 0;

        if(!caretTokenIndex && caretTokenIndex !== 0) return null;
        if (!caretTokenIndex && caretTokenIndex !== 0) return null;

        /**
         * Split sql by statement.
         * Try to collect candidates from the caret statement only.
@@ -263,16 +266,19 @@ export default abstract class BasicParser<
        // If there are multiple statements.
        if (splitListener.statementsContext.length > 1) {
            // find statement rule context where caretPosition is located.
            const caretStatementContext = splitListener?.statementsContext.find(ctx => {
                return caretTokenIndex <= ctx.stop?.tokenIndex && caretTokenIndex >= ctx.start.tokenIndex;
            const caretStatementContext = splitListener?.statementsContext.find((ctx) => {
                return (
                    caretTokenIndex <= ctx.stop?.tokenIndex &&
                    caretTokenIndex >= ctx.start.tokenIndex
                );
            });

            if(caretStatementContext) {
                c3Context = caretStatementContext
            if (caretStatementContext) {
                c3Context = caretStatementContext;
            } else {
                const lastStatementToken= splitListener
                    .statementsContext[splitListener?.statementsContext.length - 1]
                    .start;
                const lastStatementToken =
                    splitListener.statementsContext[splitListener?.statementsContext.length - 1]
                        .start;
                /**
                 * If caretStatementContext is not found and it follows all statements.
                 * Reparses part of the input following the penultimate statement.
@@ -281,8 +287,8 @@ export default abstract class BasicParser<
                if (caretTokenIndex > lastStatementToken?.tokenIndex) {
                    /**
                     * Save offset of the tokenIndex in the partInput
                     * compared to the tokenIndex in the whole input
                     */
                    tokenIndexOffset = lastStatementToken?.tokenIndex;
                    // Correct caretTokenIndex
                    caretTokenIndex = caretTokenIndex - tokenIndexOffset;
@@ -304,28 +310,34 @@ export default abstract class BasicParser<
        core.preferredRules = this.preferredRules;

        const candidates = core.collectCandidates(caretTokenIndex, c3Context);
        const originalSuggestions = this.processCandidates(candidates, allTokens, caretTokenIndex, tokenIndexOffset);
        const originalSuggestions = this.processCandidates(
            candidates,
            allTokens,
            caretTokenIndex,
            tokenIndexOffset
        );

        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax
            .map(syntaxCtx => {
                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map(token => {
        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax.map(
            (syntaxCtx) => {
                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map((token) => {
                    return {
                        text: this._parsedInput.slice(token.startIndex, token.stopIndex + 1),
                        startIndex: token.startIndex,
                        stopIndex: token.stopIndex,
                        line: token.line,
                        startColumn: token.charPositionInLine + 1,
                        stopColumn: token.charPositionInLine + token.text.length
                    }
                })
                        stopColumn: token.charPositionInLine + token.text.length,
                    };
                });
                return {
                    syntaxContextType: syntaxCtx.syntaxContextType,
                    wordRanges,
                }
            })
                };
            }
        );
        return {
            syntax: syntaxSuggestions,
            keywords: originalSuggestions.keywords
        }
            keywords: originalSuggestions.keywords,
        };
    }
}
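Hedged usage sketch for the completion entry point (the CaretPosition field names lineNumber/column are an assumption; the 1-based column matches the charPositionInLine + 1 handling above):

const suggestions = parser.getSuggestionAtCaretPosition(
    'SELECT * FROM ',
    { lineNumber: 1, column: 15 }
);
// suggestions?.syntax   -> contexts such as SyntaxContextType.TABLE with wordRanges
// suggestions?.keywords -> keyword strings collected from the KW_* vocabulary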
@@ -1,5 +1,5 @@
import { Token, Recognizer, ParserErrorListener, RecognitionException } from 'antlr4ts';
import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator'
import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator';

export interface ParserError {
    startLine: number;
@@ -26,15 +26,15 @@ export class ParserErrorCollector implements ParserErrorListener {

    syntaxError(
        recognizer: Recognizer<Token, ATNSimulator>,
        offendingSymbol: Token,
        line: number,
        charPositionInLine: number,
        msg: string,
        e: RecognitionException,
        e: RecognitionException
    ) {
        let endCol = charPositionInLine + 1;
        if (offendingSymbol && offendingSymbol.text !== null) {
            endCol = charPositionInLine + offendingSymbol.text.length;
            endCol = charPositionInLine + (offendingSymbol.text?.length ?? 0);
        }
        this._parseErrors.push({
            startLine: line,
@@ -51,7 +51,7 @@ export class ParserErrorCollector implements ParserErrorListener {
            recognizer,
            offendingSymbol,
            charPositionInLine,
        })
        });
    }

    clear() {
@@ -59,12 +59,12 @@ export class ParserErrorCollector implements ParserErrorListener {
        this._syntaxErrors = [];
    }

    get parserErrors () {
        return this._parseErrors
    get parserErrors() {
        return this._parseErrors;
    }
}

export default class CustomParserErrorListener implements ParserErrorListener {
    private _errorHandler;

    constructor(errorListener: ErrorHandler<Token>) {
@@ -72,28 +72,35 @@ export default class CustomParserErrorListener implements ParserErrorListener {
    }

    syntaxError(
        recognizer: Recognizer<Token, ATNSimulator>, offendingSymbol: Token, line: number,
        charPositionInLine: number, msg: string, e: RecognitionException,
        recognizer: Recognizer<Token, ATNSimulator>,
        offendingSymbol: Token,
        line: number,
        charPositionInLine: number,
        msg: string,
        e: RecognitionException
    ) {
        let endCol = charPositionInLine + 1;
        if (offendingSymbol && offendingSymbol.text !== null) {
            endCol = charPositionInLine + offendingSymbol.text.length;
        }
        if (this._errorHandler) {
            this._errorHandler({
                startLine: line,
                endLine: line,
                startCol: charPositionInLine,
                endCol: endCol,
                message: msg,
            }, {
                e,
                line,
                msg,
                recognizer,
                offendingSymbol,
                charPositionInLine,
            });
            this._errorHandler(
                {
                    startLine: line,
                    endLine: line,
                    startCol: charPositionInLine,
                    endCol: endCol,
                    message: msg,
                },
                {
                    e,
                    line,
                    msg,
                    recognizer,
                    offendingSymbol,
                    charPositionInLine,
                }
            );
        }
    }
}
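The reshaped call makes the handler's two-argument contract visible. A hedged sketch of consuming it through parse() (the ParserError field names come from this diff):

parser.parse('SELECT FROM tb;', (error, errorContext) => {
    // error: { startLine, endLine, startCol, endCol, message }
    console.log(`${error.startLine}:${error.startCol} ${error.message}`);
});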
@@ -5,7 +5,7 @@ import {
    FlinkSqlParser,
    ProgramContext,
    SqlStatementContext,
    SqlStatementsContext
    SqlStatementsContext,
} from '../lib/flinksql/FlinkSqlParser';
import { FlinkSqlParserListener } from '../lib/flinksql/FlinkSqlParserListener';
import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';
@@ -34,7 +34,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
        FlinkSqlParser.RULE_functionNameCreate, // functionName that will be created
    ]);

    protected get splitListener () {
    protected get splitListener() {
        return new FlinkSqlSplitListener();
    }
@@ -50,7 +50,10 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
        for (let candidate of candidates.rules) {
            const [ruleType, candidateRule] = candidate;
            const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
            const tokenRanges = allTokens.slice(
                startTokenIndex,
                caretTokenIndex + tokenIndexOffset + 1
            );

            let syntaxContextType: SyntaxContextType;
            switch (ruleType) {
@@ -78,15 +81,15 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
                    syntaxContextType = SyntaxContextType.VIEW;
                    break;
                }
                case FlinkSqlParser.RULE_viewPathCreate : {
                case FlinkSqlParser.RULE_viewPathCreate: {
                    syntaxContextType = SyntaxContextType.VIEW_CREATE;
                    break;
                }
                case FlinkSqlParser.RULE_functionName : {
                case FlinkSqlParser.RULE_functionName: {
                    syntaxContextType = SyntaxContextType.FUNCTION;
                    break;
                }
                case FlinkSqlParser.RULE_functionNameCreate : {
                case FlinkSqlParser.RULE_functionNameCreate: {
                    syntaxContextType = SyntaxContextType.FUNCTION_CREATE;
                    break;
                }
@@ -97,25 +100,26 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
            if (syntaxContextType) {
                originalSyntaxSuggestions.push({
                    syntaxContextType,
                    wordRanges: tokenRanges
                })
                    wordRanges: tokenRanges,
                });
            }
        }

        for (let candidate of candidates.tokens) {
            const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
            const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
            if(symbolicName && symbolicName.startsWith('KW_')) {
                const keyword = displayName.startsWith("'") && displayName.endsWith("'")
                    ? displayName.slice(1, -1)
                    : displayName
            if (symbolicName && symbolicName.startsWith('KW_')) {
                const keyword =
                    displayName.startsWith("'") && displayName.endsWith("'")
                        ? displayName.slice(1, -1)
                        : displayName;
                keywords.push(keyword);
            }
        }
        return {
            syntax: originalSyntaxSuggestions,
            keywords,
        }
        };
    }
}
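A hedged aside on the keyword loop above: ANTLR display names for literal tokens come back quoted, so slicing off the outer quotes yields the bare keyword. Illustration with a hypothetical display name:

const displayName = "'SELECT'";
const keyword =
    displayName.startsWith("'") && displayName.endsWith("'")
        ? displayName.slice(1, -1)
        : displayName;
console.log(keyword); // SELECT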
@@ -124,12 +128,11 @@ export class FlinkSqlSplitListener implements FlinkSqlParserListener {

    exitSqlStatement = (ctx: SqlStatementContext) => {
        this._statementsContext.push(ctx);
    }
    };

    enterSqlStatements = (ctx: SqlStatementsContext) => {
    enterSqlStatements = (ctx: SqlStatementsContext) => {};

    get statementsContext () {
    get statementsContext() {
        return this._statementsContext;
    }
}
@@ -17,19 +17,18 @@ export default class GenericSQL extends BasicParser<SqlLexer, ProgramContext, Sq

    protected preferredRules: Set<number> = new Set();

    protected get splitListener () {
    protected get splitListener() {
        return null as any;
    }

    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        return {
            syntax: [],
            keywords: []
        }
            keywords: [],
        };
    }
}
@@ -1,12 +1,17 @@
import { Token } from 'antlr4ts';
import { CandidatesCollection } from 'antlr4-c3';
import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
import { HiveSqlParser, ProgramContext, StatementContext, ExplainStatementContext, ExecStatementContext } from '../lib/hive/HiveSqlParser';
import {
    HiveSqlParser,
    ProgramContext,
    StatementContext,
    ExplainStatementContext,
    ExecStatementContext,
} from '../lib/hive/HiveSqlParser';
import BasicParser from './common/basicParser';
import { HiveSqlParserListener } from '../lib/hive/HiveSqlParserListener';
import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';


export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSqlParser> {
    protected createLexerFormCharStream(charStreams) {
        const lexer = new HiveSqlLexer(charStreams);
@@ -27,10 +32,9 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
        HiveSqlParser.RULE_functionNameForDDL, // function name
        HiveSqlParser.RULE_functionNameForInvoke, // function name
        HiveSqlParser.RULE_functionNameCreate, // function name that will be created

    ]);

    protected get splitListener () {
    protected get splitListener() {
        return new HiveSqlSplitListener();
    }
@@ -38,14 +42,17 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number,
        tokenIndexOffset: number,
        tokenIndexOffset: number
    ): Suggestions<Token> {
        const originalSyntaxSuggestions: SyntaxSuggestion<Token>[] = [];
        const keywords: string[] = [];
        for (let candidate of candidates.rules) {
            const [ruleType, candidateRule] = candidate;
            const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
            const tokenRanges = allTokens.slice(
                startTokenIndex,
                caretTokenIndex + tokenIndexOffset + 1
            );

            let syntaxContextType: SyntaxContextType;
            switch (ruleType) {
@@ -62,7 +69,7 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
                    break;
                }
                case HiveSqlParser.RULE_tableNameCreate: {
                    syntaxContextType = SyntaxContextType.TABLE_CREATE
                    syntaxContextType = SyntaxContextType.TABLE_CREATE;
                    break;
                }
                case HiveSqlParser.RULE_viewName: {
@@ -73,7 +80,7 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
                    syntaxContextType = SyntaxContextType.VIEW_CREATE;
                    break;
                }
                case HiveSqlParser.RULE_functionNameForDDL:
                case HiveSqlParser.RULE_functionNameForInvoke: {
                    syntaxContextType = SyntaxContextType.FUNCTION;
                    break;
@@ -98,7 +105,10 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
            const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
            const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
            if (symbolicName && symbolicName.startsWith('KW_')) {
                const keyword = displayName.startsWith("'") && displayName.endsWith("'") ? displayName.slice(1, -1) : displayName;
                const keyword =
                    displayName.startsWith("'") && displayName.endsWith("'")
                        ? displayName.slice(1, -1)
                        : displayName;
                keywords.push(keyword);
            }
        }
@@ -111,16 +121,14 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H

export class HiveSqlSplitListener implements HiveSqlParserListener {
    private _statementContext: StatementContext[] = [];

    exitStatement = (ctx: StatementContext) => {
        this._statementContext.push(ctx);
    }
    };

    enterStatement = (ctx: StatementContext) => {
    enterStatement = (ctx: StatementContext) => {};

    get statementsContext() {
        return this._statementContext;
    }
}
@@ -5,7 +5,11 @@ import { PostgreSQLParser, ProgramContext } from '../lib/pgsql/PostgreSQLParser'
import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';

export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramContext, PostgreSQLParser> {
export default class PostgresSQL extends BasicParser<
    PostgreSQLLexer,
    ProgramContext,
    PostgreSQLParser
> {
    protected createLexerFormCharStream(charStreams) {
        const lexer = new PostgreSQLLexer(charStreams);
        return lexer;
@@ -17,18 +21,18 @@ export default class PostgresSQL extends BasicParser<

    protected preferredRules: Set<number> = new Set();

    protected get splitListener () {
    protected get splitListener() {
        return null as any;
    }

    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        return {
            syntax: [],
            keywords: []
        }
            keywords: [],
        };
    }
}
@@ -17,18 +17,18 @@ export default class PLSQL extends BasicParser<PlSqlLexer, ProgramContext, PlSql

    protected preferredRules: Set<number> = new Set();

    protected get splitListener () {
    protected get splitListener() {
        return null as any;
    }

    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        return {
            syntax: [],
            keywords: []
        }
            keywords: [],
        };
    }
}
@@ -1,7 +1,11 @@
import { Token } from 'antlr4ts';
import { CandidatesCollection } from 'antlr4-c3';
import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
import { SparkSqlParser, ProgramContext, SingleStatementContext } from '../lib/spark/SparkSqlParser';
import {
    SparkSqlParser,
    ProgramContext,
    SingleStatementContext,
} from '../lib/spark/SparkSqlParser';
import BasicParser from './common/basicParser';
import { Suggestions, SyntaxContextType, SyntaxSuggestion } from './common/basic-parser-types';
import { SparkSqlParserListener } from 'src/lib/spark/SparkSqlParserListener';
@@ -36,7 +40,7 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number,
        tokenIndexOffset: number,
        tokenIndexOffset: number
    ): Suggestions<Token> {
        const originalSyntaxSuggestions: SyntaxSuggestion<Token>[] = [];
        const keywords: string[] = [];
@@ -44,7 +48,10 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
        for (const candidate of candidates.rules) {
            const [ruleType, candidateRule] = candidate;
            const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
            const tokenRanges = allTokens.slice(
                startTokenIndex,
                caretTokenIndex + tokenIndexOffset + 1
            );

            let syntaxContextType: SyntaxContextType;
            switch (ruleType) {
@@ -96,7 +103,10 @@ export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext,
            const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
            const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
            if (symbolicName && symbolicName.startsWith('KW_')) {
                const keyword = displayName.startsWith("'") && displayName.endsWith("'") ? displayName.slice(1, -1) : displayName;
                const keyword =
                    displayName.startsWith("'") && displayName.endsWith("'")
                        ? displayName.slice(1, -1)
                        : displayName;
                keywords.push(keyword);
            }
        }
@@ -113,11 +123,10 @@ export class SparkSqlSplitListener implements SparkSqlParserListener {

    exitSingleStatement = (ctx: SingleStatementContext) => {
        this._statementsContext.push(ctx);
    }
    };

    enterSingleStatement = (ctx: SingleStatementContext) => {
    enterSingleStatement = (ctx: SingleStatementContext) => {};

    get statementsContext() {
        return this._statementsContext;
    }
@@ -16,21 +16,20 @@ export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext,
        return parser;
    }

    protected get splitListener () {
    protected get splitListener() {
        return null as any;
    }

    protected preferredRules: Set<number> = new Set();

    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        return {
            syntax: [],
            keywords: []
        }
            keywords: [],
        };
    }
}
src/typings/index.d.ts (vendored, 1 line removed)
@@ -1 +0,0 @@
declare type sql = string | string[]
@@ -1,9 +1,9 @@
import { Token } from "antlr4ts";
import { CaretPosition } from "../../src/parser/common/basic-parser-types";
import { Token } from 'antlr4ts';
import { CaretPosition } from '../../src/parser/common/basic-parser-types';

/**
 * find token index via caret position (cursor position)
 * @param caretPosition
 * @param allTokens all the tokens
 * @returns caretTokenIndex
 */
@@ -12,22 +12,23 @@ export function findCaretTokenIndex(caretPosition: CaretPosition, allTokens: Tok
    let left = 0;
    let right = allTokens.length - 1;

    while(left <= right) {
    while (left <= right) {
        const mid = left + ((right - left) >> 1);
        const token = allTokens[mid];
        if (token.line > caretLine || (
            token.line === caretLine
            && token.charPositionInLine + 1 >= caretCol
        )) {
        if (
            token.line > caretLine ||
            (token.line === caretLine && token.charPositionInLine + 1 >= caretCol)
        ) {
            right = mid - 1;
        } else if (token.line < caretLine || (
            token.line === caretLine
            && token.charPositionInLine + token.text.length + 1 < caretCol
        )) {
        } else if (
            token.line < caretLine ||
            (token.line === caretLine &&
                token.charPositionInLine + token.text.length + 1 < caretCol)
        ) {
            left = mid + 1;
        } else {
            return allTokens[mid].tokenIndex
            return allTokens[mid].tokenIndex;
        }
    }
    return null;
}
}
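The binary search relies on allTokens being ordered by (line, charPositionInLine); a token matches when it starts at or before the caret column and ends at or after it. Hedged usage sketch (CaretPosition field names assumed):

import { findCaretTokenIndex } from '../../utils/findCaretTokenIndex';

const tokens = parser.getAllTokens('SELECT 1');
// caret sits on the literal '1' (line 1, column 8)
const caretTokenIndex = findCaretTokenIndex({ lineNumber: 1, column: 8 }, tokens);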
@@ -1,4 +1,3 @@

import { TokenType, Token, TokenReg } from './token';

/**
@@ -15,7 +14,6 @@ function lexer(input: string): Token[] {
    /**
     * Extract a token of the given TokenType
     */
    // eslint-disable-next-line
    const extract = (currentChar: string, validator: RegExp, TokenType: TokenType): Token => {
        let value = '';
        const start = current;
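A hedged standalone sketch of the extract pattern used above: advance a cursor while characters keep matching the validator, then emit the run (the real helper also records the Token's type and position, elided here):

function extractRun(input: string, start: number, validator: RegExp): { value: string; end: number } {
    let end = start;
    let value = '';
    // Consume characters while they keep matching the validator.
    while (end < input.length && validator.test(input[end])) {
        value += input[end];
        end++;
    }
    return { value, end };
}

// extractRun('select1 ...', 0, /[a-z0-9]/) -> { value: 'select1', end: 7 }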
@@ -95,19 +93,16 @@ function lexer(input: string): Token[] {
        }

        if (TokenReg.BackQuotation.test(char)) {
            // eslint-disable-next-line
            matchQuotation(char, TokenReg.BackQuotation, TokenType.BackQuotation);
            continue;
        }

        if (TokenReg.SingleQuotation.test(char)) {
            // eslint-disable-next-line
            matchQuotation(char, TokenReg.SingleQuotation, TokenType.SingleQuotation);
            continue;
        }

        if (TokenReg.DoubleQuotation.test(char)) {
            // eslint-disable-next-line
            matchQuotation(char, TokenReg.DoubleQuotation, TokenType.DoubleQuotation);
            continue;
        }
@@ -162,7 +157,7 @@ function lexer(input: string): Token[] {
            const newToken = extract(
                char,
                TokenReg.StatementTerminator,
                TokenType.StatementTerminator,
                TokenType.StatementTerminator
            );
            tokens.push(newToken);
            continue;
@@ -211,8 +206,4 @@ function cleanSql(sql: string) {
    resultSql += sql.slice(startIndex);
    return resultSql;
}
export {
    cleanSql,
    splitSql,
    lexer,
};
export { cleanSql, splitSql, lexer };
@@ -28,14 +28,14 @@ export enum TokenType {
     */
    RightSmallBracket = 'RightSmallBracket',
    Comma = 'Comma',
    FunctionArguments = 'FunctionArguments'
    FunctionArguments = 'FunctionArguments',
}

/**
 * Token object
 */
export interface Token {
    type: TokenType,
    type: TokenType;
    value: string;
    start: number;
    end: number;
@@ -44,8 +44,8 @@ export interface Token {
}

/**
*  Token recognition rules
*/
 *  Token recognition rules
 */
export const TokenReg = {
    [TokenType.StatementTerminator]: /[;]/,
    [TokenType.SingleQuotation]: /['|\']/,
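A hedged aside on the last rule (untouched by this commit): inside a character class, | is a literal and \' is just ', so /['|\']/ matches a pipe as well as a single quote. If only quotes are intended, /'/ is the precise form:

console.log(/['|\']/.test('|')); // true, likely unintended
console.log(/'/.test("'"));      // true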