refactor: optimize basic parser interface (#121)

* feat: set all internal methods to protected

* feat: make createLexer and createParser public

* feat: remove the parser's error listeners when getting suggestions
Hayden 2023-06-16 16:14:53 +08:00 committed by GitHub
parent 8097d47541
commit 9cf79064de
8 changed files with 88 additions and 65 deletions
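
For reference, a minimal sketch of how the reworked surface is meant to be consumed after this commit (the package import path and the SQL text are illustrative assumptions; createLexer and createParser are the newly public factories, per the diffs below):

    import { FlinkSQL } from 'dt-sql-parser'; // assumed entry point

    const flink = new FlinkSQL();

    // Public, cache-free factories; internals such as
    // createLexerFormCharStream and processCandidates are now protected.
    const lexer = flink.createLexer('SELECT * FROM tb;');
    const tree = flink.createParser('SELECT * FROM tb;').program();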


@@ -49,31 +49,31 @@ export default abstract class BasicParser<
     protected _parsedInput: string = null;

     /**
-     * preferredRules for antlr4-c3
+     * PreferredRules for antlr4-c3
      */
-    public abstract preferredRules: Set<number>;
+    protected abstract preferredRules: Set<number>;

     /**
-     * Create antrl4 Lexer instance
+     * Create an antlr4 Lexer instance
      * @param input source string
      */
-    public abstract createLexerFormCharStream(charStreams: CodePointCharStream): L;
+    protected abstract createLexerFormCharStream(charStreams: CodePointCharStream): L;

     /**
      * Create Parser by CommonTokenStream
      * @param tokenStream CommonTokenStream
      */
-    public abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;
+    protected abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;

     /**
-     * convert candidates to suggestions
+     * Convert candidates to suggestions
      * @param candidates candidate list
      * @param allTokens all tokens from input
      * @param caretTokenIndex tokenIndex of caretPosition
      * @param tokenIndexOffset offset of the tokenIndex in the candidates
      *                         compared to the tokenIndex in allTokens
      */
-    public abstract processCandidates(
+    protected abstract processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number,
@@ -81,14 +81,55 @@ export default abstract class BasicParser<
     ): Suggestions<Token>;

     /**
-     * splitListener instance Getter
+     * Get splitListener instance.
      */
     protected abstract get splitListener (): SplitListener;

     /**
-     * If it is invoked multiple times in a row and the input parameters is the same
-     * this method returns the parsing result directly for the first time
-     * unless the errorListener parameter is passed
+     * Create an antlr4 lexer from input.
+     * @param input string
+     */
+    public createLexer(input: string) {
+        const charStreams = CharStreams.fromString(input.toUpperCase());
+        const lexer = this.createLexerFormCharStream(charStreams);
+        return lexer;
+    }
+
+    /**
+     * Create an antlr4 parser from input.
+     * @param input string
+     */
+    public createParser(input: string) {
+        const lexer = this.createLexer(input);
+        const tokenStream = new CommonTokenStream(lexer);
+        const parser = this.createParserFromTokenStream(tokenStream);
+        return parser;
+    }
+
+    /**
+     * Create an antlr4 parser from input.
+     * The instances will be cached.
+     * @param input string
+     */
+    protected createParserWithCache(input: string): P {
+        this._parserTree = null;
+        this._charStreams = CharStreams.fromString(input.toUpperCase());
+        this._lexer = this.createLexerFormCharStream(this._charStreams);
+        this._tokenStream = new CommonTokenStream(this._lexer);
+        this._tokenStream.fill();
+        this._parser = this.createParserFromTokenStream(this._tokenStream);
+        this._parser.buildParseTree = true;
+        return this._parser
+    }
+
+    /**
+     * If it is invoked multiple times in a row and the input parameter is the same,
+     * this method directly returns the result of the first parse,
+     * unless the errorListener parameter is passed.
      * @param input source string
      * @param errorListener listen errors
      * @returns parserTree
@@ -97,12 +138,12 @@ export default abstract class BasicParser<
         input: string,
         errorListener?: ErrorHandler<any>
     ) {
-        // Avoid parsing the same input repeatedly
+        // Avoid parsing the same input repeatedly.
         if(this._parsedInput === input && !errorListener) {
             return;
         }

-        const parser = this.createParser(input);
+        const parser = this.createParserWithCache(input);
         this._parsedInput = input;
         parser.removeErrorListeners();
@@ -119,7 +160,7 @@ export default abstract class BasicParser<
     }

     /**
-     * validate input string and return syntax errors
+     * Validate input string and return syntax errors if any exist.
      * @param input source string
      * @returns syntax errors
      */
@@ -142,25 +183,6 @@ export default abstract class BasicParser<
         }
         return allTokens
     };

-    /**
-     * Get Parser instance by input string
-     * @param input string
-     */
-    public createParser(input: string): P {
-        this._parserTree = null;
-        this._charStreams = CharStreams.fromString(input.toUpperCase());
-        this._lexer = this.createLexerFormCharStream(this._charStreams);
-        this._tokenStream = new CommonTokenStream(this._tokenStream);
-        this._tokenStream.fill();
-        this._parser = this.createParserFromTokenStream(this._tokenStream);
-        this._parser.buildParseTree = true;
-        return this._parser
-    }
-
     /**
      * It convert tree to string, it's convenient to use in unit test.
      * @param string input
@@ -187,10 +209,11 @@ export default abstract class BasicParser<
     }

     /**
-     * split input into statements
+     * Split input into statements.
+     * If a syntax error exists, it returns null.
      * @param input source string
      */
-    public splitSQL(input): TextSlice[] {
+    public splitSQLByStatement(input): TextSlice[] {
         this.parse(input);
         const splitListener = this.splitListener;
         this.listen(splitListener, this._parserTree);
@@ -265,11 +288,11 @@ export default abstract class BasicParser<
         caretTokenIndex = caretTokenIndex - tokenIndexOffset;

         const inputSlice = input.slice(lastStatementToken.startIndex);
-        const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
-        const lexer = this.createLexerFormCharStream(charStreams);
+        const lexer = this.createLexer(inputSlice);
         const tokenStream = new CommonTokenStream(lexer);
         tokenStream.fill();
         const parser = this.createParserFromTokenStream(tokenStream);
+        parser.removeErrorListeners();
         parser.buildParseTree = true;
         sqlParserIns = parser;
         c3Context = parser.program();
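
Worth noting from the hunks above: the public createParser builds throwaway instances, while the protected createParserWithCache stores the char stream, lexer, token stream, and parser on instance fields so that parse() can return early when the same input is parsed again. A sketch of the resulting behavior, using the renamed splitSQLByStatement, which drives parse() internally (the SQL text is illustrative):

    const flink = new FlinkSQL();
    // First call parses and caches the tree; the immediate second call with
    // identical input short-circuits in parse() and reuses the cached tree.
    flink.splitSQLByStatement('SELECT 1; SELECT 2;');
    flink.splitSQLByStatement('SELECT 1; SELECT 2;');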


@@ -12,17 +12,17 @@ import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';
 import BasicParser from './common/basicParser';

 export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext, FlinkSqlParser> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new FlinkSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         const parser = new FlinkSqlParser(tokenStream);
         return parser;
     }
-    public preferredRules = new Set([
+    protected preferredRules = new Set([
         FlinkSqlParser.RULE_tablePath, // table name >> select / insert ...
         FlinkSqlParser.RULE_tablePathCreate, // table name >> create
         FlinkSqlParser.RULE_databasePath, // database name >> show
@@ -34,7 +34,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext, FlinkSqlParser> {
         return new FlinkSqlSplitListener();
     }
-    public processCandidates(
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number,


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';

 export default class GenericSQL extends BasicParser<SqlLexer, ProgramContext, SqlParser> {
-    public createLexerFormCharStream(charStreams): SqlLexer {
+    protected createLexerFormCharStream(charStreams): SqlLexer {
         const lexer = new SqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream): SqlParser {
+    protected createParserFromTokenStream(tokenStream): SqlParser {
         return new SqlParser(tokenStream);
     }
-    public preferredRules: Set<number> = new Set();
+    protected preferredRules: Set<number> = new Set();
     protected get splitListener () {
         return null as any;
     }
-    public processCandidates(
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number


@@ -7,12 +7,12 @@ import { Suggestions } from './common/basic-parser-types';

 export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSql> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new HiveSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         return new HiveSql(tokenStream);
     }
@@ -20,9 +20,9 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSql> {
         return null as any;
     }
-    public preferredRules: Set<number> = new Set();
-    public processCandidates(
+    protected preferredRules: Set<number> = new Set();
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';

 export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramContext, PostgreSQLParser> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new PostgreSQLLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         return new PostgreSQLParser(tokenStream);
     }
-    public preferredRules: Set<number> = new Set();
+    protected preferredRules: Set<number> = new Set();
     protected get splitListener () {
         return null as any;
     }
-    public processCandidates(
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';

 export default class PLSQL extends BasicParser<PlSqlLexer, ProgramContext, PlSqlParser> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new PlSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         return new PlSqlParser(tokenStream);
     }
-    public preferredRules: Set<number> = new Set();
+    protected preferredRules: Set<number> = new Set();
     protected get splitListener () {
         return null as any;
     }
-    public processCandidates(
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';

 export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext, SparkSqlParser> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new SparkSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         return new SparkSqlParser(tokenStream);
     }
-    public preferredRules: Set<number> = new Set();
+    protected preferredRules: Set<number> = new Set();
     protected get splitListener () {
         return null as any;
     }
-    public processCandidates(
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number


@@ -6,12 +6,12 @@ import BasicParser from './common/basicParser';
 import { Suggestions } from './common/basic-parser-types';

 export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext, TrinoSqlParser> {
-    public createLexerFormCharStream(charStreams) {
+    protected createLexerFormCharStream(charStreams) {
         const lexer = new TrinoSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromTokenStream(tokenStream) {
+    protected createParserFromTokenStream(tokenStream) {
         const parser = new TrinoSqlParser(tokenStream);
         return parser;
     }
@@ -20,9 +20,9 @@ export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext, TrinoSqlParser> {
         return null as any;
     }
-    public preferredRules: Set<number> = new Set();
-    public processCandidates(
+    protected preferredRules: Set<number> = new Set();
+    protected processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
         caretTokenIndex: number