refactor: optimize basic parser interface (#121)

* feat: set all internal methods to protected

* feat: add public createLexer and createParser methods

* feat: remove the parser's error listeners when getting suggestions
Hayden 2023-06-16 16:14:53 +08:00 committed by GitHub
parent 8097d47541
commit 9cf79064de
8 changed files with 88 additions and 65 deletions
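The headline change: the lexer/parser factory methods callers actually need stay public, while the abstract hooks that dialects implement become protected. A minimal sketch of the new public surface, assuming the FlinkSQL dialect class is exported from the package entry (import path hypothetical):

import { FlinkSQL } from 'dt-sql-parser';

const flink = new FlinkSQL();

// createLexer and createParser are now public; each call builds fresh,
// uncached antlr4 instances from the given source string.
const lexer = flink.createLexer('SELECT * FROM tb;');
const parser = flink.createParser('SELECT * FROM tb;');

// The generated parser exposes the grammar entry rule used elsewhere in this diff.
const tree = parser.program();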


@@ -49,31 +49,31 @@ export default abstract class BasicParser<
protected _parsedInput: string = null;
/**
* preferredRules for antlr4-c3
* Preferred rules for antlr4-c3.
*/
public abstract preferredRules: Set<number>;
protected abstract preferredRules: Set<number>;
/**
* Create antrl4 Lexer instance
* Create an antlr4 Lexer instance.
* @param charStreams char stream of the source string
*/
public abstract createLexerFormCharStream(charStreams: CodePointCharStream): L;
protected abstract createLexerFormCharStream(charStreams: CodePointCharStream): L;
/**
* Create a Parser from a CommonTokenStream.
* @param tokenStream CommonTokenStream
*/
public abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;
protected abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;
/**
* convert candidates to suggestions
* Convert candidates to suggestions
* @param candidates candidate list
* @param allTokens all tokens from input
* @param caretTokenIndex tokenIndex of caretPosition
* @param tokenIndexOffset offset of the tokenIndex in the candidates
* compared to the tokenIndex in allTokens
*/
public abstract processCandidates(
protected abstract processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number,
@@ -81,14 +81,55 @@ export default abstract class BasicParser<
): Suggestions<Token>;
/**
* splitListener instance Getter
* Get splitListener instance.
*/
protected abstract get splitListener (): SplitListener;
/**
* If it is invoked multiple times in a row and the input parameters is the same
* this method returns the parsing result directly for the first time
* unless the errorListener parameter is passed
* Create an antlr4 lexer from input.
* @param input source string
*/
public createLexer(input: string) {
const charStreams = CharStreams.fromString(input.toUpperCase());
const lexer = this.createLexerFormCharStream(charStreams);
return lexer;
}
/**
* Create an antlr4 parser from input.
* @param input source string
*/
public createParser(input: string) {
const lexer = this.createLexer(input);
const tokenStream = new CommonTokenStream(lexer);
const parser = this.createParserFromTokenStream(tokenStream);
return parser;
}
/**
* Create an antlr4 parser from input and cache the created instances.
* @param input source string
*/
protected createParserWithCache(input: string): P {
this._parserTree = null;
this._charStreams = CharStreams.fromString(input.toUpperCase());
this._lexer = this.createLexerFormCharStream(this._charStreams);
this._tokenStream = new CommonTokenStream(this._lexer);
this._tokenStream.fill();
this._parser = this.createParserFromTokenStream(this._tokenStream);
this._parser.buildParseTree = true;
return this._parser;
}
/**
* If this method is invoked multiple times in a row with the same input,
* it returns the cached parsing result directly,
* unless an errorListener parameter is passed.
* @param input source string
* @param errorListener listen errors
* @returns parserTree
@@ -97,12 +138,12 @@ export default abstract class BasicParser<
input: string,
errorListener?: ErrorHandler<any>
) {
// Avoid parsing the same input repeatedly
// Avoid parsing the same input repeatedly.
if(this._parsedInput === input && !errorListener) {
return;
}
const parser = this.createParser(input);
const parser = this.createParserWithCache(input);
this._parsedInput = input;
parser.removeErrorListeners();
@@ -119,7 +160,7 @@ export default abstract class BasicParser<
}
/**
* validate input string and return syntax errors
* Validate the input string and return syntax errors, if any.
* @param input source string
* @returns syntax errors
*/
@@ -142,25 +183,6 @@ export default abstract class BasicParser<
}
return allTokens
};
/**
* Get Parser instance by input string
* @param input string
*/
public createParser(input: string): P {
this._parserTree = null;
this._charStreams = CharStreams.fromString(input.toUpperCase());
this._lexer = this.createLexerFormCharStream(this._charStreams);
this._tokenStream = new CommonTokenStream(this._lexer);
this._tokenStream.fill();
this._parser = this.createParserFromTokenStream(this._tokenStream);
this._parser.buildParseTree = true;
return this._parser
}
/**
* It converts the parse tree to a string, which is convenient for unit tests.
* @param input source string
@@ -187,10 +209,11 @@ export default abstract class BasicParser<
}
/**
* split input into statements
* Split input into statements.
* If a syntax error exists, it returns null.
* @param input source string
*/
public splitSQL(input): TextSlice[] {
public splitSQLByStatement(input: string): TextSlice[] {
this.parse(input);
const splitListener = this.splitListener;
this.listen(splitListener, this._parserTree);
@@ -265,11 +288,11 @@ export default abstract class BasicParser<
caretTokenIndex = caretTokenIndex - tokenIndexOffset;
const inputSlice = input.slice(lastStatementToken.startIndex);
const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
const lexer = this.createLexerFormCharStream(charStreams);
const lexer = this.createLexer(inputSlice);
const tokenStream = new CommonTokenStream(lexer);
tokenStream.fill();
const parser = this.createParserFromTokenStream(tokenStream);
parser.removeErrorListeners();
parser.buildParseTree = true;
sqlParserIns = parser;
c3Context = parser.program();
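The hunk above is the suggestion path: the statement under the caret is re-parsed in isolation, and removeErrorListeners() drops antlr4's default console error listener so the expected syntax errors of an incomplete statement do not spam the console (the third bullet of this commit). A minimal sketch of that tolerant-parse pattern, assuming antlr4ts and this repo's generated FlinkSqlLexer/FlinkSqlParser (import paths hypothetical):

import { CharStreams, CommonTokenStream } from 'antlr4ts';
import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
import { FlinkSqlParser } from '../lib/flinksql/FlinkSqlParser';

function tolerantParse(inputSlice: string) {
    // Same steps as the diff: uppercase, lex, fill the token stream, parse.
    const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
    const lexer = new FlinkSqlLexer(charStreams);
    const tokenStream = new CommonTokenStream(lexer);
    tokenStream.fill();
    const parser = new FlinkSqlParser(tokenStream);
    parser.removeErrorListeners(); // silence the default console error listener
    parser.buildParseTree = true;
    return parser.program(); // entry rule, as used by the suggestion path
}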


@@ -12,17 +12,17 @@ import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic
import BasicParser from './common/basicParser';
export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext, FlinkSqlParser> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new FlinkSqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
const parser = new FlinkSqlParser(tokenStream);
return parser;
}
public preferredRules = new Set([
protected preferredRules = new Set([
FlinkSqlParser.RULE_tablePath, // table name >> select / insert ...
FlinkSqlParser.RULE_tablePathCreate, // table name >> create
FlinkSqlParser.RULE_databasePath, // database name >> show
@@ -34,7 +34,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
return new FlinkSqlSplitListener();
}
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number,
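Each dialect now implements the same protected contract shown in this file. A sketch of the minimal subclass shape, using hypothetical MyLexer/MyParser/MyProgramContext stand-ins for an antlr4-generated lexer/parser pair (every My* name is invented for illustration):

import { Token } from 'antlr4ts';
import { CandidatesCollection } from 'antlr4-c3';
import { Suggestions } from './common/basic-parser-types';
import BasicParser from './common/basicParser';

export default class MyDialect extends BasicParser<MyLexer, MyProgramContext, MyParser> {
    protected createLexerFormCharStream(charStreams) {
        return new MyLexer(charStreams);
    }
    protected createParserFromTokenStream(tokenStream) {
        return new MyParser(tokenStream);
    }
    // No preferred rules yet, mirroring the simpler dialects below.
    protected preferredRules: Set<number> = new Set();
    protected get splitListener() {
        return null as any; // no split listener implemented in this sketch
    }
    protected processCandidates(
        candidates: CandidatesCollection,
        allTokens: Token[],
        caretTokenIndex: number
    ): Suggestions<Token> {
        // Placeholder: real dialects map antlr4-c3 candidates to suggestions here.
        return { syntax: [], keywords: [] };
    }
}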


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';
export default class GenericSQL extends BasicParser<SqlLexer, ProgramContext, SqlParser> {
public createLexerFormCharStream(charStreams): SqlLexer {
protected createLexerFormCharStream(charStreams): SqlLexer {
const lexer = new SqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream): SqlParser {
protected createParserFromTokenStream(tokenStream): SqlParser {
return new SqlParser(tokenStream);
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
protected get splitListener () {
return null as any;
}
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number


@@ -7,12 +7,12 @@ import { Suggestions } from './common/basic-parser-types';
export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSql> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new HiveSqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
return new HiveSql(tokenStream);
}
@@ -20,9 +20,9 @@ export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, H
return null as any;
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';
export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramContext, PostgreSQLParser> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new PostgreSQLLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
return new PostgreSQLParser(tokenStream);
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
protected get splitListener () {
return null as any;
}
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';
export default class PLSQL extends BasicParser<PlSqlLexer, ProgramContext, PlSqlParser> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new PlSqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
return new PlSqlParser(tokenStream);
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
protected get splitListener () {
return null as any;
}
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number


@@ -6,22 +6,22 @@ import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';
export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext, SparkSqlParser> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new SparkSqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
return new SparkSqlParser(tokenStream);
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
protected get splitListener () {
return null as any;
}
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number


@@ -6,12 +6,12 @@ import BasicParser from './common/basicParser';
import { Suggestions } from './common/basic-parser-types';
export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext, TrinoSqlParser> {
public createLexerFormCharStream(charStreams) {
protected createLexerFormCharStream(charStreams) {
const lexer = new TrinoSqlLexer(charStreams);
return lexer;
}
public createParserFromTokenStream(tokenStream) {
protected createParserFromTokenStream(tokenStream) {
const parser = new TrinoSqlParser(tokenStream);
return parser;
}
@@ -20,9 +20,9 @@ export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext,
return null as any;
}
public preferredRules: Set<number> = new Set();
protected preferredRules: Set<number> = new Set();
public processCandidates(
protected processCandidates(
candidates: CandidatesCollection,
allTokens: Token[],
caretTokenIndex: number
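Taken together, the caller-facing surface after this commit is the parse/validate/split/suggestion family plus the two public factory methods above. A short usage sketch, assuming the validation method is named validate as its doc comment suggests (the name is not visible in this diff):

const trino = new TrinoSQL();

// Renamed from splitSQL: returns a TextSlice per statement,
// or null when the input contains syntax errors.
const slices = trino.splitSQLByStatement('SELECT 1; SELECT 2;');

// Per its doc comment, validate returns the input's syntax errors;
// assuming an empty result means the input is valid.
const errors = trino.validate('SELECT 1;');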