Fix/basic suggestion (#119)

* fix: correct suggestion logic in multiple sql case

* test: add multiple sql test case for suggestion

* feat: export SyntaxContextType as enum
Hayden 2023-06-12 15:21:27 +08:00 committed by GitHub
parent 1b02ff5d75
commit e34a9f6128
5 changed files with 42 additions and 11 deletions


@@ -14,6 +14,8 @@ export * from './lib/pgsql/PostgreSQLParserListener';
 export * from './lib/pgsql/PostgreSQLParserVisitor';
 export * from './lib/trinosql/TrinoSqlListener';
 export * from './lib/trinosql/TrinoSqlVisitor';
+export { SyntaxContextType } from './parser/common/basic-parser-types'
 export type * from './parser/common/basic-parser-types';
 export type { SyntaxError, ParserError } from './parser/common/parserErrorListener';
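For orientation, a minimal consumer-side sketch of what the new value export enables, assuming the package is consumed as 'dt-sql-parser' and re-exports FlinkSQL from this entry point (both assumptions; in-repo code imports FlinkSQL from src/parser/flinksql directly). The suggestion API used here mirrors the test added further down:

// Usage sketch only; the package specifier and the named FlinkSQL export are assumptions.
import { FlinkSQL, SyntaxContextType } from 'dt-sql-parser';

const parser = new FlinkSQL();
const sql = `CREATE TABLE tb1 (id BIGINT) WITH ('connector' = 'datagen');
use cat1.`;

// Caret right after "cat1." on line 2 (lineNumber/column are 1-based).
const suggestion = parser
    .getSuggestionAtCaretPosition(sql, { lineNumber: 2, column: 10 })
    ?.syntax?.[0];

// SyntaxContextType is now a runtime enum, not just a type, so it can be
// compared directly against the returned syntaxContextType.
if (suggestion && suggestion.syntaxContextType === SyntaxContextType.DATABASE) {
    console.log(suggestion.wordRanges.map((token) => token.text)); // e.g. [ 'cat1', '.' ]
}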


@@ -224,7 +224,7 @@ export default abstract class BasicParser<
         this.parse(input);
         let sqlParserIns = this._parser;
-        let allTokens = this.getAllTokens(input);
+        const allTokens = this.getAllTokens(input);
         let caretTokenIndex = findCaretTokenIndex(caretPosition, allTokens);
         let c3Context: ParserRuleContext = this._parserTree;
         let tokenIndexOffset: number = 0;
@@ -238,7 +238,7 @@ export default abstract class BasicParser<
         this.listen(splitListener, this._parserTree);
         // If there are multiple statements.
-        if (splitListener.statementsContext.length) {
+        if (splitListener.statementsContext.length > 1) {
             // find statement rule context where caretPosition is located.
             const caretStatementContext = splitListener?.statementsContext.find(ctx => {
                 return caretTokenIndex <= ctx.stop?.tokenIndex && caretTokenIndex >= ctx.start.tokenIndex;
@@ -247,12 +247,9 @@ export default abstract class BasicParser<
             if(caretStatementContext) {
                 c3Context = caretStatementContext
             } else {
-                const lastIndex = splitListener.statementsContext.length > 1
-                    ? 2
-                    : 1;
                 const lastStatementToken= splitListener
-                    .statementsContext[splitListener?.statementsContext.length - lastIndex]
-                    .stop;
+                    .statementsContext[splitListener?.statementsContext.length - 1]
+                    .start;
                 /**
                  * If caretStatementContext is not found and it follows all statements.
                  * Reparses part of the input following the penultimate statement.
@@ -263,11 +260,11 @@ export default abstract class BasicParser<
                  * Save offset of the tokenIndex in the partInput
                  * compared to the tokenIndex in the whole input
                  */
-                tokenIndexOffset = lastStatementToken?.tokenIndex + 1;
+                tokenIndexOffset = lastStatementToken?.tokenIndex;
                 // Correct caretTokenIndex
                 caretTokenIndex = caretTokenIndex - tokenIndexOffset;
-                const inputSlice = input.slice(lastStatementToken.stopIndex + 1);
+                const inputSlice = input.slice(lastStatementToken.startIndex);
                 const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
                 const lexer = this.createLexerFormCharStream(charStreams);
                 const tokenStream = new CommonTokenStream(lexer);


@@ -46,7 +46,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
         for (let candidate of candidates.rules) {
             const [ruleType, candidateRule] = candidate;
             const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
-            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + 1);
+            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
             let syntaxContextType: SyntaxContextType;
             switch (ruleType) {
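To make the index bookkeeping in the hunks above concrete: when the caret falls after the last statement, only the tail of the input is re-lexed and re-parsed, so token indexes inside that slice are smaller than in the whole input by tokenIndexOffset. The caret index is therefore shifted down into slice coordinates before candidates are collected, and candidate indexes are shifted back up before slicing allTokens, which was produced from the whole input. A small self-contained sketch of that arithmetic, with hypothetical helper names (the real logic is inline in the methods shown above):

// Hypothetical helpers that only illustrate the offset arithmetic; not library API.
interface SimpleToken {
    tokenIndex: number;
    text: string;
}

// Whole-input caret index -> caret index relative to the re-parsed tail,
// mirroring `caretTokenIndex = caretTokenIndex - tokenIndexOffset`.
function toSliceIndex(caretTokenIndex: number, tokenIndexOffset: number): number {
    return caretTokenIndex - tokenIndexOffset;
}

// Candidate rule indexes come back in slice coordinates; shift both ends up
// again before cutting word ranges out of the whole-input token list,
// mirroring `allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1)`.
function toWordRange(
    allTokens: SimpleToken[],
    sliceStartTokenIndex: number,
    sliceCaretTokenIndex: number,
    tokenIndexOffset: number,
): SimpleToken[] {
    return allTokens.slice(
        sliceStartTokenIndex + tokenIndexOffset,
        sliceCaretTokenIndex + tokenIndexOffset + 1,
    );
}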


@@ -0,0 +1,19 @@
+CREATE TABLE orders (
+    order_uid BIGINT,
+    product_id BIGINT,
+    price DECIMAL(32, 2),
+    order_time TIMESTAMP(3)
+) WITH (
+    'connector' = 'datagen'
+);
+
+CREATE TABLE orders (
+    order_uid BIGINT,
+    product_id BIGINT,
+    price DECIMAL(32, 2),
+    order_time TIMESTAMP(3)
+) WITH (
+    'connector' = 'datagen'
+);
+
+use cat1.


@@ -4,6 +4,7 @@ import { CaretPosition, SyntaxContextType } from '../../../../src/parser/common/
 import FlinkSQL from '../../../../src/parser/flinksql'
 const syntaxSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'syntaxSuggestion.sql'), 'utf-8');
+const multipleSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'multipleSql.sql'), 'utf-8');
 
 describe('Flink SQL Syntax Suggestion', () => {
     const parser = new FlinkSQL();
@@ -84,7 +85,19 @@ describe('Flink SQL Syntax Suggestion', () => {
         expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE)
         expect(suggestion?.wordRanges.map(token => token.text))
-            .toEqual([ 'cat', '.' ])
+            .toEqual([ 'cat', '.' ]);
+    })
+
+    test("Multiple SQL use database", () => {
+        const pos: CaretPosition = {
+            lineNumber: 19,
+            column: 10,
+        }
+        const suggestion = parser.getSuggestionAtCaretPosition(multipleSql, pos)?.syntax?.[0];
+        console.log(suggestion);
+        expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE);
+        expect(suggestion?.wordRanges.map(token => token.text))
+            .toEqual([ 'cat1', '.' ]);
     })
 })