Fix/basic suggestion (#119)
* fix: correct suggestion logic in the multiple-SQL case
* test: add a multiple-SQL test case for suggestion
* feat: export SyntaxContextType as an enum
parent 1b02ff5d75
commit e34a9f6128
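As a quick orientation before the diff: the sketch below shows how a consumer can now compare against the runtime SyntaxContextType enum that comes back with a syntax suggestion, which is the point of the "export SyntaxContextType as enum" change. It is only a sketch; the relative import paths mirror the ones used in the test file further down, the caret values are illustrative, and the TABLE member is assumed to exist alongside the DATABASE member confirmed by the tests.

import FlinkSQL from './src/parser/flinksql';
// After this commit, SyntaxContextType is re-exported as a value (not only a type),
// so it can be used in runtime comparisons like the switch below.
import { CaretPosition, SyntaxContextType } from './src/parser/common/basic-parser-types';

const parser = new FlinkSQL();
const sql = 'SELECT * FROM ';
// Caret right after "FROM " (illustrative position).
const caret: CaretPosition = { lineNumber: 1, column: 15 };

const suggestion = parser.getSuggestionAtCaretPosition(sql, caret)?.syntax?.[0];

switch (suggestion?.syntaxContextType) {
    // DATABASE appears in the tests below; TABLE is assumed to be another enum member.
    case SyntaxContextType.TABLE:
        console.log('complete table names here');
        break;
    case SyntaxContextType.DATABASE:
        console.log('complete database names here');
        break;
    default:
        console.log('no syntax suggestion at this caret');
}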
@@ -14,6 +14,8 @@ export * from './lib/pgsql/PostgreSQLParserListener';
 export * from './lib/pgsql/PostgreSQLParserVisitor';
 export * from './lib/trinosql/TrinoSqlListener';
 export * from './lib/trinosql/TrinoSqlVisitor';
+export { SyntaxContextType } from './parser/common/basic-parser-types'
+
 export type * from './parser/common/basic-parser-types';
 export type { SyntaxError, ParserError } from './parser/common/parserErrorListener';
 
@@ -224,7 +224,7 @@ export default abstract class BasicParser<
 
         this.parse(input);
         let sqlParserIns = this._parser;
-        let allTokens = this.getAllTokens(input);
+        const allTokens = this.getAllTokens(input);
         let caretTokenIndex = findCaretTokenIndex(caretPosition, allTokens);
         let c3Context: ParserRuleContext = this._parserTree;
         let tokenIndexOffset: number = 0;
@@ -238,7 +238,7 @@ export default abstract class BasicParser<
         this.listen(splitListener, this._parserTree);
 
         // If there are multiple statements.
-        if (splitListener.statementsContext.length) {
+        if (splitListener.statementsContext.length > 1) {
             // find statement rule context where caretPosition is located.
             const caretStatementContext = splitListener?.statementsContext.find(ctx => {
                 return caretTokenIndex <= ctx.stop?.tokenIndex && caretTokenIndex >= ctx.start.tokenIndex;
@@ -247,12 +247,9 @@ export default abstract class BasicParser<
             if(caretStatementContext) {
                 c3Context = caretStatementContext
             } else {
-                const lastIndex = splitListener.statementsContext.length > 1
-                    ? 2
-                    : 1;
                 const lastStatementToken= splitListener
-                    .statementsContext[splitListener?.statementsContext.length - lastIndex]
-                    .stop;
+                    .statementsContext[splitListener?.statementsContext.length - 1]
+                    .start;
                 /**
                  * If caretStatementContext is not found and it follows all statements.
                  * Reparses part of the input following the penultimate statement.
@@ -263,11 +260,11 @@ export default abstract class BasicParser<
                  * Save offset of the tokenIndex in the partInput
                  * compared to the tokenIndex in the whole input
                  */
-                tokenIndexOffset = lastStatementToken?.tokenIndex + 1;
+                tokenIndexOffset = lastStatementToken?.tokenIndex;
                 // Correct caretTokenIndex
                 caretTokenIndex = caretTokenIndex - tokenIndexOffset;
 
-                const inputSlice = input.slice(lastStatementToken.stopIndex + 1);
+                const inputSlice = input.slice(lastStatementToken.startIndex);
                 const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
                 const lexer = this.createLexerFormCharStream(charStreams);
                 const tokenStream = new CommonTokenStream(lexer);
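The two fixes above change where the reparse slice starts (the start of the last statement rather than just past the previous statement's stop token) and how the caret token index is rebased into that slice. A minimal standalone sketch of the index arithmetic, with made-up token indexes, for illustration only:

// Illustrative numbers only: suppose the whole input lexes to 28 tokens, the last
// statement ("use cat1.") starts at token index 24, and the caret sits at token 27.
const lastStatementStartTokenIndex = 24; // lastStatementToken.tokenIndex after this change
const caretTokenIndexInWholeInput = 27;

// Offset of the reparsed slice relative to the whole-input token stream.
const tokenIndexOffset = lastStatementStartTokenIndex;

// Rebase the caret into the slice, whose first token has index 0.
const caretTokenIndexInSlice = caretTokenIndexInWholeInput - tokenIndexOffset; // 3

console.log({ tokenIndexOffset, caretTokenIndexInSlice });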
@@ -46,7 +46,7 @@ export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext,
         for (let candidate of candidates.rules) {
             const [ruleType, candidateRule] = candidate;
             const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
-            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + 1);
+            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1);
 
             let syntaxContextType: SyntaxContextType;
             switch (ruleType) {
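This slice fix is the counterpart of the rebasing shown earlier: candidate rule indexes and the caret index are relative to the reparsed slice, so both bounds must be shifted back by tokenIndexOffset before indexing into allTokens, which was built from the whole input. A hedged sketch continuing the illustrative numbers above (the candidate index is an assumed value):

// Stand-in token list for the whole input; only the shape matters here.
const allTokens = Array.from({ length: 28 }, (_, i) => ({ text: `tok${i}` }));
const tokenIndexOffset = 24;            // slice start within the whole input
const candidateRuleStartTokenIndex = 1; // slice-relative (assumed value)
const caretTokenIndex = 3;              // slice-relative caret, as rebased above

// Both bounds are mapped back into whole-input coordinates before slicing.
const startTokenIndex = candidateRuleStartTokenIndex + tokenIndexOffset;                      // 25
const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + tokenIndexOffset + 1); // tokens 25..27
console.log(tokenRanges.map(t => t.text));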
test/parser/flinksql/suggestion/fixtures/multipleSql.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
+CREATE TABLE orders (
+    order_uid BIGINT,
+    product_id BIGINT,
+    price DECIMAL(32, 2),
+    order_time TIMESTAMP(3)
+) WITH (
+    'connector' = 'datagen'
+);
+
+CREATE TABLE orders (
+    order_uid BIGINT,
+    product_id BIGINT,
+    price DECIMAL(32, 2),
+    order_time TIMESTAMP(3)
+) WITH (
+    'connector' = 'datagen'
+);
+
+use cat1.
@@ -4,6 +4,7 @@ import { CaretPosition, SyntaxContextType } from '../../../../src/parser/common/
 import FlinkSQL from '../../../../src/parser/flinksql'
 
 const syntaxSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'syntaxSuggestion.sql'), 'utf-8');
+const multipleSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'multipleSql.sql'), 'utf-8');
 
 describe('Flink SQL Syntax Suggestion', () => {
     const parser = new FlinkSQL();
@@ -84,7 +85,19 @@ describe('Flink SQL Syntax Suggestion', () => {
 
         expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE)
         expect(suggestion?.wordRanges.map(token => token.text))
-            .toEqual([ 'cat', '.' ])
+            .toEqual([ 'cat', '.' ]);
     })
 
+    test("Multiple SQL use database", () => {
+        const pos: CaretPosition = {
+            lineNumber: 19,
+            column: 10,
+        }
+        const suggestion = parser.getSuggestionAtCaretPosition(multipleSql, pos)?.syntax?.[0];
+        console.log(suggestion);
+        expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE);
+        expect(suggestion?.wordRanges.map(token => token.text))
+            .toEqual([ 'cat1', '.' ]);
+    })
+
 })