a99721162b
* feat: add text and word utils
* feat: add entity collector class
* refactor: rename SyntaxContextType to EntityContextType
* refactor: improve EntityCollector
* feat: improve mysql parser grammar
* feat: add mysql entity collector
* test: mysql entity collector tests
* feat: remove useless method
* feat: improve spark grammar file
* feat: add spark entity collector
* test: spark entity collector unit tests
* feat: remove useless code
* feat: add queryStatement label
* feat: add crateDatabaseStmt
* feat: add trino entity collector
* feat: rename trinosql to trino
* test: trino collect entity unit tests
* test: fix spark test
* feat(impala): support impala entity collector (#256)
* Feat/collect entity hive (#263)
* feat(hive): support hive collect entity
* feat(hive): update tableAllColumns
* feat: replace antlr4ts with antlr4ng
* feat(pgsql): pgsql collect entity (#268)
* feat(pgsql): pgsql collect entity
* feat(pgsql): optimize some names
---------
Co-authored-by: zhaoge <>
* feat: get word text by token.text
* feat: support collect db/function and add splitListener (#270)
* feat: support collect db/function and add splitListener
* feat: remove SplitListener interface in baseParser to use SplitListener in root
* fix(mysql): fix show create xxx not collected as createXXXEntity type
* test: fix pgsql unit tests
* Feat/error recover predicate (#274)
* feat: optimize pgsql grammar
* feat: add sql parser base
* feat: apply SQLParserBase
* feat: add getAllEntities method
* test: test collect table when missing column
* feat: compose collect and suggestion (#276)
* feat: mark stmt which contains caret
* test: correct name of getAllEntities
* test: remove misscolumn unit tests
* test: add suggestionWithEntity tests
* feat: flink collect entity (#277)
* feat: improve flink sql parser
* feat: support flink entity collector
* test: flink entity collect unit test
* feat: move combine entities to parent class
---------
Co-authored-by: 霜序 <976060700@qq.com>
Co-authored-by: XCynthia <942884029@qq.com>
157 lines
6.3 KiB
TypeScript
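The test file below exercises the "compose collect and suggestion" flow described in the commit list above: the same SQL text and caret position are handed to both the syntax-suggestion API and the entity collector. A minimal sketch of that flow, using only APIs that already appear in this file (the SQL string and caret position here are hypothetical):

import SparkSQL from 'src/parser/spark';
import { CaretPosition, EntityContextType } from 'src/parser/common/basic-parser-types';

const spark = new SparkSQL();

// Hypothetical single-line statement with the caret in the empty select list (after "SELECT ").
const sql = 'SELECT  FROM my_db.tb;';
const pos: CaretPosition = { lineNumber: 1, column: 8 };

// Suggestion side: what kind of token is expected at the caret?
const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
const columnSuggestion = syntaxes?.find(
    (syn) => syn.syntaxContextType === EntityContextType.COLUMN
);

// Collection side: entities referenced by the statement that contains the caret,
// e.g. a TABLE entity whose text is 'my_db.tb'.
const entities = spark.getAllEntities(sql, pos);

console.log(columnSuggestion, entities);

The belongStmt.isContainCaret assertions in the tests below correspond to the "mark stmt which contains caret" commit: each collected entity records whether its owning statement contains the caret position.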
import fs from 'fs';
import path from 'path';
import SparkSQL from 'src/parser/spark';
import { CaretPosition, EntityContextType } from 'src/parser/common/basic-parser-types';
import { commentOtherLine } from 'test/helper';

// Fixture with the statements under test; commentOtherLine comments out every
// fixture line except the caret line, so each test parses a single statement.
const syntaxSql = fs.readFileSync(
    path.join(__dirname, 'fixtures', 'suggestionWithEntity.sql'),
    'utf-8'
);

describe('Spark SQL Syntax Suggestion with collect entity', () => {
    const spark = new SparkSQL();

    test('select with no column', () => {
        const pos: CaretPosition = {
            lineNumber: 1,
            column: 8,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(1);
        expect(entities[0].text).toBe('my_db.tb');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();
    });

    test('select with columns with trailing comma', () => {
        const pos: CaretPosition = {
            lineNumber: 3,
            column: 47,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(1);
        expect(entities[0].text).toBe('students');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();
    });

    test('insert into table as select with no column', () => {
        const pos: CaretPosition = {
            lineNumber: 5,
            column: 30,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(2);
        expect(entities[0].text).toBe('insert_tb');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();

        expect(entities[1].text).toBe('from_tb');
        expect(entities[1].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[1].belongStmt.isContainCaret).toBeTruthy();
    });

    test('insert into table as select with trailing comma', () => {
        const pos: CaretPosition = {
            lineNumber: 7,
            column: 39,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(2);
        expect(entities[0].text).toBe('insert_tb');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();

        expect(entities[1].text).toBe('from_tb');
        expect(entities[1].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[1].belongStmt.isContainCaret).toBeTruthy();
    });

    test('create table as select with no column', () => {
        const pos: CaretPosition = {
            lineNumber: 9,
            column: 43,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(2);
        expect(entities[0].text).toBe('sorted_census_data');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE_CREATE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();

        expect(entities[1].text).toBe('unsorted_census_data');
        expect(entities[1].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[1].belongStmt.isContainCaret).toBeTruthy();
    });

    test('create table as select with trailing comma', () => {
        const pos: CaretPosition = {
            lineNumber: 11,
            column: 52,
        };
        const sql = commentOtherLine(syntaxSql, pos.lineNumber);

        const syntaxes = spark.getSuggestionAtCaretPosition(sql, pos)?.syntax;
        const suggestion = syntaxes?.find(
            (syn) => syn.syntaxContextType === EntityContextType.COLUMN
        );
        expect(suggestion).not.toBeUndefined();
        expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]);

        const entities = spark.getAllEntities(sql, pos);
        expect(entities.length).toBe(2);
        expect(entities[0].text).toBe('sorted_census_data');
        expect(entities[0].entityContextType).toBe(EntityContextType.TABLE_CREATE);
        expect(entities[0].belongStmt.isContainCaret).toBeTruthy();

        expect(entities[1].text).toBe('unsorted_census_data');
        expect(entities[1].entityContextType).toBe(EntityContextType.TABLE);
        expect(entities[1].belongStmt.isContainCaret).toBeTruthy();
    });
});