feat: spark sql DDL test (#170)

Co-authored-by: liuyi <liuyi@dtstack.com>
This commit is contained in:
琉易 2023-10-09 09:49:48 +08:00 committed by GitHub
parent d1c2920f80
commit d13a92914d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 530 additions and 0 deletions

View File

@ -0,0 +1,20 @@
import SparkSQL from '../../../../src/parser/spark';
import { readSQL } from '../../../helper';

const parser = new SparkSQL();

// NOTE(review): 'alert*' looks like a typo for 'alter*' (these fixtures hold
// ALTER statements). The keys must keep matching the .sql filenames on disk,
// so rename the fixture files and these keys together.
const features = {
    alertDatabase: readSQL(__dirname, 'alertDatabase.sql'),
    alertTable: readSQL(__dirname, 'alertTable.sql'),
    alertView: readSQL(__dirname, 'alertView.sql'),
};

// Fixed copy-paste label: these are ALTER (DDL) tests, not INSERT tests.
// Every statement in every fixture must produce zero validation errors.
describe('SparkSQL Alter Syntax Tests', () => {
    Object.keys(features).forEach((key) => {
        features[key].forEach((sql) => {
            it(sql, () => {
                expect(parser.validate(sql).length).toBe(0);
            });
        });
    });
});

View File

@ -0,0 +1,20 @@
import SparkSQL from '../../../../src/parser/spark';
import { readSQL } from '../../../helper';

const parser = new SparkSQL();

// Each key maps to a fixture file of CREATE statements; key must match the filename.
const features = {
    createDatabase: readSQL(__dirname, 'createDatabase.sql'),
    createFunction: readSQL(__dirname, 'createFunction.sql'),
    createView: readSQL(__dirname, 'createView.sql'),
};

// Fixed copy-paste label: these are CREATE (DDL) tests, not INSERT tests.
// Every statement in every fixture must produce zero validation errors.
describe('SparkSQL Create Syntax Tests', () => {
    Object.keys(features).forEach((key) => {
        features[key].forEach((sql) => {
            it(sql, () => {
                expect(parser.validate(sql).length).toBe(0);
            });
        });
    });
});

View File

@ -0,0 +1,21 @@
import SparkSQL from '../../../../src/parser/spark';
import { readSQL } from '../../../helper';

const parser = new SparkSQL();

// Each key maps to a fixture file of DROP statements; key must match the filename.
const features = {
    dropDatabase: readSQL(__dirname, 'dropDatabase.sql'),
    dropFunction: readSQL(__dirname, 'dropFunction.sql'),
    dropTable: readSQL(__dirname, 'dropTable.sql'),
    dropView: readSQL(__dirname, 'dropView.sql'),
};

// Fixed copy-paste label: these are DROP (DDL) tests, not INSERT tests.
// Every statement in every fixture must produce zero validation errors.
describe('SparkSQL Drop Syntax Tests', () => {
    Object.keys(features).forEach((key) => {
        features[key].forEach((sql) => {
            it(sql, () => {
                expect(parser.validate(sql).length).toBe(0);
            });
        });
    });
});

View File

@ -0,0 +1,13 @@
-- Fixture: Spark SQL ALTER { DATABASE | SCHEMA | NAMESPACE } statements.
-- Each semicolon-terminated statement is fed to the parser as one test case.
-- Syntax ALTER PROPERTIES
-- ALTER { DATABASE | SCHEMA | NAMESPACE } database_name SET { DBPROPERTIES | PROPERTIES } ( property_name = property_value [ , ... ] )
ALTER DATABASE inventory SET DBPROPERTIES ('Edited-by' = 'John', 'Edit-date' = '01/01/2001');
ALTER DATABASE inventory SET PROPERTIES ('Edited-by' = 'John', 'Edit-date' = '01/01/2001');
-- Syntax ALTER LOCATION
-- ALTER { DATABASE | SCHEMA | NAMESPACE } database_name SET LOCATION 'new_location'
ALTER DATABASE inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db';
ALTER SCHEMA inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db';
ALTER NAMESPACE inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db';

View File

@ -0,0 +1,80 @@
-- Fixture: Spark SQL ALTER TABLE statements, one test case per semicolon-terminated
-- statement. Fix: added the missing terminating ';' on three statements (SET SERDE
-- ... WITH SERDEPROPERTIES and both SET LOCATION variants) so a splitter keyed on
-- ';' does not merge them with the comments/statements that follow.
-- Syntax RENAME
-- ALTER TABLE table_identifier RENAME TO table_identifier
ALTER TABLE Student RENAME TO StudentInfo;
-- ALTER TABLE table_identifier partition_spec RENAME TO partition_spec
ALTER TABLE default.StudentInfo PARTITION (age='10') RENAME TO PARTITION (age='15');
ALTER TABLE default.StudentInfo PARTITION (age=10) RENAME TO PARTITION (age=12.323);
-- Syntax ADD COLUMNS
-- ALTER TABLE table_identifier ADD COLUMNS ( col_spec [ , ... ] )
ALTER TABLE StudentInfo ADD COLUMNS (LastName string, DOB timestamp);
-- Syntax DROP COLUMNS
-- ALTER TABLE table_identifier DROP { COLUMN | COLUMNS } [ ( ] col_name [ , ... ] [ ) ]
ALTER TABLE StudentInfo DROP columns (LastName, DOB);
-- Syntax RENAME COLUMN
-- ALTER TABLE table_identifier RENAME COLUMN col_name TO col_name
ALTER TABLE StudentInfo RENAME COLUMN name TO FirstName;
-- Syntax ALTER OR CHANGE COLUMN
-- ALTER TABLE table_identifier { ALTER | CHANGE } [ COLUMN ] col_name alterColumnAction
ALTER TABLE StudentInfo ALTER COLUMN FirstName COMMENT "new comment";
ALTER TABLE StudentInfo CHANGE COLUMN FirstName COMMENT "new comment";
ALTER TABLE StudentInfo ALTER FirstName COMMENT "new comment";
ALTER TABLE StudentInfo CHANGE FirstName COMMENT "new comment";
-- Syntax REPLACE COLUMNS
-- ALTER TABLE table_identifier [ partition_spec ] REPLACE COLUMNS [ ( ] qualified_col_type_with_position_list [ ) ]
ALTER TABLE StudentInfo REPLACE COLUMNS (name string, ID int COMMENT 'new comment');
-- parentheses around the replacement column list are optional
ALTER TABLE StudentInfo REPLACE COLUMNS name string, ID int COMMENT 'new comment';
-- Syntax ADD PARTITION
-- ALTER TABLE table_identifier ADD [IF NOT EXISTS] ( partition_spec [ partition_spec ... ] )
ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18);
ALTER TABLE StudentInfo ADD PARTITION (age=18);
-- Adding multiple partitions to the table
ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18) PARTITION (age=20);
ALTER TABLE StudentInfo ADD PARTITION (age=18) PARTITION (age=20);
-- Syntax DROP PARTITION
-- ALTER TABLE table_identifier DROP [ IF EXISTS ] partition_spec [PURGE]
ALTER TABLE StudentInfo DROP IF EXISTS PARTITION (age=18);
ALTER TABLE StudentInfo DROP PARTITION (age=18);
-- Syntax SET TABLE PROPERTIES
-- ALTER TABLE table_identifier SET TBLPROPERTIES ( key1 = val1, key2 = val2, ... )
ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('comment' = 'A table comment.');
-- Unset Table Properties
-- ALTER TABLE table_identifier UNSET TBLPROPERTIES [ IF EXISTS ] ( key1, key2, ... )
ALTER TABLE dbx.tab1 UNSET TBLPROPERTIES IF EXISTS ('winner');
ALTER TABLE dbx.tab1 UNSET TBLPROPERTIES ('winner');
-- Syntax SET SERDE
-- ALTER TABLE table_identifier [ partition_spec ] SET SERDEPROPERTIES ( key1 = val1, key2 = val2, ... )
ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-- ALTER TABLE table_identifier [ partition_spec ] SET SERDE serde_class_name [ WITH SERDEPROPERTIES ( key1 = val1, key2 = val2, ... ) ]
ALTER TABLE dbx.tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee');
-- Syntax SET LOCATION And SET FILE FORMAT
-- ALTER TABLE table_identifier [ partition_spec ] SET FILEFORMAT file_format
ALTER TABLE loc_orc SET FILEFORMAT orc;
ALTER TABLE p1 partition (month=2, day=2) SET FILEFORMAT parquet;
-- ALTER TABLE table_identifier [ partition_spec ] SET LOCATION 'new_location'
ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways';
ALTER TABLE dbx.tab1 SET LOCATION '/path/to/part/ways';
-- Syntax RECOVER PARTITIONS
-- ALTER TABLE table_identifier RECOVER PARTITIONS
ALTER TABLE dbx.tab1 RECOVER PARTITIONS;

View File

@ -0,0 +1,22 @@
-- Fixture: Spark SQL ALTER VIEW statements; one test case per statement.
-- Syntax RENAME View
-- ALTER VIEW view_identifier RENAME TO view_identifier
ALTER VIEW tempdb1.v1 RENAME TO tempdb1.v2;
-- Syntax SET View Properties
-- ALTER VIEW view_identifier SET TBLPROPERTIES ( property_key = property_val [ , ... ] )
ALTER VIEW tempdb1.v2 SET TBLPROPERTIES ('created.by.user' = "John", 'created.date' = '01-01-2001' );
-- Syntax UNSET View Properties
-- ALTER VIEW view_identifier UNSET TBLPROPERTIES [ IF EXISTS ] ( property_key [ , ... ] )
ALTER VIEW tempdb1.v2 UNSET TBLPROPERTIES IF EXISTS ('created.by.user', 'created.date');
ALTER VIEW tempdb1.v2 UNSET TBLPROPERTIES ('created.by.user', 'created.date');
-- Syntax ALTER View AS SELECT
-- ALTER VIEW view_identifier AS select_statement
ALTER VIEW tempdb1.v2 AS SELECT * FROM tempdb1.v1;

View File

@ -0,0 +1,68 @@
-- Fixture: Spark SQL CREATE TABLE ... USING (data source) statements;
-- one test case per semicolon-terminated statement.
-- Syntax
-- CREATE TABLE [ IF NOT EXISTS ] table_identifier
-- [ ( col_name1 col_type1 [ COMMENT col_comment1 ], ... ) ]
-- USING data_source
-- [ OPTIONS ( key1=val1, key2=val2, ... ) ]
-- [ PARTITIONED BY ( col_name1, col_name2, ... ) ]
-- [ CLUSTERED BY ( col_name3, col_name4, ... )
-- [ SORTED BY ( col_name [ ASC | DESC ], ... ) ]
-- INTO num_buckets BUCKETS ]
-- [ LOCATION path ]
-- [ COMMENT table_comment ]
-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... ) ]
-- [ AS select_statement ]
--Use data source
CREATE TABLE student (id INT, name STRING, age INT) USING CSV;
CREATE TABLE IF NOT EXISTS student (id INT, name STRING, age INT) USING CSV;
--Use data from another table
CREATE TABLE student_copy USING CSV AS SELECT * FROM student;
--Omit the USING clause, which uses the default data source (parquet by default)
CREATE TABLE student (id INT, name STRING, age INT);
--Use parquet data source with parquet storage options
--The columns 'id' and 'name' enable the bloom filter during writing parquet file,
--column 'age' does not enable
CREATE TABLE student_parquet(id INT, name STRING, age INT) USING PARQUET
OPTIONS (
'parquet.bloom.filter.enabled'='true',
'parquet.bloom.filter.enabled#age'='false'
);
--Specify table comment and properties
CREATE TABLE student (id INT, name STRING, age INT) USING CSV
LOCATION 'file:/temp/spark-warehouse/new_inventory.db'
COMMENT 'this is a comment'
TBLPROPERTIES ('foo'='bar');
--Specify table comment and properties with different clauses order
CREATE TABLE student (id INT, name STRING, age INT) USING CSV
TBLPROPERTIES ('foo'='bar')
COMMENT 'this is a comment';
--Create partitioned and bucketed table
CREATE TABLE student (id INT, name STRING, age INT)
USING CSV
PARTITIONED BY (age)
CLUSTERED BY (Id)
SORTED BY (Id ASC) INTO 4 buckets;
--Create partitioned and bucketed table through CTAS
CREATE TABLE student_partition_bucket
USING parquet
PARTITIONED BY (age)
CLUSTERED BY (id) INTO 4 buckets
AS SELECT * FROM student;
--Create bucketed table through CTAS and CTE
CREATE TABLE student_bucket
USING parquet
CLUSTERED BY (id) INTO 4 buckets (
WITH tmpTable AS (
SELECT * FROM student WHERE id > 100
)
SELECT * FROM tmpTable
);

View File

@ -0,0 +1,23 @@
-- Fixture: Spark SQL CREATE { DATABASE | SCHEMA } statements, exercising every
-- combination of IF NOT EXISTS, COMMENT, LOCATION and WITH DBPROPERTIES clauses.
-- Syntax
-- CREATE { DATABASE | SCHEMA } [ IF NOT EXISTS ] database_name [ COMMENT database_comment ] [ LOCATION database_directory ] [ WITH DBPROPERTIES ( property_name = property_value [ , ... ] ) ]
CREATE DATABASE IF NOT EXISTS customer_db;
CREATE DATABASE customer_db;
CREATE SCHEMA IF NOT EXISTS customer_db;
CREATE SCHEMA customer_db;
CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE DATABASE IF NOT EXISTS customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE DATABASE IF NOT EXISTS customer_db WITH DBPROPERTIES (ID=001, Name='John');
CREATE DATABASE customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE DATABASE customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE DATABASE customer_db WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA IF NOT EXISTS customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA IF NOT EXISTS customer_db WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John');
CREATE SCHEMA customer_db WITH DBPROPERTIES (ID=001, Name='John');

View File

@ -0,0 +1,19 @@
-- Fixture: Spark SQL CREATE FUNCTION statements, covering the OR REPLACE,
-- TEMPORARY, IF NOT EXISTS and USING JAR resource-location variants.
-- Syntax
-- CREATE [ OR REPLACE ] [ TEMPORARY ] FUNCTION [ IF NOT EXISTS ] function_name AS class_name [ resource_locations ]
CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR' USING JAR '/tmp/SimpleUdfR.jar';
CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR';
CREATE OR REPLACE FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR';
CREATE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR';
CREATE FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR';
CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR';
CREATE TEMPORARY FUNCTION simple_udf AS 'SimpleUdfR';
CREATE FUNCTION simple_udf AS 'SimpleUdfR';
CREATE FUNCTION simple_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar';
CREATE TEMPORARY FUNCTION simple_temp_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar';

View File

@ -0,0 +1,101 @@
-- Fixture: Spark SQL CREATE TABLE with Hive-format clauses (ROW FORMAT, STORED AS,
-- SerDe, CLUSTERED BY). One test case per semicolon-terminated statement.
-- Fix: added the missing terminating ';' after the two final CLUSTERED BY examples
-- so a splitter keyed on ';' does not merge them with the following comment/statement.
-- Syntax
-- CREATE [ EXTERNAL ] TABLE [ IF NOT EXISTS ] table_identifier
-- [ ( col_name1[:] col_type1 [ COMMENT col_comment1 ], ... ) ]
-- [ COMMENT table_comment ]
-- [ PARTITIONED BY ( col_name2[:] col_type2 [ COMMENT col_comment2 ], ... )
-- | ( col_name1, col_name2, ... ) ]
-- [ CLUSTERED BY ( col_name1, col_name2, ...)
-- [ SORTED BY ( col_name1 [ ASC | DESC ], col_name2 [ ASC | DESC ], ... ) ]
-- INTO num_buckets BUCKETS ]
-- [ ROW FORMAT row_format ]
-- [ STORED AS file_format ]
-- [ LOCATION path ]
-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... ) ]
-- [ AS select_statement ]
--Use hive format
CREATE TABLE student (id INT, name STRING, age INT) STORED AS ORC;
--Use data from another table
CREATE TABLE student_copy STORED AS ORC
AS SELECT * FROM student;
--Specify table comment and properties
CREATE TABLE student (id INT, name STRING, age INT)
COMMENT 'this is a comment'
STORED AS ORC
TBLPROPERTIES ('foo'='bar');
--Specify table comment and properties with different clauses order
CREATE TABLE student (id INT, name STRING, age INT)
STORED AS ORC
TBLPROPERTIES ('foo'='bar')
COMMENT 'this is a comment';
--Create partitioned table
CREATE TABLE student (id INT, name STRING)
PARTITIONED BY (age INT)
STORED AS ORC;
--Create partitioned table with different clauses order
CREATE TABLE student (id INT, name STRING)
STORED AS ORC
PARTITIONED BY (age INT);
--Use Row Format and file format
CREATE TABLE IF NOT EXISTS student (id INT, name STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
STORED AS TEXTFILE;
--Use complex datatype
CREATE EXTERNAL TABLE family(
name STRING,
friends ARRAY<STRING>,
children MAP<STRING, INT>,
address STRUCT<street: STRING, city: STRING>
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\'
COLLECTION ITEMS TERMINATED BY '_'
MAP KEYS TERMINATED BY ':'
LINES TERMINATED BY '\n'
NULL DEFINED AS 'foonull'
STORED AS TEXTFILE
LOCATION '/tmp/family/';
--Use predefined custom SerDe
CREATE TABLE avroExample
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive",
"name": "first_schema",
"type": "record",
"fields": [
{ "name":"string1", "type":"string" },
{ "name":"string2", "type":"string" }
] }');
--Use personalized custom SerDe(we may need to `ADD JAR xxx.jar` first to ensure we can find the serde_class,
--or you may run into `CLASSNOTFOUND` exception)
ADD JAR /tmp/hive_serde_example.jar;
CREATE EXTERNAL TABLE family (id INT, name STRING)
ROW FORMAT SERDE 'com.ly.spark.serde.SerDeExample'
STORED AS INPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleInputFormat'
OUTPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleOutputFormat'
LOCATION '/tmp/family/';
--Use `CLUSTERED BY` clause to create bucket table without `SORTED BY`
CREATE TABLE clustered_by_test1 (ID INT, AGE STRING)
CLUSTERED BY (ID)
INTO 4 BUCKETS
STORED AS ORC;
--Use `CLUSTERED BY` clause to create bucket table with `SORTED BY`
CREATE TABLE clustered_by_test2 (ID INT, NAME STRING)
PARTITIONED BY (YEAR STRING)
CLUSTERED BY (ID, NAME)
SORTED BY (ID ASC)
INTO 3 BUCKETS
STORED AS PARQUET;

View File

@ -0,0 +1,24 @@
-- Fixture: Spark SQL CREATE TABLE ... LIKE statements; one test case per statement.
-- Syntax
-- CREATE TABLE [IF NOT EXISTS] table_identifier LIKE source_table_identifier
-- USING data_source
-- [ ROW FORMAT row_format ]
-- [ STORED AS file_format ]
-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... ) ]
-- [ LOCATION path ]
-- Create table using an existing table
CREATE TABLE Student_Duple like Student;
CREATE TABLE IF NOT EXISTS Student_Duple like Student;
-- Create table like using a data source
CREATE TABLE Student_Duple like Student USING CSV;
-- Table is created as external table at the location specified
CREATE TABLE Student_Duple like Student location '/root1/home';
-- Create table like using a rowformat
CREATE TABLE Student_Duple like Student
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
STORED AS TEXTFILE
TBLPROPERTIES ('owner'='xxxx');

View File

@ -0,0 +1,10 @@
-- Fixture: Spark SQL CREATE VIEW statements, covering OR REPLACE,
-- GLOBAL TEMPORARY, IF NOT EXISTS and column-comment clauses.
-- Syntax
-- CREATE [ OR REPLACE ] [ [ GLOBAL ] TEMPORARY ] VIEW [ IF NOT EXISTS ] view_identifier create_view_clauses AS query
CREATE OR REPLACE VIEW experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' AS SELECT id, name FROM all_employee WHERE working_years > 5;
CREATE VIEW experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' AS SELECT id, name FROM all_employee WHERE working_years > 5;
CREATE OR REPLACE GLOBAL TEMPORARY VIEW IF NOT EXISTS subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id;
CREATE OR REPLACE GLOBAL TEMPORARY VIEW subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id;
CREATE GLOBAL TEMPORARY VIEW IF NOT EXISTS subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id;
CREATE GLOBAL TEMPORARY VIEW subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id;

View File

@ -0,0 +1,14 @@
-- Fixture: Spark SQL DROP { DATABASE | SCHEMA } statements. The two CREATE
-- statements are included as parse-only test cases too (the parser validates
-- syntax; nothing is executed).
-- Syntax
-- DROP { DATABASE | SCHEMA } [ IF EXISTS ] dbname [ RESTRICT | CASCADE ]
CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory';
CREATE SCHEMA inventory_db COMMENT 'This database is used to maintain Inventory';
DROP DATABASE inventory_db CASCADE;
DROP SCHEMA inventory_db CASCADE;
DROP DATABASE IF EXISTS inventory_db CASCADE;
DROP SCHEMA IF EXISTS inventory_db CASCADE;
DROP DATABASE inventory_db RESTRICT;
DROP SCHEMA inventory_db RESTRICT;

View File

@ -0,0 +1,11 @@
-- Fixture: Spark SQL DROP FUNCTION statements, covering the TEMPORARY and
-- IF EXISTS modifiers. Fix: the last statement was an exact duplicate of
-- `DROP TEMPORARY FUNCTION test_avg;`; replaced with the previously untested
-- non-temporary `DROP FUNCTION IF EXISTS` variant.
-- Syntax
-- DROP [ TEMPORARY ] FUNCTION [ IF EXISTS ] function_name
DROP FUNCTION test_avg;
DROP TEMPORARY FUNCTION test_avg;
DROP TEMPORARY FUNCTION IF EXISTS test_avg;
DROP FUNCTION IF EXISTS test_avg;

View File

@ -0,0 +1,10 @@
-- Fixture: Spark SQL DROP TABLE statements, covering qualified names,
-- IF EXISTS, and the PURGE modifier.
-- Syntax
-- DROP TABLE [ IF EXISTS ] table_identifier [ PURGE ]
DROP TABLE userDB.employable;
DROP TABLE IF EXISTS employable;
DROP TABLE employable;
DROP TABLE IF EXISTS employable PURGE;
DROP TABLE employable PURGE;

View File

@ -0,0 +1,9 @@
-- Fixture: Spark SQL DROP VIEW statements, covering plain, qualified,
-- and IF EXISTS forms.
-- Syntax
-- DROP VIEW [ IF EXISTS ] view_identifier
DROP VIEW employeeView;
DROP VIEW userDB.employeeView;
DROP VIEW IF EXISTS employeeView;

View File

@ -0,0 +1,14 @@
-- Fixture: Spark SQL [MSCK] REPAIR TABLE statements, covering the optional
-- MSCK prefix crossed with the ADD/DROP/SYNC PARTITIONS suffixes.
-- Syntax
-- [MSCK] REPAIR TABLE table_identifier [{ADD|DROP|SYNC} PARTITIONS]
REPAIR TABLE t1;
MSCK REPAIR TABLE t1;
REPAIR TABLE t1 ADD PARTITIONS;
REPAIR TABLE t1 DROP PARTITIONS;
REPAIR TABLE t1 SYNC PARTITIONS;
MSCK REPAIR TABLE t1 ADD PARTITIONS;
MSCK REPAIR TABLE t1 DROP PARTITIONS;
MSCK REPAIR TABLE t1 SYNC PARTITIONS;

View File

@ -0,0 +1,6 @@
-- Fixture: Spark SQL TRUNCATE TABLE statements, with and without a partition spec.
-- Syntax
-- TRUNCATE TABLE table_identifier [ partition_spec ]
TRUNCATE TABLE Student partition(age=10);
TRUNCATE TABLE Student;

View File

@ -0,0 +1,4 @@
-- Fixture: Spark SQL USE statement (switch current database).
-- Syntax
-- USE database_name
USE userDB;

View File

@ -0,0 +1,23 @@
import SparkSQL from '../../../../src/parser/spark';
import { readSQL } from '../../../helper';

const parser = new SparkSQL();

// Each key maps to a fixture file of table DDL statements; key must match the filename.
const features = {
    createDataSourceTable: readSQL(__dirname, 'createDataSourceTable.sql'),
    createHiveFormatTable: readSQL(__dirname, 'createHiveFormatTable.sql'),
    createTableLike: readSQL(__dirname, 'createTableLike.sql'),
    repairTable: readSQL(__dirname, 'repairTable.sql'),
    truncateTable: readSQL(__dirname, 'truncateTable.sql'),
};

// Fixed copy-paste label: these are table DDL tests, not INSERT tests.
// Every statement in every fixture must produce zero validation errors.
describe('SparkSQL Table Syntax Tests', () => {
    Object.keys(features).forEach((key) => {
        features[key].forEach((sql) => {
            it(sql, () => {
                expect(parser.validate(sql).length).toBe(0);
            });
        });
    });
});

View File

@ -0,0 +1,18 @@
import SparkSQL from '../../../../src/parser/spark';
import { readSQL } from '../../../helper';

const parser = new SparkSQL();

// Single fixture of USE statements; key must match the filename.
const features = {
    useDatabase: readSQL(__dirname, 'useDatabase.sql'),
};

// Fixed copy-paste label: these are USE statement tests, not INSERT tests.
// Every statement in the fixture must produce zero validation errors.
describe('SparkSQL Use Database Syntax Tests', () => {
    Object.keys(features).forEach((key) => {
        features[key].forEach((sql) => {
            it(sql, () => {
                expect(parser.validate(sql).length).toBe(0);
            });
        });
    });
});