Skip to content

Commit

Permalink
Merge pull request #287 from xnuinside/release_v_1.7.0
Browse files Browse the repository at this point in the history
release version 1.7.0 with adding support for enum and set column types
  • Loading branch information
xnuinside authored Sep 30, 2024
2 parents d31e690 + 8ad2798 commit 836de21
Show file tree
Hide file tree
Showing 11 changed files with 64,455 additions and 544 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
**v1.7.0**
### Fixes
1. DEFAULT Value with '::' cast parsed correctly now - https://github.com/xnuinside/simple-ddl-parser/issues/286

### Improvements
1. Added support for ENUM & SET column type - https://github.com/xnuinside/simple-ddl-parser/issues/259


**v1.6.1**
### Fixes
1. #289 CREATE SCHEMA IF NOT EXISTS plus comment fail
Expand Down
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -489,6 +489,14 @@ for help with debugging & testing support for BigQuery dialect DDLs:
* https://github.com/kalyan939

## Changelog
**v1.7.0**
### Fixes
1. DEFAULT Value with '::' cast parsed correctly now - https://github.com/xnuinside/simple-ddl-parser/issues/286

### Improvements
1. Added support for ENUM & SET column type - https://github.com/xnuinside/simple-ddl-parser/issues/259


**v1.6.1**
### Fixes
1. #289 CREATE SCHEMA IF NOT EXISTS plus comment fail
Expand Down
14 changes: 14 additions & 0 deletions docs/README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -555,6 +555,20 @@ for help with debugging & testing support for BigQuery dialect DDLs:
Changelog
---------

**v1.7.0**

Fixes
^^^^^


#. DEFAULT Value with '::' cast parsed correctly now - https://github.com/xnuinside/simple-ddl-parser/issues/286

Improvements
^^^^^^^^^^^^


#. Added support for ENUM & SET column type - https://github.com/xnuinside/simple-ddl-parser/issues/259

**v1.6.1**

Fixes
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "simple-ddl-parser"
version = "1.6.1"
version = "1.7.0"
description = "Simple DDL Parser to parse SQL & dialects like HQL, TSQL (MSSQL), Oracle, AWS Redshift, Snowflake, MySQL, PostgreSQL, etc ddl files to json/python dict with full information about columns: types, defaults, primary keys, etc.; sequences, alters, custom types & other entities from ddl."
authors = ["Iuliia Volkova <[email protected]>"]
license = "MIT"
Expand Down
2 changes: 1 addition & 1 deletion simple_ddl_parser/ddl_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def after_columns_tokens(self, t: LexToken) -> LexToken:
t.type = tok.after_columns_tokens.get(t.value.upper(), t.type)
if t.type != "ID":
self.lexer.after_columns = True
elif self.lexer.columns_def:
elif not self.lexer.after_columns and self.lexer.columns_def:
t.type = tok.columns_definition.get(t.value.upper(), t.type)
return t

Expand Down
39 changes: 25 additions & 14 deletions simple_ddl_parser/dialects/sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,13 +241,18 @@ def p_c_type(self, p: List) -> None:
| id DOT id
| tid
| ARRAY
| ENUM LP pid RP
| SET LP pid RP
| c_type ARRAY
| c_type tid
"""
p[0] = {}
p_list = remove_par(list(p))
_type = None
if len(p_list) == 2:
if p_list[1] in ("ENUM", "SET"):
p[0] = {"property": {"values": p_list[-1]}}
_type = p_list[1]
elif len(p_list) == 2:
_type = p_list[-1]
elif isinstance(p[1], str) and p[1].lower() == "encode":
p[0] = {"property": {"encode": p[2]}}
Expand Down Expand Up @@ -603,6 +608,8 @@ def p_create_schema(self, p: List) -> None:
"""create_schema : c_schema id id
| c_schema id id id
| c_schema id
| create_schema COMMENT STRING
| create_schema COMMENT EQ STRING
| c_schema id DOT id
| c_schema IF NOT EXISTS id
| c_schema IF NOT EXISTS id DOT id
Expand All @@ -611,19 +618,19 @@ def p_create_schema(self, p: List) -> None:
p[0] = {}
auth_index = None

if "comment" in p_list[-1]:
del p_list[-1]

self.add_if_not_exists(p[0], p_list)

if isinstance(p_list[1], dict):
p[0] = p_list[1]
self.set_properties_for_schema_and_database(p, p_list)
if "COMMENT" in p_list:
p[0]["comment"] = p_list[-1]
else:
self.set_properties_for_schema_and_database(p, p_list)
elif auth in p_list:
auth_index = p_list.index(auth)
self.set_auth_property_in_schema(p, p_list)

if isinstance(p_list[-1], str):
if not p[0].get("schema_name") and isinstance(p_list[-1], str):
if auth_index:
schema_name = p_list[auth_index - 1]
if schema_name is None:
Expand Down Expand Up @@ -869,10 +876,10 @@ def get_column_and_value_from_alter(p: List) -> Tuple:
return column, value

def p_alter_default(self, p: List) -> None:
"""alter_default : alt_table id id
| alt_table ADD constraint id id
| alt_table ADD id STRING
| alt_table ADD constraint id STRING
"""alter_default : alt_table DEFAULT id
| alt_table ADD constraint DEFAULT id
| alt_table ADD DEFAULT STRING
| alt_table ADD constraint DEFAULT STRING
| alter_default id
| alter_default FOR pid
"""
Expand Down Expand Up @@ -1555,9 +1562,9 @@ def p_default(self, p: List) -> None:
| default id
| DEFAULT ID EQ id_or_string
| DEFAULT funct_expr
| default dot_id
"""
p_list = remove_par(list(p))

default = self.pre_process_default(p_list)
if "DEFAULT" in p_list:
index_default = p_list.index("DEFAULT")
Expand All @@ -1567,6 +1574,7 @@ def p_default(self, p: List) -> None:
default = " ".join(p_list[1:])
if default.isnumeric():
default = int(default)

if isinstance(p[1], dict):
p[0] = self.process_dict_default_value(p_list, default)
else:
Expand Down Expand Up @@ -1596,10 +1604,11 @@ def process_dict_default_value(p_list: List, default: Any) -> Dict:
for i in p_list[2:]:
if isinstance(p_list[2], str):
p_list[2] = p_list[2].replace("\\'", "'")
if i == ")" or i == "(":
data["default"] = str(data["default"]) + f"{i}"
if i == ")" or i == "(" or "::" in p_list[-1]:
item_to_append = f"{i}"
else:
data["default"] = str(data["default"]) + f" {i}"
item_to_append = f" {i}"
data["default"] = str(data["default"]) + item_to_append
data["default"] = data["default"].replace("))", ")")
return data

Expand Down Expand Up @@ -1770,6 +1779,8 @@ def p_ref(self, p: List) -> None:
| ref LP pid RP
| ref ON DELETE id
| ref ON UPDATE id
| ref ON DELETE SET
| ref ON UPDATE SET
| ref DEFERRABLE INITIALLY id
| ref NOT DEFERRABLE
"""
Expand Down
9 changes: 8 additions & 1 deletion simple_ddl_parser/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,13 +179,20 @@ def pre_process_data(self, data):
data = self.process_regex_input(data)
quote_before = r"((?!\'[\w]*[\\']*[\w]*)"
quote_after = r"((?![\w]*[\\']*[\w]*\')))"
num = 0
# add space everywhere except strings
for symbol, replace_to in [
(r"(,)+", " , "),
(r"((\()){1}", " ( "),
(r"((\))){1}", " ) "),
]:
data = re.sub(quote_before + symbol + quote_after, replace_to, data)
num += 1
if num == 2:
                # needed for correct handling of `(`; not needed for the other symbols
quote_after_use = quote_after.replace(")))", "))*)")
else:
quote_after_use = quote_after
data = re.sub(quote_before + symbol + quote_after_use, replace_to, data)

if data.count("'") % 2 != 0:
data = data.replace("\\'", "pars_m_single")
Expand Down
64,681 changes: 64,158 additions & 523 deletions simple_ddl_parser/parsetab.py

Large diffs are not rendered by default.

10 changes: 6 additions & 4 deletions simple_ddl_parser/tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,10 @@
"SALT",
"NO",
"USING",
"DELETE",
"UPDATE",
"DEFAULT",
"COMMENT",
# bigquery
"OPTIONS",
    # snowflake
Expand All @@ -47,16 +51,14 @@


columns_definition = {
"DELETE",
"UPDATE",
"ENUM",
"SET",
"NULL",
"ARRAY",
"DEFAULT",
"COLLATE",
"ENFORCED",
"ENCODE",
"GENERATED",
"COMMENT",
"POLICY",
"MASKING",
"WITH",
Expand Down
91 changes: 91 additions & 0 deletions tests/dialects/test_mysql.py
Original file line number Diff line number Diff line change
Expand Up @@ -631,3 +631,94 @@ def test_table_properties():
}
]
assert result == expected


def test_enum_column_type():
expected = {
"ddl_properties": [],
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"columns": [
{
"check": None,
"default": "'enabled'",
"name": "cancellation_type",
"nullable": False,
"references": None,
"size": None,
"type": "ENUM",
"unique": False,
"values": ["'enabled'", "'disabled'"],
}
],
"index": [],
"partitioned_by": [],
"primary_key": [],
"schema": None,
"table_name": "myset",
"tablespace": None,
}
],
"types": [],
}
ddl = """
CREATE TABLE myset (
cancellation_type enum('enabled','disabled') NOT NULL DEFAULT 'enabled'
);
"""
result = DDLParser(ddl, debug=True).run(
group_by_type=True,
output_mode="mysql",
)
assert result == expected


def test_set_type():
expected = {
"ddl_properties": [],
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"columns": [
{
"check": None,
"default": None,
"name": "randomcolumn",
"nullable": True,
"references": None,
"size": None,
"type": "SET",
"unique": False,
"values": ["'a'", "'b'", "'c'", "'d'"],
}
],
"index": [],
"partitioned_by": [],
"primary_key": [],
"schema": None,
"table_name": "myset",
"tablespace": None,
}
],
"types": [],
}

ddl = """
CREATE TABLE myset (
randomcolumn SET('a', 'b', 'c', 'd')
);
"""
result = DDLParser(ddl, debug=True).run(
group_by_type=True,
output_mode="mysql",
)
assert expected == result
Loading

0 comments on commit 836de21

Please sign in to comment.