-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_full.sql
--
--
--
create
col_type
<end_user
|col_type_name> as
<number
|text
>
--In Stormbase there are only 2 primary data types: text and number.
--On top of these primary types you define your col_type (the functional data type).
--Why? In SB you can join 2 columns if and only if they have the same col_type.
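--a minimal sketch of this rule (not executed here; stock and prices are hypothetical
--tables, using the col_type t_item_id created just below):
--create table stock (item_id t_item_id, qty number);
--create table prices (item_id t_item_id, price number);
--create view v_stock as select * from stock, prices where stock.item_id=prices.item_id;
--the join is accepted because both item_id columns share the col_type t_item_id;
--had prices.item_id been declared as plain text, the view refresh would fail
--(error 43: joined columns must have same column type)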
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
--the end_user col_type is used for permissions (explained later)
create
col_type
end_user
as
text
;
reponse
success
--Assuming you have an integer value in a column, should you define it as text or number?
--The answer is simple. Are you going to sum this column? If yes, then it is a number column; otherwise it is a text column.
--So if your integer column contains, for example, a status code, use text; if it contains quantities, use number.
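--a minimal sketch of this rule (not executed here; the orders table is hypothetical):
--create table orders (order_id text, status_code text, qty number);
--status_code holds integers but is never summed, so it is text;
--qty is summed in reports, so it is number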
--
create
table
<|merge|big> table_name ( column_name1 <number
|text
|col_type_name>, ...)
-- In Stormbase, a table has a type: raw (no specific type), merge or big.
-- In traditional BI, big tables would be your fact tables (at the center of your star schema).
-- The first column of a merge table becomes the primary key of the table.
-- Merge tables have an "insert or update" logic, based on the primary key.
-- In traditional BI, merge tables would be the dimension tables of your star schema.
--items and customers are merge tables: the last insert wins, updating existing rows via the primary key
--they will be used as dimensions in views
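--a minimal sketch of the merge logic (not executed here; artZ is a hypothetical key):
--insert into items values ('artZ','first label','dept #1',10,1.0);
--insert into items values ('artZ','second label','dept #1',10,2.0);
--after a refresh, v_items would show a single artZ row carrying 'second label',
--exactly like the artA update demonstrated further below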
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
--these tables are raw tables (the first column is not a primary key)
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
--
create
view
view_name as
select
* from
table_name1, table_name2, ... where
table_name1.column_nameA = table_name2.column_nameB ...
--In SB you query views, not tables, so we create "one-table views"
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
--most important: this view links the fact table with its dimensions
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
--
insert
into
table_name values
('col1_value'
,'col2_value'
,...)
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
--
refresh
dirty
view
refresh
dirty
view
;
reponse
success
--
desc
<|col_type
|table
|view
|context|computed_columns|parameter>
desc
col_type
<|verbose
>
desc
table
the_table <|verbose
>
desc
view
the_view <|verbose
>
set
CACHE
='n'
;
reponse
success
desc
;
object_name object_type
t_site_id col_type
t_dept_id col_type
t_item_id col_type
t_customer_id col_type
t_date col_type
t_customer_info col_type
end_user col_type
items table
customers table
item_tags table
fidelity_cards table
item_customer_infos table
sales table
inventory table
v_items view
v_item_tags view
v_fidelity_cards view
v_item_customer_infos view
v_sales view
v_sales_#partition#_00001 view
v_sales_#partition#_00002 view
sales_#partition#_00001 table
v_inventory view
sales_#partition#_00002 table
TCP_PORT_TRANSAC parameter
LOG_VERBOSE parameter
CPU_COUNT parameter
SLEEP_AFTER_SQL parameter
SB_LICENSE_PATH parameter
ARRAY_BLOCK_SIZE_BIG parameter
ARRAY_BLOCK_SIZE_SMALL parameter
PROD_SERVER parameter
MAX_PERF_USE_COMPRESSION parameter
PARETO_LIMIT parameter
PARTITION_LINE_COUNT parameter
CACHE parameter
MAX_PERF_CHANGE_COUNT parameter
ALLOW_WHERE_ERROR parameter
ALLOW_GROUP_BY_ERROR parameter
INIT_FILE_PATH parameter
desc
col_type
;
col_type_name data_type ival_count
t_site_id text 0
t_dept_id text 3
t_item_id text 5
t_customer_id text 3
t_date text 7
t_customer_info text 4
end_user text 0
desc
col_type
verbose
;
col_type_name data_type ival_count
t_site_id text 0
t_dept_id text 3
t_item_id text 5
t_customer_id text 3
t_date text 7
t_customer_info text 4
end_user text 0
sys#type#items#art_label text 3
sys#type#items#avg_week_sales number 1
sys#type#items#sales_price number 3
sys#type#customers#customer_name text 2
sys#type#item_tags#tag text 2
sys#type#sales#sales_qty number 5
sys#type#sales#line_id text 7
sys#type#inventory#inv_qty number 2
desc
table
;
table_name column_count line_count has_delete has_update parent_view_hidden
items 5 3 n n n
customers 2 2 n n n
item_tags 2 2 n n n
fidelity_cards 4 2 n n n
item_customer_infos 5 3 n n n
sales 6 3 n n n
inventory 2 2 n n n
sales_#partition#_00001 6 3 n n n
sales_#partition#_00002 6 1 n n n
desc
table
items;
table_name column_name column_type col_type_name
items item_id text t_item_id
items art_label text sys#type#items#art_label
items dept text t_dept_id
items avg_week_sales number sys#type#items#avg_week_sales
items sales_price number sys#type#items#sales_price
desc
table
item_tags;
table_name column_name column_type col_type_name
item_tags item_id text t_item_id
item_tags tag text sys#type#item_tags#tag
desc
table
item_tags verbose
;
table_name column_name column_type col_type_name
item_tags item_id text t_item_id
item_tags tag text sys#type#item_tags#tag
desc
view
;
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_items 1 3 n n n 10 0 13 13 0 0
v_item_tags 1 2 n n n 10 0 12 12 0 0
v_fidelity_cards 1 2 n n n 10 0 12 12 0 0
v_item_customer_infos 1 3 n n n 10 0 13 13 0 0
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00002 3 1 n n n 10 0 11 11 0 0
v_inventory 2 2 n n n 10 0 12 12 0 0
desc
view
v_items;
view_name table_name column_names
v_items items item_id,art_label,dept,avg_week_sales,sales_price
desc
view
v_item_tags;
view_name table_name column_names
v_item_tags item_tags item_id,tag
desc
view
v_sales;
view_name table_name column_names
v_sales sales item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
v_sales items item_id,art_label,dept,avg_week_sales,sales_price
v_sales customers customer_id,customer_name
desc
view
v_sales verbose
;
view_name table_name column_name column_type col_type_name
v_sales sales item_id text t_item_id
v_sales sales customer_id text t_customer_id
v_sales sales sales_date text t_date
v_sales sales sales_qty number sys#type#sales#sales_qty
v_sales sales line_id text sys#type#sales#line_id
v_sales sales packaging_id text t_item_id
v_sales items item_id text t_item_id
v_sales items art_label text sys#type#items#art_label
v_sales items dept text t_dept_id
v_sales items avg_week_sales number sys#type#items#avg_week_sales
v_sales items sales_price number sys#type#items#sales_price
v_sales customers customer_id text t_customer_id
v_sales customers customer_name text sys#type#customers#customer_name
--
select
* from
v_items;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
insert
into
items values
('artA'
,'### the article A ###'
,'dept #1'
,10,12.123);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_items;
item_id art_label dept avg_week_sales sales_price
artA ### the article A ### dept #1 10 12.123
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
--
save
save
;
reponse
success
--
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 n n n
bounce;
--insert is lost
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 n n n
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
--save data on disk
save
;
reponse
success
bounce;
--insert is not lost
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 n n n
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07_'
,'box1'
);
reponse
success
--table is updated
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 n n n
--but not the view
select
count(*) from
v_sales;
count(*)
7
--because view is dirty
desc
view
callback
where
(1,'v_sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
save
;
reponse
success
bounce;
--view is still dirty if we bounce SB
desc
view
callback
where
(1,'v_sales'
)
;
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
refresh
dirty
view
;
reponse
success
--view is no longer dirty
desc
view
callback
where
(1,'v_sales'
)
;
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
--and count is correct
select
count(*) from
v_sales
;
count(*)
9
insert
into
table_name select
* from
file(<'path/to/file'
|'file_name under CSV_FILES_DIR/table_name'
>)
desc
parameter verbose
callback
where
(1,'FILE_SEPARATOR'
);
param_name current_value comment is_default default_value
FILE_SEPARATOR , one character, CSV file separator y ,
desc
parameter verbose
callback
where
(1,'HEADER'
);
param_name current_value comment is_default default_value
HEADER y y/n, y: CSV files have a header line y y
--path/to/file (contains at least one /)
system '(echo '
'item_id,customer_id,sales_date,sales_qty,line_id'
'>/tmp/foo.csv)'
;
reponse
system '(echo '
'item_id_01,customer_id_01,sales_date_01,1,line_id_01'
'>>/tmp/foo.csv)'
;
reponse
insert
into
sales select
* from
file('/tmp/foo.csv'
);
reponse
success
--using CSV_FILES_DIR
desc
parameter verbose
callback
where
(1,'CSV_FILES_DIR'
);
param_name current_value comment is_default default_value
CSV_FILES_DIR ../STORAGE/CSV string, CSV file location used in "insert from file" when file path does not start with / y ../STORAGE/CSV
system 'mkdir -p ../STORAGE/CSV/sales'
;
reponse
system '(echo '
'item_id,customer_id,sales_date,sales_qty,line_id'
'>../STORAGE/CSV/sales/foo.csv)'
;
reponse
system '(echo '
'item_id_02,customer_id_03,sales_date_04,1,line_id_05'
'>>../STORAGE/CSV/sales/foo.csv)'
;
reponse
insert
into
sales select
* from
file('foo.csv'
);
reponse
success
--the column order does not matter
--in HEADER:y mode, SB is flexible regarding the columns provided in the CSV file
--the header is not case-sensitive
desc
parameter verbose
callback
where
(1,'NEW_COLUMN_NULL_STRING'
);
param_name current_value comment is_default default_value
NEW_COLUMN_NULL_STRING NO_DATA string, default value when a column is added to a table or when a column is not provided in a CSV file y NO_DATA
system '(echo '
'sales_DATE,item_id,sales_qty_new,line_id'
'>/tmp/foo.csv)'
;
reponse
system '(echo '
'sales_date_03,item_id_03,3,line_id_03'
'>>/tmp/foo.csv)'
;
reponse
system '(echo '
'sales_date_03,item_id_04,4,line_id_04'
'>>/tmp/foo.csv)'
;
reponse
insert
into
sales select
* from
file('/tmp/foo.csv'
);
reponse
success
--same with a merge table
system '(echo '
'___art_label,dept,item_id'
'>/tmp/foo.csv)'
;
reponse
system '(echo '
'art_label_01,dept_01,item_id_01'
'>>/tmp/foo.csv)'
;
reponse
system '(echo '
'art_label_02,dept_02,artA'
'>>/tmp/foo.csv)'
;
reponse
insert
into
items select
* from
file('/tmp/foo.csv'
);
reponse
success
--SB is not flexible if HEADER:n
set
HEADER='n'
;
reponse
success
continue_on_error(63);
reponse
success
insert
into
items select
* from
file('/tmp/foo.csv'
);
reponse
error 63 (continue): end of line found where non expected
stop_on_error;
reponse
success
set
HEADER='y'
;
reponse
success
--view the new lines
refresh
dirty
view
;
reponse
success
select
sales#item_id,sales#customer_id,sales_date,sales_qty,line_id from
v_sales callback
sort(5,'asc'
);
sales#item_id sales#customer_id sales_date sales_qty line_id
artA C1 20191231 5 ID01
artB C2 20200102 6 ID02
artB C1 20191231 4 ID03
artB C2 20200102 7 ID04
artC C1 20200102 8 ID05
artA C1 20191231 5 ID06
artA C2 20191231 5 ID07
artA C2 20191231 5 ID07
artA C2 20191231 5 ID07_
item_id_01 customer_id_01 sales_date_01 1 line_id_01
item_id_03 NO_DATA sales_date_03 0 line_id_03
item_id_04 NO_DATA sales_date_03 0 line_id_04
item_id_02 customer_id_03 sales_date_04 1 line_id_05
select
* from
v_items callback
sort(1,'asc'
);
item_id art_label dept avg_week_sales sales_price
artA NO_DATA dept_02 0 0
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
item_id_01 NO_DATA dept_01 0 0
--parameter ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTE
desc
parameter verbose
callback
where
(1,'ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTE'
);
param_name current_value comment is_default default_value
ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTE n y/n, y: ,"abc,def", in a csv file with , separator is interpreted as one value: abc,def y n
system '(echo '
'art_label,dept,item_id'
'>/tmp/foo.csv)'
;
reponse
system '(echo '
'"art_label_03,art_label_03",dept_03,"item_id_03,item_id_03"'
'>>/tmp/foo.csv)'
;
reponse
continue_on_error(63);
reponse
success
insert
into
items select
* from
file('/tmp/foo.csv'
);
reponse
error 63 (continue): end of line found where non expected
stop_on_error;
reponse
success
set
escape_separator_inside_double_quote='y'
;
reponse
success
insert
into
items select
* from
file('/tmp/foo.csv'
);
reponse
success
--buffer load for huge files; in this mode SB is not flexible even if HEADER:y
set
file_load_without_buffer='n'
;
reponse
success
system '(echo '
'item_id,art_label,dept,avg_week_sales,sales_price'
'>/tmp/foo.csv)'
;
reponse
system '(echo '
'"item_id_04,item_id_04","art_label_04,art_label_04",dept_04,99,12.123'
'>>/tmp/foo.csv)'
;
reponse
insert
into
items select
* from
file('/tmp/foo.csv'
);
reponse
success
--view the new lines
refresh
dirty
view
;
reponse
success
select
sales#item_id,sales#customer_id,sales_date,sales_qty,line_id from
v_sales;
sales#item_id sales#customer_id sales_date sales_qty line_id
artA C1 20191231 5 ID01
artB C2 20200102 6 ID02
artB C1 20191231 4 ID03
artB C2 20200102 7 ID04
artC C1 20200102 8 ID05
artA C1 20191231 5 ID06
artA C2 20191231 5 ID07
artA C2 20191231 5 ID07
artA C2 20191231 5 ID07_
item_id_01 customer_id_01 sales_date_01 1 line_id_01
item_id_02 customer_id_03 sales_date_04 1 line_id_05
item_id_03 NO_DATA sales_date_03 0 line_id_03
item_id_04 NO_DATA sales_date_03 0 line_id_04
select
* from
v_items;
item_id art_label dept avg_week_sales sales_price
artA NO_DATA dept_02 0 0
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
item_id_01 NO_DATA dept_01 0 0
item_id_03,item_id_03 art_label_03,art_label_03 dept_03 0 0
item_id_04,item_id_04 art_label_04,art_label_04 dept_04 99 12.123
bounce;
--
--before situation
select
customer_id, count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 4
C2 3
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 n n n
--run delete
delete
sales where
customer_id='C1'
;
reponse
success
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 3 y n n
--apply delete
refresh
dirty
view
;
reponse
success
--delete has been applied
select
customer_id, count(*) from
v_sales group by
customer_id;
customer_id count(*)
C2 5
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
--nothing to delete case (won't trigger table/view rebuild)
delete
sales where
customer_id='C1'
;
reponse
success
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
bounce;
--run delete
delete
sales where
customer_id='C1'
;
reponse
success
bounce;
--apply delete
refresh
dirty
view
;
reponse
success
--delete is lost
select
customer_id, count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 4
C2 5
delete
sales where
customer_id='C1'
;
reponse
success
refresh
dirty
table
;
reponse
success
--delete has not been applied
desc
view
callback
where
(1,'v_sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 y n n 10 0 13 13 0 0
continue_on_error(207);
reponse
success
--#SB no_cache
select
customer_id, count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 4
C2 5
--but table is updated
desc
table
callback
like(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
save
;
reponse
success
bounce;
refresh
dirty
view
;
reponse
success
--delete has been applied
select
customer_id, count(*) from
v_sales group by
customer_id;
customer_id count(*)
C2 5
--
refresh_online
dirty
view
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 2 n n n
refresh_online
dirty
view
;
reponse
success
--a new view is created on top of new data
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 0 y y y 10 0 0 0 1 1
v_sales_#partition#_00003_#refresh_online#_00001 3 2 n n n 10 0 12 12 0 0
--select against v_sales will also be executed against the refresh_online view
select
count(*) from
v_sales;
count(*)
7
--
--let's do it again
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
refresh_online
dirty
view
;
reponse
success
--now there are 2 refresh_online views; the first one is obsolete (check column mp_r_online_count)
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 0 y y y 10 0 0 0 2 2
v_sales_#partition#_00003_#refresh_online#_00001 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 3 n n n 10 0 13 13 0 0
select
count(*) from
v_sales;
count(*)
8
--
--the refresh will recompute the view, and the refresh_online view becomes obsolete
refresh
dirty
view
;
reponse
success
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003_#refresh_online#_00001 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 3 n n n 10 0 13 13 0 0
select
count(*) from
v_sales;
count(*)
8
--
--
--a background thread will clean up (drop) the obsolete refresh_online views
--the cleanup can also be triggered by a refresh_force
refresh_force
dirty
view
;
reponse
success
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 3 n n n 10 0 13 13 0 0
select
count(*) from
v_sales;
count(*)
8
--
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 3 n n n
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 3 n n n
sales_#partition#_00004 6 1 n n n
set
partition_line_count=1;
reponse
success
desc
parameter callback
where
(1,'PARTITION_LINE_COUNT'
);
param_name current_value comment is_default default_value
PARTITION_LINE_COUNT 1 integer, maximum #line in a table partition (SB will create a new partition during insert if needed), 0 means no partition n 3000000000
--next insert will trigger a new partition
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 3 n n n
sales_#partition#_00004 6 1 n n n
sales_#partition#_00005 6 1 n n n
refresh
dirty
view
;
reponse
success
--and corresponding view
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005 3 1 n n n 10 0 11 11 0 0
--a select against v_sales will also be executed against the partitions
--ACCEPT_DIFF_START
--#SB log_verbose explain
select
count(*) from
v_sales;
logs
--#SB explain
select
count(*) from
v_sales
callback
like(1,'partition'
);
logs
--ACCEPT_DIFF_END
--
bounce;
refresh
dirty
view
;
reponse
success
save
;
reponse
success
--
select
count(*) from
v_sales;
count(*)
5
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
--
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 0 y y y 10 0 0 0 0 0
--I have a refresh_online view on top of the table
refresh_online
dirty
view
;
reponse
success
select
count(*) from
v_sales;
count(*)
6
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 0 y y y 10 0 0 0 1 1
v_sales_#partition#_00003_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
--
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
set
PARTITION_LINE_COUNT=1;
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 2 n n n
sales_#partition#_00004 6 1 n n n
refresh_online
dirty
view
;
reponse
success
--I have a refresh_online view on top of the partition
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 0 y y y 10 0 0 0 2 2
v_sales_#partition#_00003_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00004 3 0 y y y 10 0 0 0 1 1
v_sales_#partition#_00004_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
select
count(*) from
v_sales;
count(*)
8
--ACCEPT_DIFF_START
--#SB no_cache explain
select
count(*) from
v_sales callback
like(1,'running select'
);
logs
--ACCEPT_DIFF_END
--
refresh
dirty
view
;
reponse
success
select
count(*) from
v_sales;
count(*)
8
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00003_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00004_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
--
refresh_force
dirty
view
;
reponse
success
select
count(*) from
v_sales;
count(*)
8
desc
view
callback
like(1,'sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
--
refresh
dirty
table
refresh
view
view_name
refresh
permission
refresh
computed_column
refresh
cache
refresh
dirty
view
--under construction
--
--I run a refresh_force to clean the old child view (not needed in client/server mode)
--delete sales where regex(item_id,'.*');
refresh_force
dirty
view
;
reponse
success
-- set partition_line_count=100;
select
list(line_id) from
v_sales;
list(line_id)
ID02,ID04,ID07,ID07_,ID11
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 1 n n n
sales_#partition#_00001 6 1 n n n
sales_#partition#_00002 6 3 n n n
sales_#partition#_00003 6 2 n n n
sales_#partition#_00004 6 1 n n n
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
--a partial delete on a big table is not allowed with refresh_online
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID12'
,'box1'
)('artA'
,'C1'
,'20191231'
,5,'ID13'
,'box1'
);
reponse
success
delete
sales where
line_id='ID12'
;
reponse
success
continue_on_error(141);
reponse
success
refresh_online
dirty
view
;
reponse
error 141 (continue): "refresh_online dirty views" is not allowed
stop_on_error;
reponse
success
refresh_force
dirty
view
;
reponse
success
select
list(line_id) from
v_sales;
list(line_id)
ID02,ID04,ID07,ID07_,ID11,ID13
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 2 n n n 10 0 12 12 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005 3 1 n n n 10 0 11 11 0 0
--a full delete on a big table is allowed with refresh_online
delete
sales where
regex(item_id,'.*'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID14'
,'box1'
);
reponse
success
--refresh_online is accepted
refresh_online
dirty
view
;
reponse
success
select
list(line_id) from
v_sales;
list(line_id)
ID14
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 0 n n y
sales_#partition#_00001 6 0 n n y
sales_#partition#_00002 6 0 n n y
sales_#partition#_00003 6 0 n n y
sales_#partition#_00004 6 0 n n y
sales_#partition#_00005 6 1 n n y
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 y n n 10 0 11 11 1 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 1 0
v_sales_#partition#_00001_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002 3 3 y n n 10 0 13 13 1 0
v_sales_#partition#_00002_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003 3 2 y n n 10 0 12 12 1 0
v_sales_#partition#_00003_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004 3 1 y n n 10 0 11 11 1 0
v_sales_#partition#_00004_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00005 3 1 y n n 10 0 11 11 1 1
v_sales_#partition#_00005_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
--redo
delete
sales where
regex(item_id,'.*'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID15'
,'box1'
)('artA'
,'C1'
,'20191231'
,5,'ID16'
,'box1'
);
reponse
success
refresh_online
dirty
view
;
reponse
success
select
list(line_id) from
v_sales;
list(line_id)
ID15,ID16
--the main view is flagged with parent_view_hidden
--so the select will not see this view
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 0 n n y
sales_#partition#_00001 6 0 n n y
sales_#partition#_00002 6 0 n n y
sales_#partition#_00003 6 0 n n y
sales_#partition#_00004 6 0 n n y
sales_#partition#_00005 6 2 n n y
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00001_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00001_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002 3 3 y n n 10 0 13 13 2 0
v_sales_#partition#_00002_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003 3 2 y n n 10 0 12 12 2 0
v_sales_#partition#_00003_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00004_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00005 3 1 y n n 10 0 11 11 2 2
v_sales_#partition#_00005_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005_#refresh_online#_00002 3 2 n n n 10 0 12 12 0 0
v_sales_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
--after bounce, data is still here
save
;
reponse
success
bounce;
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 0 n n y
sales_#partition#_00001 6 0 n n y
sales_#partition#_00002 6 0 n n y
sales_#partition#_00003 6 0 n n y
sales_#partition#_00004 6 0 n n y
sales_#partition#_00005 6 2 n n y
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00001_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00001_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002 3 3 y n n 10 0 13 13 2 0
v_sales_#partition#_00002_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003 3 2 y n n 10 0 12 12 2 0
v_sales_#partition#_00003_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004 3 1 y n n 10 0 11 11 2 0
v_sales_#partition#_00004_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00005 3 1 y n n 10 0 11 11 2 2
v_sales_#partition#_00005_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005_#refresh_online#_00002 3 2 n n n 10 0 12 12 0 0
v_sales_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
select
list(line_id) from
v_sales;
list(line_id)
ID15,ID16
--after clear_max_perf the non-dirty views are rebuilt
save
;
reponse
success
system './clear_max_perf.sh sure'
;
reponse
sure mode, no confirmation prompt
clearing directory ../STORAGE/MAX_PERF...
done
bounce;
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 0 n n y
sales_#partition#_00001 6 0 n n y
sales_#partition#_00002 6 0 n n y
sales_#partition#_00003 6 0 n n y
sales_#partition#_00004 6 0 n n y
sales_#partition#_00005 6 2 n n y
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 1 y y y 10 0 0 0 2 0
v_sales_#partition#_00001 3 1 y y y 10 0 0 0 2 0
v_sales_#partition#_00001_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00001_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002 3 3 y y y 10 0 0 0 2 0
v_sales_#partition#_00002_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00002_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003 3 2 y y y 10 0 0 0 2 0
v_sales_#partition#_00003_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004 3 1 y y y 10 0 0 0 2 0
v_sales_#partition#_00004_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00004_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
v_sales_#partition#_00005 3 1 y y y 10 0 0 0 2 2
v_sales_#partition#_00005_#refresh_online#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005_#refresh_online#_00002 3 2 n n n 10 0 12 12 0 0
v_sales_#refresh_online#_00001 3 0 n n n 10 0 10 10 0 0
v_sales_#refresh_online#_00002 3 0 n n n 10 0 10 10 0 0
select
list(line_id) from
v_sales;
list(line_id)
ID15,ID16
--refresh_force dirty view cleans everything
refresh_force
dirty
view
;
reponse
success
desc
table
callback
like(1,'sales'
) sort(1,'asc'
);
table_name column_count line_count has_delete has_update parent_view_hidden
sales 6 0 n n n
sales_#partition#_00001 6 0 n n n
sales_#partition#_00002 6 0 n n n
sales_#partition#_00003 6 0 n n n
sales_#partition#_00004 6 0 n n n
sales_#partition#_00005 6 2 n n n
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 0 n n n 10 0 10 10 2 0
v_sales_#partition#_00001 3 0 n n n 10 0 10 10 2 0
v_sales_#partition#_00001_#refresh_online#_00001 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00001_#refresh_online#_00002 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00002 3 0 n n n 10 0 10 10 2 0
v_sales_#partition#_00002_#refresh_online#_00001 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00002_#refresh_online#_00002 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00003 3 0 n n n 10 0 10 10 2 0
v_sales_#partition#_00003_#refresh_online#_00001 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00003_#refresh_online#_00002 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00004 3 0 n n n 10 0 10 10 2 0
v_sales_#partition#_00004_#refresh_online#_00001 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00004_#refresh_online#_00002 3 0 y n n 10 0 10 10 0 0
v_sales_#partition#_00005 3 2 n n n 10 0 12 12 0 0
v_sales_#refresh_online#_00001 3 0 y n n 10 0 10 10 0 0
v_sales_#refresh_online#_00002 3 0 y n n 10 0 10 10 0 0
select
list(line_id) from
v_sales;
list(line_id)
ID15,ID16
-- ###########################
-- RUNNING save.sql
save
;
reponse
success
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_basics.sql
--
--
select
<*|table1.*|col1|table1#col1>, ... from
...
desc
view
;
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_items 1 3 n n n 10 0 13 13 0 0
v_item_tags 1 2 n n n 10 0 12 12 0 0
v_fidelity_cards 1 2 n n n 10 0 12 12 0 0
v_item_customer_infos 1 3 n n n 10 0 13 13 0 0
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00002 3 1 n n n 10 0 11 11 0 0
v_inventory 2 2 n n n 10 0 12 12 0 0
select
* from
v_items;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
select
* from
v_item_tags;
item_id tag
artA tag #1
artA tag #2
select
* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 artA the article A dept #1 10 1.500 C1 customer #1
artB C2 20200102 6 ID02 # artB the article B dept #2 10 3.200 C2 customer #2
artB C1 20191231 4 ID03 # artB the article B dept #2 10 3.200 C1 customer #1
artB C2 20200102 7 ID04 box1 artB the article B dept #2 10 3.200 C2 customer #2
# C1 20200102 8 ID05 # # # # 0 0 C1 customer #1
artA C1 20191231 5 ID06 box1 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 artA the article A dept #1 10 1.500 C2 customer #2
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
# C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
--
select
col1, col2, ... from
... group by
col1, col2, ...
group by
clause can be omitted
select
item_id, count(*) from
v_items group by
item_id;
item_id count(*)
artA 1
artB 1
box1 1
select
item_id, count(*) from
v_item_tags group by
item_id;
item_id count(*)
artA 2
select
customer_id, dept, avg_week_sales, count(*) from
v_sales group by
customer_id, dept, avg_week_sales;
customer_id dept avg_week_sales count(*)
C1 dept #1 10 2
C2 dept #2 10 2
C1 dept #2 10 1
C1 # # 1
C2 dept #1 10 1
--group by can be omitted if the grouping is on text columns
select
customer_id, dept, count(*) from
v_sales;
customer_id dept count(*)
C1 dept #1 2
C2 dept #2 2
C1 dept #2 1
C1 # 1
C2 dept #1 1
--
select
.. from
.. <callback
|cb
> callback_function1 callback_function2 ...
select
customer_id, count(*) from
v_sales group by
customer_id
callback
sort(2,'desc'
);
customer_id count(*)
C1 4
C2 3
select
customer_id, count(*) from
v_sales group by
customer_id
callback
sort(2,'asc'
) limit(1);
customer_id count(*)
C2 3
select
customer_id, count(*) from
v_sales group by
customer_id
cb
sort(2,'asc'
) limit(1);
customer_id count(*)
C2 3
select
customer_id, count(*)as
nb_sales from
v_sales group by
customer_id
cb
sort('nb_sales'
,'asc'
) limit(1);
customer_id nb_sales
C2 3
--
<sum|count|countdistinct|min|max|avg>
--sum (number columns only)
select
customer_id, sum(sales_qty), sum(avg_week_sales) from
v_sales group by
customer_id;
customer_id sum(sales_qty) sum(avg_week_sales)
C1 22 30
C2 18 30
--count
select
customer_id, count(item_id), count(sales_qty), count(avg_week_sales) from
v_sales group by
customer_id;
customer_id count(item_id) count(sales_qty) count(avg_week_sales)
C1 3 4 3
C2 3 3 3
--countdistinct
select
customer_id, countdistinct(item_id), countdistinct(sales_qty), countdistinct(avg_week_sales) from
v_sales group by
customer_id;
customer_id countdistinct(item_id) countdistinct(sales_qty) countdistinct(avg_week_sales)
C1 2 3 1
C2 2 3 1
--min
select
customer_id, min(item_id), min(sales_qty), min(avg_week_sales) from
v_sales group by
customer_id;
customer_id min(item_id) min(sales_qty) min(avg_week_sales)
C1 artA 4 10
C2 artA 5 10
--max
select
customer_id, max(item_id), max(sales_qty), max(avg_week_sales) from
v_sales group by
customer_id;
customer_id max(item_id) max(sales_qty) max(avg_week_sales)
C1 artB 8 10
C2 artB 7 10
--avg (number columns only)
select
customer_id, avg(sales_qty), avg(avg_week_sales) from
v_sales group by
customer_id;
customer_id avg(sales_qty) avg(avg_week_sales)
C1 5.500 10
C2 6 10
--
<unique|countsequence|minstr|maxstr|list|ival|ivallist|ivalbin|p>
select
customer_id, unique(line_id), unique(item_id), unique(customer_id) from
v_sales group by
customer_id;
customer_id unique(line_id) unique(item_id) unique(customer_id)
C1 # artA C1
C2 ID07 # C2
select
customer_id, countsequence(item_id) from
v_sales group by
customer_id;
customer_id countsequence(item_id)
C1 3
C2 2
select
customer_id, minstr(item_id) from
v_sales group by
customer_id;
customer_id minstr(item_id)
C1 artA
C2 artA
select
customer_id, maxstr(item_id) from
v_sales group by
customer_id;
customer_id maxstr(item_id)
C1 artB
C2 artB
select
customer_id, list(item_id) from
v_sales group by
customer_id;
customer_id list(item_id)
C1 artA,artB
C2 artA,artB
select
customer_id, ival(item_id), ival(customer_id) from
v_sales group by
customer_id;
customer_id ival(item_id) ival(customer_id)
C1 11 11
C2 # 12
select
customer_id, ivallist(item_id), ivallist(customer_id) from
v_sales group by
customer_id;
customer_id ivallist(item_id) ivallist(customer_id)
C1 11/12 11
C2 11/12 12
select
customer_id, ivalbin(item_id), ivalbin(customer_id) from
v_sales group by
customer_id;
customer_id ivalbin(item_id) ivalbin(customer_id)
C1 6144 2048
C2 6144 4096
select
customer_id, p(item_id), p(customer_id) from
v_sales group by
customer_id;
customer_id
select
item_id,sales#item_id,items#item_id from
v_sales;
item_id sales#item_id items#item_id
artA artA artA
artB artB artB
artB artB artB
artB artB artB
# artC #
artA artA artA
artA artA artA
select
count(*) from
v_sales and
item_id='artC'
;
count(*)
select
count(*) from
v_sales and
items#item_id='artC'
;
count(*)
select
count(*) from
v_sales and
sales#item_id='artC'
;
count(*)
1
''
insert
into
item_tags values
('artD'
,'A'
'__B'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_item_tags where
item_id='artD'
;
item_id tag
artD A'__B
select
* from
v_item_tags where
tag='A'
'__B'
;
item_id tag
artD A'__B
Lua is a programming language.
Stormbase is not developed in Lua; it is developed in native C.
But in some cases (callbacks and
where
clauses, see below) Stormbase calls Lua code.
Standard Lua code (provided in the Stormbase installer) is located in _FUNCTIONS/stormbase.lua.
Custom Lua code can also be added in _FUNCTIONS/custom.lua.
--utility functions list
desc
function
callback
where
(2,'utility'
);
function_name function_type signature comment
ratio utility ratio( value1, value2) returns value1/value2 or 0 in limit cases (division by zero, nil value)
round utility round( v , n_digits ) returns math.floor(v+0.5), with n_digits precision, n_digits defaults to 0
decode utility decode( x , y , a , b ) returns a if x equals y , b otherwise
least utility least( x , y ) checks that x and y are numbers and returns least
greatest utility greatest( x , y ) checks that x and y are numbers and returns greatest
ceil utility ceil( x ) checks that x is number and calls math.ceil
floor utility floor( x ) checks that x is number and calls math.floor
check utility check( cond ) boolean function
instr utility instr( str1 , str2 ) calls string.find(str1, str2), returns -1 if not found
length utility length( tbl ) returns the number of keys of a key/value table
merge utility merge( tbl1, tbl2 ) sums the values of 2 key/value tables
stringify utility stringify( tbl ) stringify a key/value table
join utility join(idx_values, arr_fields, _sep, _type, fn) joins an array; _sep, _type and fn are optional
concat utility concat( a , b, c, d, e, f ) returns a .. nvl(b,"") .. nvl(c,"") .. nvl(d,"") .. nvl(e,"") .. nvl(f,"")
nvl utility nvl( a , b ) returns b if a==nil
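--a minimal sketch (not executed here): utility functions can be called from the Lua
--snippets passed to callbacks such as add_number (listed below), where resultset
--columns are read as line.<col_name>
--select customer_id, sum(sales_qty) as qty from v_sales group by customer_id
--cb add_number('half_qty','return ratio(line.qty, 2)');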
--callback functions list
desc
function
callback
where
(2,'callback'
);
function_name function_type signature comment
add_number callback add_number( name, str_fn ) adds a number column to resultset
add_text callback add_text( name, str_fn ) adds a text column to resultset
select_from callback select_from( 'group(<column_pos/column_name>) <sum/count/avg/min/max>(<column_pos/column_name>)' ) does a select ... from (the_resultset)
sub_select callback sub_select( 'group(<column_pos/column_name>) <sum/count/avg/min/max>(<column_pos/column_name>)' ) adds a sub select column
where callback where( <column_pos/column_name> , 'value' ) does an "equal where clause" on the resultset
like callback like( <column_pos/column_name> , 'value1!value2' ) does a "like where clause" on the resultset
grep callback grep( 'value1!value2' ) does a "like where clause" on the resultset
limit callback limit( rownum ) limits the resultset to rownum lines
sort callback sort( <column_pos1/column_name1> , <'asc'/'desc'> , <column_pos2/column_name2> , ... ) sorts the resultset
export callback export( path/to/file , header_yn )
open_join callback open_join( 'with1,with2..' , 'join_col1,join_col2..' , 'new_with_name') does an open join on withs
rename callback rename( 'col_name' , 'new_col_name' ) renames a column
keep_columns callback keep_columns( 'col1,col2...' ) keeps the column list
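--a minimal sketch (not executed here): callbacks chain left to right, so an aggregate
--can be renamed, sorted on by name, then trimmed to the chosen columns
--select customer_id, sum(sales_qty) from v_sales group by customer_id
--cb rename('sum(sales_qty)','qty') sort('qty','desc') keep_columns('customer_id,qty');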
select
item_id,customer_id,sum(sales_qty) from
v_sales group by
item_id,customer_id callback
sort(3,'asc'
);
item_id customer_id sum(sales_qty)
artB C1 4
artA C2 5
# C1 8
artA C1 10
artB C2 13
select
item_id,sum(sales_qty) from
v_sales group by
item_id callback
sort(2,'asc'
);
item_id sum(sales_qty)
# 8
artA 15
artB 17
select
customer_id,sum(sales_qty) from
v_sales group by
customer_id callback
sort(2,'asc'
);
customer_id sum(sales_qty)
C2 18
C1 22
select
item_id,customer_id,sales_qty from
v_sales
callback
select_from('group(1) group(2) sum(3)'
) sort(3,'asc'
);
group_item_id group_customer_id sum_sales_qty
artB C1 4
artA C2 5
# C1 8
artA C1 10
artB C2 13
select
item_id,customer_id,sales_qty from
v_sales
callback
select_from('group(1) sum(3)'
) sort(2,'asc'
);
group_item_id sum_sales_qty
# 8
artA 15
artB 17
select
item_id,customer_id,sales_qty from
v_sales
callback
select_from('group(2) sum(3)'
) sort(2,'asc'
);
group_customer_id sum_sales_qty
C2 18
C1 22
select
item_id,customer_id,sales_qty from
v_sales
callback
sub_select('group(1) group(2) sum(3)'
);
item_id customer_id sales_qty group_item_id group_customer_id sum_sales_qty
artA C1 5 artA C1 10
artB C2 6 artB C2 13
artB C1 4 artB C1 4
artB C2 7 artB C2 13
# C1 8 # C1 8
artA C1 5 artA C1 10
artA C2 5 artA C2 5
select
item_id,customer_id,sales_qty from
v_sales
callback
sub_select('group(2) sum(3)'
);
item_id customer_id sales_qty group_customer_id sum_sales_qty
artA C1 5 C1 22
artB C2 6 C2 18
artB C1 4 C1 22
artB C2 7 C2 18
# C1 8 C1 22
artA C1 5 C1 22
artA C2 5 C2 18
select
sales_date, dept, sum(sales_qty) from
v_sales;
sales_date dept sum(sales_qty)
20191231 dept #1 15
20200102 dept #2 13
20191231 dept #2 4
20200102 # 8
select
dept, sum(sales_qty) from
v_sales;
dept sum(sales_qty)
dept #1 15
dept #2 17
# 8
select
sum(sales_qty) from
v_sales;
sum(sales_qty)
40
with
a as
(select
sales_date, dept, sum(sales_qty) as
sales_qty from
v_sales),
b as
(select
dept, sum(sales_qty) as
dept_sales_qty from
v_sales),
c as
(select
sum(sales_qty) as
tot_sales_qty from
v_sales),
select
from
no_view, * function
fn_nothing
cb
open_join('a,b'
,'dept'
,'a_b'
)
open_join('a_b,c'
)
add_number('dept_share'
,'return round(line.sales_qty/line.dept_sales_qty*100,3)'
)
add_number('total_share'
,'return line.sales_qty/line.tot_sales_qty*100'
)
keep_columns('sales_date,dept,sales_qty,dept_sales_qty,dept_share,total_share'
)
;
sales_date dept sales_qty dept_sales_qty dept_share total_share
20200102 # 8 8 100 20
20200102 dept #2 13 17 76.471 32.500
20191231 dept #2 4 17 23.529 10
20191231 dept #1 15 15 100 37.500
...
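--a minimal lua sketch of the open_join + add_number chain above: look up the dept total
--of each line of "a" in "b", then derive dept_share (the tables and the single join
--column are illustrative assumptions; open_join also names the result and accepts a
--list of join columns):
local a = { { dept = "dept #1", sales_qty = 15 }, { dept = "dept #2", sales_qty = 4 } }
local b = { ["dept #1"] = 15, ["dept #2"] = 17 }   -- dept -> dept_sales_qty
for _, line in ipairs(a) do
  line.dept_sales_qty = b[line.dept]
  print(line.dept, line.sales_qty, line.sales_qty / line.dept_sales_qty * 100)
end
--> dept #1 15 100 / dept #2 4 ~23.529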
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date text
, sales_qty number
, line_id text
, packaging_id t_item_id, sales_qty2 number
);
reponse
success
--commit changes
refresh
dirty
view
;
reponse
success
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
# C1 20200102 8 ID05 # 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID09'
,''
,55);
reponse
success
refresh
dirty
view
;
reponse
success
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
# C1 20200102 8 ID05 # 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
artA C2 20191231 5 ID09 # 55
create
col_type
t_a as
text
;
reponse
success
create
col_type
t_c as
text
;
reponse
success
create
merge table
dim_a (a t_a, aa text
);
reponse
success
create
big table
foo (a t_a, b number
, c text
);
reponse
success
create
view
v_foo as
select
* from
foo, dim_a where
foo.a=dim_a.a;
reponse
success
refresh
dirty
view
;
reponse
success
insert
into
foo values
('a1'
,1,'c1'
);
reponse
success
insert
into
foo values
('a2'
,2,'c2'
);
reponse
success
insert
into
dim_a values
('a1'
,'aa1'
)('a2'
,'aa2'
)('a3'
,'aa3'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
a b c a aa
a1 1 c1 a1 aa1
a2 2 c2 a2 aa2
--col_type change --> forbidden
continue_on_error(121);
reponse
success
create
big table
foo (a t_a, b number
, c t_c);
reponse
error 121 (continue): changing column type during table alter is not allowed
--col position change and/or add column --> ok
create
big table
foo (c text
, a t_a, b number
, d text
);
reponse
success
desc
table
cb
grep('foo'
);
table_name column_count line_count has_delete has_update parent_view_hidden
foo 3 2 n n n
__NEW_foo__ 4 0 n n n
desc
view
cb
grep('foo'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_foo 2 2 y n n 10 0 12 12 0 0
--the view must be recreated because the joined columns' positions have changed, hence the error below
continue_on_error(43);
reponse
success
refresh
dirty
view
;
reponse
error 43 (continue): joined columns must have same column type
desc
view
cb
grep('foo'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_foo 2 2 y n n 10 0 12 12 0 0
continue_on_error(1);
reponse
success
create
view
v_foo as
select
* from
foo, dim_a where
foo.a=dim_a.a;
reponse
success
refresh
dirty
view
;
reponse
success
desc
table
cb
grep('foo'
);
table_name column_count line_count has_delete has_update parent_view_hidden
foo 4 2 n n n
desc
view
cb
grep('foo'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_foo 2 2 n n n 10 0 12 12 0 0
select
* from
v_foo;
c a b d a aa
c1 a1 1 NO_DATA a1 aa1
c2 a2 2 NO_DATA a2 aa2
insert
into
foo values
('c3'
,'a3'
,3,'d3'
);
reponse
success
insert
into
foo values
('c4'
,'a4'
,4,'d4'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
c a b d a aa
c1 a1 1 NO_DATA a1 aa1
c2 a2 2 NO_DATA a2 aa2
c3 a3 3 d3 a3 aa3
c4 # 4 d4 # #
create
big table
foo (c text
, d text
, b number
);
reponse
success
create
view
v_foo as
select
* from
foo;
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
c d b
c1 NO_DATA 1
c2 NO_DATA 2
c3 d3 3
c4 d4 4
--
select
.. from
...
<where
|and
> where_clause1
and
where_clause2
and
where_clause3
..
column_name='value'
column_name in ('value1'
, 'value2'
, ...)
column_name nin ('value1'
, 'value2'
, ...)
column_name like 'value'
select
* from
v_sales where
dept='dept #1'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
--SB allows you to use "and" instead of "where", because it is easier
select
* from
v_sales and
dept='dept #1'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
select
* from
v_sales and
dept in ('dept #1'
);
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
select
* from
v_sales and
dept nin ('dept #2'
);
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
# C1 20200102 8 ID05 # 0 # # # 0 0 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
--like is not case sensitive
select
* from
v_sales and
dept like 'DePt #1'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
--
any Lua boolean function
can be used in a where
clause using one and
only one column
--boolean functions list
desc
function
callback
where
(2,'boolean'
);
function_name function_type signature comment
regex boolean regex( str , pattern ) returns true if string.match( str , pattern ) matches (uppercase comparison) more info http://www.lua.org/manual/5.2/manual.html#6.4.1
is_not boolean is_not( boolean_value ) returns not boolean_value
regex_or boolean function regex_or( str , pattern, pattern2, ...) returns regex( str , pattern ) or regex( str , pattern2 ) ...
btw boolean btw( x , y , z ) returns true if y < x and x <= z (number comparison)
btwe boolean btwe( x , y , z ) returns true if y <= x and x <= z (number comparison)
btwe_s boolean btwe_s( x , y , z ) returns true if y <= x and x <= z (string comparison)
nbtwe boolean nbtwe( x , y , z ) not between or equal, returns false if y <= x and x <= z
btwe_or boolean function btwe_or( x , y , z , y2 , z2 ... ) returns btwe( x , y , z ) or btwe( x , y2 , z2 )
nbtwe_and boolean function nbtwe_and( x , y , z , y2 , z2 ... ) returns nbtwe( x , y , z ) and nbtwe( x , y2 , z2 )
gt boolean gt( x , y ) greater than, returns true if x > y (number comparison)
gte boolean gte( x , y ) greater than or equal, returns true if x >= y (number comparison)
lt boolean lt( x , y ) lower than, returns true if x < y (number comparison)
lte boolean lte( x , y ) lower than or equal, returns true if x <= y (number comparison)
gt_s boolean gt_s( x , y ) greater than, returns true if x > y (string comparison)
gte_s boolean gte_s( x , y ) greater than or equal, returns true if x >= y (string comparison)
lt_s boolean lt_s( x , y ) lower than, returns true if x < y (string comparison)
lte_s boolean lte_s( x , y ) lower than or equal, returns true if x <= y (string comparison)
e boolean e( x , y ) equal, returns true if x==y
ne boolean ne( x , y ) not equal, returns true if x!=y
isin boolean isin( x , { val1, val2, .. } ) is in
isnin boolean isnin( x , { val1, val2, .. } ) is not in
mod boolean mod( x , y , z ) (x % y) == z
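--hedged lua re-implementations of a few of the boolean functions above, matching the
--documented semantics (my reading of the comments in the table, not SB source code):
local function btwe(x, y, z) return y <= x and x <= z end
local function nbtwe(x, y, z) return not btwe(x, y, z) end
local function regex(str, pattern)             -- "uppercase comparison"
  return string.match(string.upper(str), string.upper(pattern)) ~= nil
end
local function isin(x, list)
  for _, v in ipairs(list) do if v == x then return true end end
  return false
end
print(btwe(5, 1, 5), nbtwe(5, 1, 5))                          --> true  false
print(regex("artA", "art"), isin("artA", { "artA", "artB" })) --> true  true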
--
select
.. from
view1
and
view2.col='something'
and
view2!.col='something'
--
--
--exists
select
sales.* from
v_sales
where
v_item_tags.tag='tag #1'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
artA C2 20191231 5 ID09 # 55
--not exists
select
sales.* from
v_sales
where
v_item_tags!.tag='tag #1'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
# C1 20200102 8 ID05 # 0
with
with_name1 ( col_type_check1 ),
with_name2 ( col_type_check2 ),
...
select
.. from
view1, with_name1, with_name2! ..
select
item_id, dept from
v_items cb
sort(1,'asc'
);
item_id dept
artA dept #1
artB dept #2
box1 packaging
--sales in dept #1
with
a as
(select
item_id, count(item_id) from
v_items where
dept='dept #1'
group by
item_id)
select
sales.* from
v_sales, a
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
artA C2 20191231 5 ID09 # 55
--sales not in dept #1
with
a as
(select
item_id, count(item_id) from
v_items where
dept='dept #1'
group by
item_id)
select
sales.* from
v_sales, a!
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
# C1 20200102 8 ID05 # 0
--using alias
--if the alias can't be found, the "first column with same col_type" rule applies
with
a as
(select
item_id as
columm_that_does_not_exists, count(item_id) from
v_items where
dept='dept #1'
group by
item_id)
select
sales.* from
v_sales, a
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
artA C2 20191231 5 ID09 # 55
--no packaging in dept #1
with
a as
(select
item_id as
packaging_id, count(item_id) from
v_items where
dept='dept #1'
group by
item_id)
select
sales.* from
v_sales, a
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
--sales of items whose packaging belongs to the packaging dept
with
a as
(select
item_id as
packaging_id, count(item_id) from
v_items where
dept='packaging'
group by
item_id)
select
sales.* from
v_sales, a
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artB C2 20200102 7 ID04 box1 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
with
with_name1 ( col_type_check1 ),
with_name2 ( select
col, count(col) from
view2, with_name1 where
... group by
col ),
...
select
.. from
view3, with_name2
select
item_id, dept, customer_id, sales_qty from
v_sales;
item_id dept customer_id sales_qty
artA dept #1 C1 5
artB dept #2 C2 6
artB dept #2 C1 4
artB dept #2 C2 7
# # C1 8
artA dept #1 C1 5
artA dept #1 C2 5
artA dept #1 C2 5
select
* from
v_item_tags;
item_id tag
artA tag #1
artA tag #2
artD A'__B
--in which other departments do customers that buy from dept #1 also buy?
--the group of customers is "customers that buy from dept #1"
with
group_of_customers as
(select
customer_id, count(customer_id) from
v_sales and
dept='dept #1'
group by
customer_id)
select
dept,count(*),list(customer_id) from
v_sales, group_of_customers and
dept nin ('dept #1'
,null
) group by
dept
;
dept count(*) list(customer_id)
dept #2 3 C1,C2
select
customer_id, count(customer_id) from
v_sales and
dept='dept #1'
group by
customer_id
;
customer_id count(customer_id)
C1 2
C2 2
with
group_of_customers as
(select
customer_id, count(customer_id) from
v_sales and
dept='dept #1'
group by
customer_id)
select
sales.* from
v_sales, group_of_customers and
dept nin ('dept #1'
,null
)
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
--same thing, but now the group of customers is "customers that buy from dept #1" and "buy items with tag #1"
with
items_with_tag1 as
(select
item_id, count(item_id) from
v_item_tags and
tag='tag #1'
group by
item_id),
group_of_customers as
(select
customer_id, count(customer_id) from
v_sales, items_with_tag1 and
dept='dept #1'
group by
customer_id)
select
dept,count(*),list(customer_id) from
v_sales, group_of_customers and
dept nin ('dept #1'
,null
) group by
dept
;
dept count(*) list(customer_id)
dept #2 3 C1,C2
end_user
col_type
create
table
article_permission ( user_name end_user
, art_id t_item_id);
reponse
success
insert
into
article_permission values
('rd'
,'artA'
);
reponse
success
insert
into
article_permission values
('pt'
,'artA'
);
reponse
success
insert
into
article_permission values
('pt'
,'artB'
);
reponse
success
create
view
v_article_permission as
select
* from
article_permission;
reponse
success
create
table
customer_permission ( user_name end_user
, customer_id t_customer_id);
reponse
success
insert
into
customer_permission values
('rd'
,'C1'
);
reponse
success
insert
into
customer_permission values
('pt'
,'*'
);
reponse
success
create
view
v_customer_permission as
select
* from
customer_permission;
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_sales where
end_user
='pt'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artB C2 20200102 6 ID02 # 0 artB the article B dept #2 10 3.200 C2 customer #2
artB C1 20191231 4 ID03 # 0 artB the article B dept #2 10 3.200 C1 customer #1
artB C2 20200102 7 ID04 box1 0 artB the article B dept #2 10 3.200 C2 customer #2
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C2 20191231 5 ID07 box1 0 artA the article A dept #1 10 1.500 C2 customer #2
artA C2 20191231 5 ID09 # 55 artA the article A dept #1 10 1.500 C2 customer #2
select
* from
v_sales where
end_user
='rd'
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2 item_id art_label dept avg_week_sales sales_price customer_id customer_name
artA C1 20191231 5 ID01 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
artA C1 20191231 5 ID06 box1 0 artA the article A dept #1 10 1.500 C1 customer #1
set
log_verbose='n'
;
reponse
success
--`#LOAD#..` and custom callbacks
-- this sql
select
count(*) from
v_sales
cb
add_text('foo'
,'return "hello world"'
)
;
count(*) foo
8 hello world
-- can be written this way using Lua load, useful when you write complex callbacks
select
count(*) from
v_sales
cb
add_text('foo'
,'return fn()'
)
`#LOAD#
function
fn()
return "hello world"
end
`
;
count(*) foo
8 hello world
--`#LOAD#..` and `#DYN_SQL#..`
-- this sql
select
count(*) from
v_sales
and
item_id='artA'
;
count(*)
4
-- can be written this way with a "dynamic sql" approach
select
count(*) from
v_sales
`#DYN_SQL#fn_where_clause()`
`#LOAD#
function
fn_where_clause()
return "and
item_id='artA'
"
end
`
;
count(*)
4
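-- fn_where_clause is plain lua, so the returned clause can be built from data; a hedged
-- sketch producing an in-list (the function body and the values are illustrative):
function fn_where_clause()
  local ids = { "artA", "artB" }
  local quoted = {}
  for i, id in ipairs(ids) do quoted[i] = "'" .. id .. "'" end
  return "and item_id in (" .. table.concat(quoted, ",") .. ")"
end
print(fn_where_clause()) --> and item_id in ('artA','artB')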
-- stormbase_replay1.sql and stormbase_replay2.sql
-- when you write dynamic code, at some point you will want to see the "final sql" and maybe replay it for debugging
-- replay files and parameter generate_replay should be used to achieve this
system 'rm -f stormbase_replay*.sql'
;
reponse
set
generate_replay='y'
;
reponse
success
-- this sql has dynamic code and complex custom callbacks
select
count(*) from
v_sales
`#DYN_SQL#fn_where_clause()`
cb
add_text('foo'
,'return fn()'
)
`#LOAD#
function
fn_where_clause()
return "and
item_id='artA'
"
end
function
fn()
return "hello world"
end
`
;
count(*) foo
4 hello world
-- stormbase_replay1.sql contains the initial sql text
system 'cat stormbase_replay1.sql'
;
reponse
select count(*) from v_sales
`#DYN_SQL#fn_where_clause()`
cb add_text('foo','return fn()')
`#LOAD#
function fn_where_clause()
return "and item_id='artA'"
end
function fn()
return "hello world"
end
`
;
system 'cat stormbase_replay1.sql';
-- stormbase_replay2.sql contains the sql after:
-- `#LOAD#..` sections have been loaded into the session's context and removed from stormbase_replay2.sql
-- `#LOAD_KEEP#..` sections have been loaded into the session's context but have *NOT* been removed from stormbase_replay2.sql
-- `#DYN_SQL#..` sections have been evaluated and replaced in stormbase_replay2.sql
system 'cat stormbase_replay2.sql'
;
reponse
select count(*) from v_sales
and item_id='artA'
cb add_text('foo','return fn()')
;
system 'cat stormbase_replay1.sql';
system 'cat stormbase_replay2.sql';
system 'rm -f stormbase_replay*.sql'
;
reponse
-- so when should you use LOAD vs LOAD_KEEP?
-- it is just a matter of being able to replay: if the sql in stormbase_replay2.sql needs the lua code, use LOAD_KEEP
-- so the previous sql is better written this way, even though in terms of execution it is the same
select
count(*) from
v_sales
`#DYN_SQL#fn_where_clause()`
cb
add_text('foo'
,'return fn()'
)
`#LOAD#
function
fn_where_clause()
return "and
item_id='artA'
"
end
`
`#LOAD_KEEP#
function
fn()
return "hello world"
end
`
;
count(*) foo
4 hello world
--this sql can be replayed and does not contain useless code
system 'cat stormbase_replay2.sql'
;
reponse
select count(*) from v_sales
and item_id='artA'
cb add_text('foo','return fn()')
`#LOAD_KEEP#
function fn()
return "hello world"
end
`
;
system 'cat stormbase_replay2.sql';
-- comments are allowed in dynamic sql
system 'rm -f stormbase_replay*.sql'
;
reponse
select
count(*) from
v_sales
`#DYN_SQL#fn_where_clause()`
`#LOAD#
function
fn_where_clause()
return "-- and item_id='artA'"
end
`
;
count(*)
8
system 'cat stormbase_replay2.sql'
;
reponse
select count(*) from v_sales
-- and item_id='artA'
;
system 'cat stormbase_replay2.sql';
-- with query, "fn_nothing" syntax (for small numbers of lines)
with
a as
(select
sales.* from
v_sales),
select
from
no_view,* function
fn_nothing
cb
union('a'
)
;
item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
artA C1 20191231 5 ID01 box1 0
artB C2 20200102 6 ID02 # 0
artB C1 20191231 4 ID03 # 0
artB C2 20200102 7 ID04 box1 0
# C1 20200102 8 ID05 # 0
artA C1 20191231 5 ID06 box1 0
artA C2 20191231 5 ID07 box1 0
artA C2 20191231 5 ID09 # 55
-- using results from a previous with
-- v_max_sales_qty is a lua global variable, it can be "called" in the sql using `the_variable`
-- note the tonumber: SB will inject the variable with quotes ('8' in this case), hence the need for tonumber
-- note also that number_column=value can't be used in SB, use e(number_column,value) instead
with
a as
(select
max(sales_qty) as
max_sales_qty from
v_sales cb
add_text('foo'
,'return fn(max_sales_qty)'
)),
b as
(select
sales.* from
v_sales and
e(sales_qty,tonumber(`v_max_sales_qty`))),
select
from
no_view,* function
fn_nothing
cb
open_join('a,b'
)
`#LOAD_KEEP#
v_max_sales_qty="no_set"
function
fn(n)
v_max_sales_qty=n
return "done"
end
`
;
max_sales_qty foo item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
8 done # C1 20200102 8 ID05 # 0
-- `#DYN_SQL#..` vs `DYN_SQL#..`
-- the same can be done with a dynamic sql syntax
-- `#DYN_SQL#..` can't be used because it is evaluated before sql execution
-- `DYN_SQL#..` (w/o #) should be used instead because it is evaluated before each "with sql" execution
-- therefore results of `DYN_SQL#..` are visible in stormbase.log but not in replay files
with
a as
(select
max(sales_qty) as
max_sales_qty from
v_sales cb
add_text('foo'
,'v_max_sales_qty=max_sales_qty return "foo"'
)),
b as
(`DYN_SQL#fn_sql()`),
select
from
no_view,* function
fn_nothing
cb
open_join('a,b'
)
`#LOAD_KEEP#
v_max_sales_qty="no_set"
function
fn_sql()
return "select
sales.* from
v_sales and
e(sales_qty,"..v_max_sales_qty..")"
end
`
;
max_sales_qty foo item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
8 foo # C1 20200102 8 ID05 # 0
-- note that comments are not allowed in `DYN_SQL#..`
continue_on_error(151);
reponse
success
with
a as
(select
max(sales_qty) as
max_sales_qty from
v_sales cb
add_text('foo'
,'v_max_sales_qty=max_sales_qty return "foo"'
)),
b as
(`DYN_SQL#fn_sql()`),
select
from
no_view,* function
fn_nothing
cb
open_join('a,b'
)
`#LOAD_KEEP#
v_max_sales_qty="no_set"
function
fn_sql()
return "select
sales.* from
v_sales \n--and e(sales_qty,"..v_max_sales_qty..")"
end
`
;
reponse
error 151 (continue): select on secondary view failed
stop_on_error;
reponse
success
-- debug function
-- you may use the lua debug function (taking one string parameter) to display things in file stormbase.debug
system 'rm -f stormbase.debug'
;
reponse
with
a as
(select
max(sales_qty) as
max_sales_qty from
v_sales cb
add_text('foo'
,'v_max_sales_qty=max_sales_qty return "foo"'
)),
b as
(`DYN_SQL#fn_sql()`),
select
from
no_view,* function
fn_nothing
cb
open_join('a,b'
)
`#LOAD_KEEP#
v_max_sales_qty="no_set"
function
fn_sql()
debug("lua variable v_max_sales_qty value is "..v_max_sales_qty)
debug("lua variable v_max_sales_qty type is "..type(v_max_sales_qty))
return "select
sales.* from
v_sales and
e(sales_qty,"..v_max_sales_qty..")"
end
`
;
max_sales_qty foo item_id customer_id sales_date sales_qty line_id packaging_id sales_qty2
8 foo # C1 20200102 8 ID05 # 0
system 'cat stormbase.debug'
;
reponse
lua variable v_max_sales_qty value is 8
lua variable v_max_sales_qty type is number
-- error 222 and file stormbase.debug
-- if your lua code fails during parsing or during execution, stormbase will return an error 222
-- useful debugging information will also be displayed in file stormbase.debug
continue_on_error(222);
reponse
success
system 'rm -f stormbase.debug'
;
reponse
with
a as
(select
max(sales_qty) as
max_sales_qty from
v_sales cb
add_text('foo'
,'v_max_sales_qty=max_sales_qty return "foo"'
)),
b as
(`DYN_SQL#fn_sql()`),
select
from
no_view,* function
fn_nothing
cb
open_join('a,b'
)
`#LOAD_KEEP#
v_max_sales_qty="no_set"
function
fn_sql()
-- this will fail
local foo=nil..""
return "select
sales.* from
v_sales and
e(sales_qty,"..v_max_sales_qty..")"
end
`
;
reponse
error 222 (continue): error in dynamic sql (see file stormbase.debug for more info)
stop_on_error;
reponse
success
-- the message "[string "..."]:5: attempt to concatenate a nil value" tells you to look at line #5 in the lua load section
system 'cat stormbase.debug'
;
reponse
<<<
-- error:
[string "..."]:5: attempt to concatenate a nil value
--
-- lua call:
return fn_sql()
--
-- lua load:
1.
2. v_max_sales_qty="no_set"
3. function fn_sql()
4. -- this will fail
5. local foo=nil..""
6. return "select sales.* from v_sales and e(sales_qty,"..v_max_sales_qty..")"
7. end
8.
--
>>>
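-- the usual lua guard against "attempt to concatenate a nil value" is tostring() or an
-- explicit default before concatenating; a minimal sketch (the variable name is reused
-- from the example above for illustration only):
local v_max_sales_qty = nil
local clause = "e(sales_qty," .. tostring(v_max_sales_qty or 0) .. ")"
print(clause) --> e(sales_qty,0)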
-- cache management
-- since v1.17.10_F28 the cache is based on the original sql, so there is nothing to worry about when you use dynamic sql
set
cache
='y'
;
reponse
success
-- sql is executed because it is new
with
a as
(`DYN_SQL#fn_sql()`),
--AA
select
from
no_view,* function
fn_nothing
cb
union('a'
)
`#LOAD#
function
fn_sql()
return "select
unique(item_id) from
v_sales and
item_id='artA'
"
end
`
;
unique(item_id)
artA
-- sql is executed because the `#LOAD#..` has changed
with
a as
(`DYN_SQL#fn_sql()`),
--BB
select
from
no_view,* function
fn_nothing
cb
union('a'
)
`#LOAD#
function
fn_sql()
return "select
unique(item_id) from
v_sales and
item_id='artB'
"
end
`
;
unique(item_id)
artB
save
;
reponse
success
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_commands.sql
--
save
;
reponse
success
--ACCEPT_DIFF_START
--defragmentation did not occur because of the memory_max_gb parameter
system 'grep CELL_STORE ../STORAGE/INT/_DESC'
;
reponse
CELL_STORE:8:7560
system 'ls -l ../STORAGE/INT/CELL*'
;
reponse
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0000.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0001.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0002.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0003.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0004.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0005.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0006.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0007.cell
--force defragmentation
set
memory_max_gb=0;
reponse
success
save
;
reponse
success
--fewer CELL blocks are needed but the files are still here
system 'grep CELL_STORE ../STORAGE/INT/_DESC'
;
reponse
CELL_STORE:4:3608
system 'ls -l ../STORAGE/INT/CELL*'
;
reponse
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0000.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0001.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0002.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0003.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0004.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0005.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0006.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0007.cell
--next start/bounce will remove them
bounce;
system 'ls -ltr ../STORAGE/INT/CELL*'
;
reponse
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0000.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0001.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0002.cell
-rw-r--r-- 1 philippe staff 1000 16 jan 09:59 ../STORAGE/INT/CELL_BLOCK_0003.cell
--ACCEPT_DIFF_END
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
# C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
--
continue_on_error
stop_on_error
continue_on_error(err_code1, err_code2, ...)
create
col_type
foo as
text
;
reponse
success
continue_on_error(11,1);
reponse
success
create
col_type
foo as
text
;
reponse
error 1 (continue): object exists already
c__reate col_type
foo as
text
;
reponse
error 11 (continue): non valid SQL statement
--
start.sh
stop.sh
bounce.sh
show_log.sh
--
i means interactive (the file is executed but you must press enter after each execution)
Note about logging: sql.sh will create
a log file (sql.log), this file is a bit different from
stdout, it is used to generate this documentation !!
--
n tells sql.sh to "read but not run"
y tells sql.sh to restart normally
--
something I want to see in sql.log without being executed
it can be on several lines but cannot contain a semicolon
--
same as
bounce.sh without killing the process
bounce;
--
same as
stop.sh without killing the process
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
-- (same initialization as the doc_data_init.sql run above, output identical)
-- ###########################
-- RUNNING doc_contexts.sql
--
contexts are C structures that you can create
and
reuse later in your custom C code embedded in SB
desc
context;
context_name is_drop_yn
system '(echo '
'loop fidelity_cards(customer_id,card_label) function fn_build_idx1;'
' > ./my_contexts.sb)'
;
reponse
system '(echo '
'loop item_customer_infos(customer_id,item_id,info) function fn_build_idx2;'
' >> ./my_contexts.sb)'
;
reponse
system '(echo '
'loop item_customer_infos(customer_id,item_id,info,valid_from,valid_until) function fn_build_idx2_period('
'sales_date'
');'
' >> ./my_contexts.sb)'
;
reponse
./my_contexts.sb
loop fidelity_cards(customer_id,card_label) function fn_build_idx1;
loop item_customer_infos(customer_id,item_id,info) function fn_build_idx2;
loop item_customer_infos(customer_id,item_id,info,valid_from,valid_until) function fn_build_idx2_period(sales_date);
set
init_file_path='./my_contexts.sb'
;
reponse
success
--no context at this step
desc
context;
context_name is_drop_yn
refresh
dirty
view
;
reponse
success
--contexts defined in ./my_contexts.sb are now loaded
desc
context;
context_name is_drop_yn
IDX1#customer_id->card_label n
IDX2#customer_id->item_id->info n
IDX2_PERIOD#customer_id->item_id->sales_date->info n
refresh
dirty
view
;
reponse
success
--contexts defined in ./my_contexts.sb have been re-loaded and the previous ones have been dropped
desc
context;
context_name is_drop_yn
IDX1#customer_id->card_label n
IDX2#customer_id->item_id->info n
IDX2_PERIOD#customer_id->item_id->sales_date->info n
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
-- (same initialization as the doc_data_init.sql run above, output identical)
-- ###########################
-- RUNNING doc_analytics.sql
--
select
column_name, function_parameters
from
view_name function
function_name(function_arguments)
group by
column_name
fn_having(<count|countdistinct|sum|sumpareto>, operator, value)
select
item_id, count(customer_id), countdistinct(customer_id), sum(sales_qty)
from
v_sales
group by
item_id
callback
sort(4,'desc'
);
item_id count(customer_id) countdistinct(customer_id) sum(sales_qty)
artB 3 2 17
artA 3 2 15
# 1 1 8
select
sum(sales_qty) from
v_sales;
sum(sales_qty)
40
select
item_id, p(customer_id) from
v_sales function
fn_having(count,=,3) group by
item_id;
item_id =3
artA y
artB y
select
item_id, p(customer_id) from
v_sales function
fn_having(countdistinct,>=,2) group by
item_id;
item_id >=2
artA y
artB y
select
item_id, p(sales_qty) from
v_sales function
fn_having(sum,<,17) group by
item_id;
item_id <17.00
artA y
# y
--top items, doing 80% (0.8*50=40) of the total sales
select
item_id, p(sales_qty) from
v_sales function
fn_having(sumpareto,<=,40) group by
item_id;
item_id <=40.00
artA y
artB y
# y
--bottom items, doing the remaining 20% of the total sales
select
item_id, p(sales_qty) from
v_sales function
fn_having(sumpareto,>=,40) group by
item_id;
item_id >=40.00
# y
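--a hedged lua sketch of the fn_having(count,=,3) filter above: aggregate per group, then
--keep only the groups whose aggregate satisfies the operator (the rows are illustrative):
local rows = { { "artA", "C1" }, { "artA", "C1" }, { "artA", "C2" }, { "artB", "C2" } }
local counts = {}
for _, r in ipairs(rows) do counts[r[1]] = (counts[r[1]] or 0) + 1 end
for item, c in pairs(counts) do
  if c == 3 then print(item, "y") end   -- fn_having(count,=,3)
end
--> artA y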
with
with_name1 as
(
select
group_by_col_name, stored_values
from
view_name function
fn_store ...
group by
group_by_col_name
),
...
select
col_type
.val, with_name1.alias1 ... from
no_view, * function
fn_merge
with
a as
(select
item_id, sum(avg_week_sales) as
v, unique(art_label) as
label from
v_items function
fn_store group by
item_id),
b as
(select
item_id, sum(sales_qty) as
v from
v_sales function
fn_store group by
item_id)
select
col_type
.val,
a.v, a.label,
b.v,
'avg is '
..decode(nvl(a.v,0)>b.v,true,'greater'
,'lower'
)..' than real sales'
as
label,
'previous value is: '
..line.label as
label2,
from
no_view, * function
fn_merge;
col_type.val a.v a.label b.v label label2
# # 8 avg is lower than real sales previous value is: avg is lower than real sales
artA 10 the article A 15 avg is lower than real sales previous value is: avg is lower than real sales
artB 10 the article B 17 avg is lower than real sales previous value is: avg is lower than real sales
box1 10 a box # # previous value is: [string "return 'avg is '..decode(nvl(a.v,0)>b.v,tru..."]:1: attempt to compare nil with number
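--nvl and decode as used in the fn_merge expression above; hedged lua equivalents of the
--semantics they exhibit (nvl: default when nil, decode: value lookup with a fallback):
local function nvl(x, default)
  if x == nil then return default end
  return x
end
local function decode(x, ...)
  local args = { ... }
  for i = 1, #args - 1, 2 do
    if x == args[i] then return args[i + 1] end
  end
  return args[#args]   -- odd trailing argument acts as the fallback
end
print(nvl(nil, 0))                              --> 0
print(decode(10 > 8, true, "greater", "lower")) --> greater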
select
group_by_col_name, p(text_col_name) from
view_name function
fn_build_filter(order,columns_kept)
--sort asc and keep everything by default
select
item_id, p(sales_date) from
v_sales function
fn_build_filter() and
item_id nin (null
) group by
item_id;
item_id 20191231 20200102
artA X _
artB X X
--sort desc
select
item_id, p(sales_date) from
v_sales function
fn_build_filter(desc
) and
item_id nin (null
) group by
item_id;
item_id 20200102 20191231
artA _ X
artB X X
--get the first sales_date by item
select
item_id, p(sales_date) from
v_sales function
fn_build_filter(asc,1) and
item_id nin (null
) group by
item_id;
item_id 20191231 20200102
artA X _
artB X _
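--a hedged lua sketch of fn_build_filter(order,columns_kept): per group, sort the distinct
--values, mark the first columns_kept of them with X and the rest with _ (data illustrative):
local dates = { artA = { "20191231" }, artB = { "20191231", "20200102" } }
local order, keep = "asc", 1
for item, ds in pairs(dates) do
  table.sort(ds, function(x, y)
    if order == "asc" then return x < y end
    return x > y
  end)
  for i, d in ipairs(ds) do print(item, d, i <= keep and "X" or "_") end
end
--> artA 20191231 X / artB 20191231 X / artB 20200102 _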
select
group_by_col_name, stored_values, p(col_name1), p(col_name2) from
view_name
function
fn_apply_filter(with_alias) ...
group by
group_by_col_name
--last sales date of item artA
select
unique(item_id), sales_date, sum(sales_qty), count(sales_qty) from
v_sales and
item_id='artA'
group by
sales_date callback
sort(2,'desc'
) limit(1);
unique(item_id) sales_date sum(sales_qty) count(sales_qty)
artA 20191231 15 3
--last sales date of item artB
select
unique(item_id), sales_date, sum(sales_qty), count(sales_qty) from
v_sales and
item_id='artB'
group by
sales_date callback
sort(2,'desc'
) limit(1);
unique(item_id) sales_date sum(sales_qty) count(sales_qty)
artB 20200102 13 2
--same in one select using fn_build_filter/fn_apply_filter
--note the utility function stringify
with
a as
(select
item_id, p(sales_date) from
v_sales function
fn_build_filter(desc
,1) and
item_id nin (null
) group by
item_id),
b as
(
select
item_id,
maxstr(sales_date) as
sales_date, sum(sales_qty) as
sales_qty, count(sales_qty) as
count,
p(item_id), p(sales_date),
from
v_sales function
fn_apply_filter(a)
group by
item_id
)
select
col_type
.val, b.sales_date, b.sales_qty, b.count, stringify(b) from
no_view, * function
fn_merge
;
col_type.val b.sales_date b.sales_qty b.count stringify(b)
artA 20191231 15 3 {#lines:3,count:3,sales_qty:15,sales_date:'20191231',}
artB 20200102 13 2 {#lines:2,count:2,sales_qty:13,sales_date:'20200102',}
select
group_by_col_name, p(text_col_name), p(number_col_name)
from
view_name function
fn_pivot(operator, with_alias, custom_function, signature, before_fn, after_fn)
group by
group_by_col_name
-- 1 dimension matrix per item_id
select
p(item_id), p(sales_qty) from
v_sales function
fn_pivot(sum,null
);
null NO_DATA artA artB box1 artC
0 0 0 0 8 0 0 0 0 0 0 15 17 0 0
-- 2 dimensions matrix per customer_id/item_id
select
customer_id, p(item_id), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
customer_id;
customer_id null NO_DATA artA artB box1 artC
C1 0 0 0 0 8 0 0 0 0 0 0 10 4 0 0
C2 0 0 0 0 0 0 0 0 0 0 0 5 13 0 0
-- 2 dimensions matrix per item_id/sales_date
select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
item_id;
item_id null 20191201 20191231 20201201 20201231 20190101 20200101 20200102
artA 0 0 0 0 0 0 0 0 0 0 0 15 0 0 0 0 0
artB 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 0 13
# 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8
select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sumprevious,null
) group by
item_id;
item_id null 20191201 20191231 20201201 20201231 20190101 20200101 20200102
artA 0 0 0 0 0 0 0 0 0 0 0 15 15 15 15 15 15
artB 0 0 0 0 0 0 0 0 0 0 0 4 4 4 4 4 17
# 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8
select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sumafter,null
) group by
item_id;
item_id null 20191201 20191231 20201201 20201231 20190101 20200101 20200102
artA 15 15 15 15 15 15 15 15 15 15 15 0 0 0 0 0 0
artB 17 17 17 17 17 17 17 17 17 17 17 13 13 13 13 13 0
# 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 0
select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(last,null
) group by
item_id;
item_id null 20191201 20191231 20201201 20201231 20190101 20200101 20200102
artA 0 0 0 0 0 0 0 0 0 0 0 5 0 0 0 0 0
artB 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 0 7
# 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8
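--the sum/sumprevious/sumafter operators above differ only in how the per-column sums are
--accumulated; a hedged lua sketch over one ordered row of pivot cells (values illustrative,
--the pattern follows the artB row above):
local cells = { 0, 4, 0, 13 }          -- sum per ordered sales_date column
local prev, running = {}, 0
for i, v in ipairs(cells) do running = running + v; prev[i] = running end
local after, rest = {}, 0
for i = #cells, 1, -1 do after[i] = rest; rest = rest + cells[i] end
print(table.concat(prev, " "))  --> 0 4 4 17   (sumprevious: cumulative, inclusive)
print(table.concat(after, " ")) --> 17 13 13 0 (sumafter: sum of the cells strictly after)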
-- fn_build_filter+fn_pivot
-- 2 dimensions matrix per item_id/sales_date, the columns are limited to the first sales date of C2/artA
select
minstr(sales_date) from
v_sales and
customer_id='C2'
and
item_id='artA'
;
minstr(sales_date)
20191231
with
a as
(select
p(sales_date) from
v_sales function
fn_build_filter(asc,1) and
customer_id='C2'
and
item_id='artA'
)
b as
(select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sum,a) group by
item_id)
select
col_type
.val, stringify(b) from
no_view, * function
fn_merge;
col_type.val stringify(b)
{20191231:0,}
artA {20191231:15,}
artB {20191231:4,}
select
* from
no_view function
fn_filter_pivot(with_alias1, with_alias2)
-- fn_build_filter+fn_pivot+fn_filter_pivot
-- build 2 dimensions matrix per item_id/sales_date build only for the first sales date of C2/artA
with
a as
(select
p(sales_date) from
v_sales function
fn_build_filter(asc,1) and
customer_id='C2'
and
item_id='artA'
),
b as
(select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
item_id),
c as
(select
* from
no_view function
fn_filter_pivot(b,a)),
select
col_type
.val, stringify(a), stringify(b), stringify(c) from
no_view, * function
fn_merge;
col_type.val stringify(a) stringify(b) stringify(c)
{20191231:'X',} {:0,20191201:0,20201201:0,20191231:0,20200102:8,20190101:0,20201231:0,null:0,20200101:0,} {20191231:0,}
artA {20191231:'X',} {:0,20191201:0,20201201:0,20191231:15,20200102:0,20190101:0,20201231:0,null:0,20200101:0,} {20191231:15,}
artB {20191231:'X',} {:0,20191201:0,20201201:0,20191231:4,20200102:13,20190101:0,20201231:0,null:0,20200101:0,} {20191231:4,}
-- fn_build_filter+fn_pivot+fn_filter_pivot
-- build a 2 dimensions matrix per item_id/sales_date, then keep only the first sales date of the item for customer C2
with
a as
(select
item_id, p(sales_date) from
v_sales function
fn_build_filter(asc,1) and
customer_id='C2'
group by
item_id),
b as
(select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
item_id),
c as
(select
* from
no_view function
fn_filter_pivot(b,a)),
select
col_type
.val, stringify(a), stringify(b), stringify(c) from
no_view, * function
fn_merge;
col_type.val stringify(a) stringify(b) stringify(c)
{} {:0,20191201:0,20201201:0,20191231:0,20200102:8,20190101:0,20201231:0,null:0,20200101:0,} {}
artA {20191231:'X',20200102:'_',} {:0,20191201:0,20201201:0,20191231:15,20200102:0,20190101:0,20201231:0,null:0,20200101:0,} {20191231:15,20200102:0,}
artB {20191231:'_',20200102:'X',} {:0,20191201:0,20201201:0,20191231:4,20200102:13,20190101:0,20201231:0,null:0,20200101:0,} {20191231:0,20200102:13,}
select
* from
no_view function fn_unpivot(<sum|avg>, with_holding_the_fn_pivot, custom_function)
select
sum(sales_qty), count(sales_qty), avg(sales_qty) from
v_sales;
sum(sales_qty) count(sales_qty) avg(sales_qty)
40 7 5.714
select
p(item_id), p(sales_qty) from
v_sales function
fn_pivot(sum,null
);
null NO_DATA artA artB box1 artC
0 0 0 0 8 0 0 0 0 0 0 15 17 0 0
with
a as
(select
p(item_id), p(sales_qty) from
v_sales function
fn_pivot(sum,null
))
b as
(select
* from
no_view function
fn_unpivot(sum,a)),
c as
(select
* from
no_view function
fn_unpivot(avg,a))
select
b.val, c.val, b.val/c.val as
nb_col_in_fn_pivot from
no_view,* function
fn_merge;
b.val c.val nb_col_in_fn_pivot
40 2.667 15
select
customer_id,item_id, sum(sales_qty) from
v_sales group by
customer_id,item_id;
customer_id item_id sum(sales_qty)
C1 artA 10
C2 artB 13
C1 artB 4
C1 # 8
C2 artA 5
with
a as
(select
customer_id, p(item_id), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
customer_id)
b as
(select
* from
no_view function
fn_unpivot(sum,a)),
c as
(select
* from
no_view function
fn_unpivot(avg,a)),
select
col_type
.val, b.val, c.val from
no_view,* function
fn_merge;
col_type.val b.val c.val
C1 22 1.467
C2 18 1.200
-- fn_build_filter+fn_pivot+fn_filter_pivot+fn_unpivot
-- show total sales per item on the date where customer C2 bought it for the first time
with
a as
(select
item_id, p(sales_date) from
v_sales function
fn_build_filter(asc,1) and
customer_id='C2'
group by
item_id),
b as
(select
item_id, p(sales_date), p(sales_qty) from
v_sales function
fn_pivot(sum,null
) group by
item_id),
c as
(select
* from
no_view function
fn_filter_pivot(b,a)),
d as
(select
* from
no_view function
fn_unpivot(sum,c)),
select
col_type
.val, d.val from
no_view, * function
fn_merge;
col_type.val d.val
artA 15
artB 13
select
group_by_col_name, function_parameters from
view_name
function
fn_custom(custom_function, signature, before_fn, after_fn)
group by
group_by_col_name
./_SO_CODE/Z_doc_fn_custom_step1.c
#include "./common.h"

static char *my_column = "my_column";

/* before: declare one float output column and allocate the shared accumulator */
int fn_one_column_one_line_before(WITH_INFOS* with_infos) {
  with_infos->out_col_count = 1;
  with_infos->out_types = malloc(with_infos->out_col_count * sizeof(int));
  with_infos->out_headers = malloc(with_infos->out_col_count * sizeof(char*));
  char* s = malloc(strlen(my_column) + 1);
  strcpy(s, my_column);
  with_infos->out_headers[0] = s;
  with_infos->out_types[0] = ANALYTIC_FLOAT;
  float *f = malloc(sizeof(float));
  *f = 0.;
  with_infos->out_dyn_fn_context = f;           /* shared with the custom function */
  with_infos->out_multi_thread_autorized = 'n'; /* single thread: no locking needed */
  return OK;
}

/* after: emit the accumulated value as the single line of the resultset */
int fn_one_column_one_line_after(WITH_INFOS* with_infos) {
  WITH_RESULT *res = with_infos->current_with_result;
  float *f = res->dyn_fn_context;
  res->lines_float[0] = malloc(res->col_count * sizeof(float));
  res->lines_float[0][0] = *f;
  return OK;
}
set
SO_FILE_NAME='Z_doc_fn_custom_step1.so'
;
reponse
success
--#SB log_verbose
select
p(sales_qty) from
v_sales function
fn_custom(
'(void* c, U_INT thread_pos, U_INT iline, float num){float *f=c; *f+=num; return 0.;}'
,
'fn_num'
,
fn_one_column_one_line_before,fn_one_column_one_line_after
)
;
my_column
40
select
sum(sales_qty) from
v_sales
;
sum(sales_qty)
40
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
-- (same initialization as the doc_data_init.sql run above, output identical)
-- ###########################
-- RUNNING doc_parameters.sql
--
desc
<|parameter> <|verbose
>
--ACCEPT_DIFF_START
--show non default parameter names
desc
callback
where
(2,'parameter'
);
object_name object_type
TCP_PORT_TRANSAC parameter
LOG_VERBOSE parameter
CPU_COUNT parameter
SLEEP_AFTER_SQL parameter
SB_LICENSE_PATH parameter
ARRAY_BLOCK_SIZE_BIG parameter
ARRAY_BLOCK_SIZE_SMALL parameter
PROD_SERVER parameter
MAX_PERF_USE_COMPRESSION parameter
PARETO_LIMIT parameter
PARTITION_LINE_COUNT parameter
CACHE parameter
MAX_PERF_CHANGE_COUNT parameter
ALLOW_WHERE_ERROR parameter
ALLOW_GROUP_BY_ERROR parameter
INIT_FILE_PATH parameter
--show non default parameters information
desc
parameter;
param_name current_value comment is_default default_value
TCP_PORT_TRANSAC 3319 integer, SB listening port for transaction mode n -1
LOG_VERBOSE y y/n, y: increase verbosity in stormbase.log n n
CPU_COUNT 10 integer, number of threads that SB will create for most operations (select, refresh, insert) n 20
SLEEP_AFTER_SQL n y/n, y: a pause of 0.1 sec is added after each sql execution n y
SB_LICENSE_PATH ./fo/foo SB's license key path n ./stormbase.license
ARRAY_BLOCK_SIZE_BIG 2 integer, array size in B, this parameter should not be modified if there is already data n 10000
ARRAY_BLOCK_SIZE_SMALL 2 integer, small array size in B, this parameter should not be modified if there is already data n 100
PROD_SERVER n y/n, n: reduces SB internal memory for non prod server n y
MAX_PERF_USE_COMPRESSION y y/n, y: SB will compress (put several ival in one integer) columns in memory n n
PARETO_LIMIT 80 % float<=1, during max_perf ival are split between rare and often value, the x% more frequent ivals are the often ival n 100 %
PARTITION_LINE_COUNT 3 integer, maximum #line in a table partition (SB will create a new partition during insert if needed), 0 means no partition n 3000000000
CACHE n integer, SB listening port for transaction mode n y
MAX_PERF_CHANGE_COUNT n y/n, y: count becomes countdistinct n y
ALLOW_WHERE_ERROR y y/n, y: where clause on bad column is ignored in select and does not return an error n n
ALLOW_GROUP_BY_ERROR y y/n, y: group by clause on bad column is replaced by * in select resultset and does not return an error n n
INIT_FILE_PATH init.sb string, path of script executed on startup and after "refresh dirty view" n ./init.sb
--show all parameters information
desc
parameter verbose
;
param_name current_value comment is_default default_value
TCP_PORT 2219 integer, SB listening port y 2219
TCP_PORT_TRANSAC 3319 integer, SB listening port for transaction mode n -1
LOG_VERBOSE y y/n, y: increase verbosity in stormbase.log n n
GENERATE_REPLAY n y/n, y: sql are written in stormbase_replay.log y n
CPU_COUNT 10 integer, number of threads that SB will create for most operations (select, refresh, insert) n 20
SLEEP_AFTER_SQL n y/n, y: a pause of 0.1 sec is added after each sql execution n y
ACTIVITY_SECURITY n y/n, y: a dynamic pause (from 0 to 10 sec depending on #active sessions) is added before select execution y n
SESSION_SECURITY y y/n, y: no more connections is accepted if #active sessions >= 30 y y
CSV_FILES_DIR ../STORAGE/CSV string, CSV file location used in "insert from file" when file path does not start with / y ../STORAGE/CSV
INTERNAL_FILES_DIR ../STORAGE/INT string, SB internal storage location y ../STORAGE/INT
TMP_FILES_DIR ../STORAGE/TMP string, SB temporary storage location y ../STORAGE/TMP
MAX_PERF_FILES_DIR ../STORAGE/MAX_PERF string, SB "memory image" storage location y ../STORAGE/MAX_PERF
TRANSAC_FILES_DIR ../STORAGE/TRANSAC string, SB internal storage location for transaction mode y ../STORAGE/TRANSAC
SB_LICENSE_PATH ./fo/foo SB's license key path n ./stormbase.license
CELL_BLOCK_INCREMENT_SIZE_GB 0.10 GB float, CELL size, this parameter should not be modified if there is already data y 0.10 GB
ARRAY_BLOCK_SIZE_BIG 2 integer, array size in B, this parameter should not be modified if there is already data n 10000
ARRAY_BLOCK_SIZE_SMALL 2 integer, small array size in B, this parameter should not be modified if there is already data n 100
MEMORY_MAX_GB 20 integer, SB allows defragmentation if "CELL memory" exceed this number y 20
UNFRAG_BLOCK_PER_BLOCK n y/n, y : SB will defrag block per block hence defrag will be longer but it won't consume more memory y n
SPARSE_TEXT_KEEP 10000000 integer, number of values than can be displayed (last inserted) for sparse_text columns y 10000000
PROD_SERVER n y/n, n: reduces SB internal memory for non prod server n y
IN_MEMORY_BIG_TABLES ,*, string, list (comma separated) of in memory big tables or * y ,*,
NOT_IN_MEMORY_BIG_TABLES ,-1, string, list (comma separated) of "not in memory" big tables y ,-1,
NOT_IN_MEMORY_COLUMNS ,-1, string, list (comma separated) of "not in memory columns" big tables y ,-1,
MAX_PERF_INDEX_FOR_NOT_IN_MEMORY_BIG_TABLES n y/n, y: compute indexes for non in memory tables y n
MAX_PERF_INDEX_ONLY n y/n, y: indexes are in memory but data is on disk y n
NOT_INDEXED_DIMENSIONS ,-1, string, list (comma separated) of dimensions on which indexes won't be created y ,-1,
MAX_PERF_FILTER_COL_TYPE -1 string, col_type used to reduce amount of data in memory (for sandbox environments) y -1
MAX_PERF_FILTER_FN -1 string, Lua boolean function used to reduce amount of data in memory (for sandbox environments) y -1
MAX_PERF_USE_COMPRESSION y y/n, y: SB will compress (put several ival in one integer) columns in memory n n
PARETO_LIMIT 80 % float<=1, during max_perf ival are split between rare and often value, the x% more frequent ivals are the often ival n 100 %
MAX_PERF_USE_IVAL_OFTEN_LIMIT 60000 integer, pareto ival are not computed during max_perf if the ival sparsity exceed this number y 60000
PIN_MEMORY n y/n, y: memory is pin hence virtual memory becomes resident y n
MAX_PERF_COMPUTE_BIG_TABLES_JOIN y y/n, n: joined columns in big tables are not stored in memory y y
MAX_COUNT_DISTINCT_THREAD_SIZE_GB 30 integer, SB will return an error if a countdistinct requires too much memory, the limit is per thread (see CPU_COUNT) y 30
QUERY_TABLE_ALLOWED n y/n, y: select from table allowed (non in memory volatile views will be created on top of each table) y n
SKIP_REFRESH_FOR_NOT_IN_MEMORY_BIG_TABLES n y/n, y: (NOT_)IN_MEMORY_BIG_TABLES parameter applies also to view refresh (not only to MAX_PERF) y n
HEADER y y/n, y: CSV files have a header line y y
FLOAT_PRECISION 3 integer, number of digits kept when a number is inserted y 3
ALLOW_ORPHAN y y/n, y: SB will not refresh a view if its big tables has an orphan value (not found in dimension) y y
NEW_COLUMN_NULL_STRING NO_DATA string, default value when a column is added to a table or when a column is not provided in a CSV file y NO_DATA
FILE_SEPARATOR , one character, CSV file separator y ,
INSERT_FORCE_VALUE.col_type_name forced_value string: forced value during insert of this col_type in a big table, unset means "not active" y forced_value
SHOW_LINE 100000 integer, progression is log every x lines during inserts y 100000
PARTITION_LINE_COUNT 3 integer, maximum #line in a table partition (SB will create a new partition during insert if needed), 0 means no partition n 3000000000
FILE_LOAD_WITHOUT_BUFFER y y/n, y: insert from file does not use buffer, hence it is faster but the full file is loaded in memory y y
ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTE n y/n, y: ,"abc,def", in a csv file with , separator is interpreted as one value: abc,def y n
SKIP_LINE_KO n y/n, y: bad lines in csv are skipped, n: bad lines generate error in insert y n
CACHE n integer, SB listening port for transaction mode n y
MAX_PERF_USE_WHERE_INDEX y y/n, y: index are created on dimensions for each view and put in memory y y
USE_INDEX_LIMIT 2 integer (>0 and <100), if a where clause targets less than x% of the lines then index scan is done y 2
USE_INDEX_WITH_SORT_LIMIT 10 integer, if a where clause targets less than #lines/this_parameter of the lines then index scan is done in sequence context y 10
REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE n y/n, countdistinct are replaced by countsequence for sequence columns y n
MAX_PERF_CHANGE_COUNT n y/n, y: count becomes countdistinct n y
MAX_GBY_BOOSTER_LEN 200000 integer, a select with group by will be computed with optimum performance if the combined sparsity of the group by columns doesn't exceed this number, otherwise it will be slower y 200000
SEQUENCE_COLUMNS ,-1, string, list (comma separated) of sequence columns y ,-1,
SEQUENCE_COLUMN_COMPANIONS ,-1, string, list (comma separated) of sequence columns companions y ,-1,
ALLOW_WHERE_ERROR y y/n, y: where clause on bad column is ignored in select and does not return an error n n
ALLOW_GROUP_BY_ERROR y y/n, y: group by clause on bad column is replaced by * in select resultset and does not return an error n n
ALLOW_EXP_ERROR n y/n, y: sum/count/etc.. on bad column is replaced by 0 in select resultset and does not return an error y n
ACTIVATE_NEW_DEV n y/n, used to test a new dev on demand y n
INIT_FILE_PATH init.sb string, path of script executed on startup and after "refresh dirty view" n ./init.sb
COMPUTED_COLUMNS ,-1, string, list (comma separated) of computed columns y ,-1,
SO_FILE_NAME stormbase.so string, file under _SO_LINUX that contains the custom C code y stormbase.so
DEBUG n y/n, y: triggers debug mode (dev only) y n
CELL_BLOCK_INCREMENT_SIZE_B 1000 B float, CELL size, this parameter should not be modified if there is already data y 1000 B
--ACCEPT_DIFF_END
set
param_name='param_value'
set
param_name=default
set
LOG_VERBOSE='n'
;
reponse
success
desc
parameter verbose
callback
where
(1,'LOG_VERBOSE'
);
param_name current_value comment is_default default_value
LOG_VERBOSE n y/n, y: increase verbosity in stormbase.log y n
set
LOG_VERBOSE='y'
;
reponse
success
desc
parameter verbose
callback
where
(1,'LOG_VERBOSE'
);
param_name current_value comment is_default default_value
LOG_VERBOSE y y/n, y: increase verbosity in stormbase.log n n
set
LOG_VERBOSE=default;
reponse
success
desc
parameter verbose
callback
where
(1,'LOG_VERBOSE'
);
param_name current_value comment is_default default_value
LOG_VERBOSE n y/n, y: increase verbosity in stormbase.log y n
--error management
continue_on_error(202,203,204);
reponse
success
set
LOG_VERBOSE='bad_value'
;
reponse
error 203 (continue): invalid parameter value
set
BAD_PARAMETER='value'
;
reponse
error 202 (continue): invalid parameter name
set
TCP_PORT_TRANSAC='2324'
;
reponse
error 204 (continue): parameter can only be set in stormbase.conf
stop_on_error;
reponse
success
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_others.sql
--
desc
parameter verbose
cb
where
(1,'QUERY_TABLE_ALLOWED'
);
param_name current_value comment is_default default_value
QUERY_TABLE_ALLOWED n y/n, y: select from table allowed (non in memory volatile views will be created on top of each table) y n
continue_on_error(49);
reponse
success
select
* from
sales;
reponse
error 49 (continue): <text> is not a view
stop_on_error;
reponse
success
--the volatile views used to query tables are created at startup, hence SB must be saved and bounced for QUERY_TABLE_ALLOWED to take effect
system '(echo "QUERY_TABLE_ALLOWED:y">>stormbase.conf)'
;
reponse
save
;
reponse
success
bounce;
desc
parameter verbose
cb
where
(1,'QUERY_TABLE_ALLOWED'
);
param_name current_value comment is_default default_value
QUERY_TABLE_ALLOWED y y/n, y: select from table allowed (non in memory volatile views will be created on top of each table) n n
--volatile views are created
desc
view
cb
like(1,'volatile'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
volatile#items 1 3 n y y 10 0 0 0 0 0
volatile#customers 1 2 n y y 10 0 0 0 0 0
volatile#item_tags 1 2 n y y 10 0 0 0 0 0
volatile#fidelity_cards 1 2 n y y 10 0 0 0 0 0
volatile#item_customer_infos 1 3 n y y 10 0 0 0 0 0
volatile#sales 1 3 n y y 10 0 0 0 0 0
volatile#inventory 1 2 n y y 10 0 0 0 0 0
volatile#sales_#partition#_00001 1 3 n y y 10 0 0 0 0 0
volatile#sales_#partition#_00002 1 1 n y y 10 0 0 0 0 0
select
* from
sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
artC C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
# C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
set
skip_refresh_for_not_in_memory_big_tables='y'
;
reponse
success
set
in_memory_big_tables='-1'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
select
* from
sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artB C2 20200102 7 ID04 box1
artC C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
artA C2 20191231 5 ID07 box1
save
;
reponse
success
--volatile views are not saved; they are created at startup (or during table creation)
system 'ls -ltr ../STORAGE/INT/*|grep volatile|wc -l'
;
reponse
0
system 'cat ../STORAGE/INT/_DESC|grep volatile|wc -l'
;
reponse
0
system '(echo "QUERY_TABLE_ALLOWED:n">>stormbase.conf)'
;
reponse
save
;
reponse
success
bounce;
set
'INSERT_FORCE_VALUE.t_item_id'
='-1'
;
reponse
success
desc
parameter verbose
cb
like(1,'INSERT_FORCE_VALUE.T_ITEM_ID'
);
param_name current_value comment is_default default_value
INSERT_FORCE_VALUE.T_ITEM_ID -1 string: forced value during insert of this col_type in a big table, unset means "not active" y -1
insert
into
sales values
('artZ'
,'C2'
,'20191231'
,5,'ID_TEST'
,'box1'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
sales#item_id from
v_sales and
line_id='ID_TEST'
;
sales#item_id
-1
set
'INSERT_FORCE_VALUE.t_item_id'
='unset'
;
reponse
success
desc
parameter verbose
cb
like(1,'INSERT_FORCE_VALUE.T_ITEM_ID'
);
param_name current_value comment is_default default_value
INSERT_FORCE_VALUE.T_ITEM_ID unset string: forced value during insert of this col_type in a big table, unset means "not active" n -1
insert
into
sales values
('artZ'
,'C2'
,'20191231'
,5,'ID_TEST'
,'box1'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
sales#item_id from
v_sales and
line_id='ID_TEST'
;
sales#item_id
-1
artZ
loop table_name(columns) function
sb_export('path/to/directory'
,'file_tag'
,thread_count) ...
loop sales(*) function
sb_export('./foo'
,'ABC'
,4);
LOOP_RESULT
DONE
--ACCEPT_DIFF_START
system 'for f in `ls ./foo/*.csv`; do echo "### $f ###"; cat $f; done'
;
reponse
### ./foo/ABC_001.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artB,C2,20200102,6.000,ID02,
artB,C2,20200102,7.000,ID04,box1
artC,C1,20200102,8.000,ID05,
artA,C2,20191231,5.000,ID07,box1
-1,C2,20191231,5.000,ID_TEST,-1
### ./foo/ABC_002.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artB,C1,20191231,4.000,ID03,
artA,C1,20191231,5.000,ID06,box1
### ./foo/ABC_003.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
artA,C1,20191231,5.000,ID01,box1
artZ,C2,20191231,5.000,ID_TEST,box1
### ./foo/ABC_004.csv ###
item_id,customer_id,sales_date,sales_qty,line_id,packaging_id
### ./foo/aa_001.csv ###
col
a1
a2
a3
### ./foo/bb_001.csv ###
col
a1
a2
a3
--ACCEPT_DIFF_END
continue_on_error(20);
reponse
success
drop view
v_foo;
reponse
error 20 (continue): "<object_name>" is not an object or "<type_name>" is not correct
drop table
foo;
reponse
error 20 (continue): "<object_name>" is not an object or "<type_name>" is not correct
refresh
dirty
view
;
reponse
success
stop_on_error;
reponse
success
create
table
foo(col number
,str text
);
reponse
success
create
view
v_foo as
select
* from
foo;
reponse
success
refresh
dirty
view
;
reponse
success
insert
into
foo values
(111.11,'A'
)(222.22,'B'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
col str
111.110 A
222.220 B
desc
table
foo;
table_name column_name column_type col_type_name
foo col number sys#type#foo#col
foo str text sys#type#foo#str
set_text sys#type#foo#col;
reponse
success
desc
table
foo;
table_name column_name column_type col_type_name
foo col text sys#type#foo#col
foo str text sys#type#foo#str
select
* from
v_foo;
col str
111.110 A
222.220 B
insert
into
foo values
('True'
,'C'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
col str
111.110 A
222.220 B
True C
-- create table foo(a text, b number);
-- create view v_foo as select * from foo;
--ival behaves "normally"
insert
into
foo values
('aa3'
,'bb3'
);
reponse
success
insert
into
foo values
('aa3'
,'bb3'
);
reponse
success
insert
into
foo values
('aa3'
,'bb3'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
col,ival(col),count(*) from
v_foo group by
col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
set_sparse_text sys#type#foo#col;
reponse
success
select
col,ival(col),count(*) from
v_foo group by
col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
save
;
reponse
success
--ACCEPT_DIFF_START
system 'ls -l ../STORAGE/INT/*.cell_array'
;
reponse
ls: ../STORAGE/INT/*.cell_array: No such file or directory
--bounce will finalize the migration
bounce;
system 'ls -l ../STORAGE/INT/*.cell_array'
;
reponse
-rw-r--r-- 1 philippe staff 117 16 jan 10:00 ../STORAGE/INT/0113_sys#type#foo#col.ival_to_val.cell_array
--ACCEPT_DIFF_END
system 'cat ../STORAGE/INT/*.cell_array'
;
reponse
0;0.000
1;0.000
2;0.000
3;0.000
4;0.000
5;0.000
6;0.000
7;0.000
8;0.000
9;0.000
10;111.110
11;222.220
12;True
13;aa3
--
select
col,ival(col),count(*) from
v_foo group by
col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
--each insert gets a new ival
insert
into
foo values
('aa3'
,'bb3'
)('aa3'
,'bb3'
);
reponse
success
insert
into
foo values
('aa4'
,'bb4'
)('aa4'
,'bb4'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
col,ival(col),count(*) from
v_foo group by
col;
col ival(col) count(*)
111.110 10 1
222.220 11 1
True 12 1
aa3 13 3
aa3 14 1
aa3 15 1
aa4 16 1
aa4 17 1
save
;
reponse
success
--ACCEPT_DIFF_START
system 'ls -l ../STORAGE/INT/*.cell_array'
;
reponse
-rw-r--r-- 1 philippe staff 145 16 jan 10:00 ../STORAGE/INT/0113_sys#type#foo#col.ival_to_val.cell_array
system 'cat ../STORAGE/INT/*.cell_array'
;
reponse
0;0.000
1;0.000
2;0.000
3;0.000
4;0.000
5;0.000
6;0.000
7;0.000
8;0.000
9;0.000
10;111.110
11;222.220
12;True
13;aa3
14;aa3
15;aa3
16;aa4
17;aa4
--ACCEPT_DIFF_END
loop sales(sales_date,sales_qty) function
fn_compute_info('my_info'
);
column_name abs_avg abs_min abs_max
sales_date nan nan nan
sales_qty 5.56 4.00 8.00
select
sum(sales_qty),count(sales_qty),avg(sales_qty) from
v_sales;
sum(sales_qty) count(sales_qty) avg(sales_qty)
15 3 5
continue_on_error(53,120,150);
reponse
success
select
sales.* from
v_sales and
bad_column='1'
and
item_id='artA'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C2 20191231 5 ID07 box1
select
* from
v_items and
v_fidelity_cards.card_label='bad_value'
;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
with
a as
(select
card_label,count(card_label) from
v_fidelity_cards group by
card_label) select
* from
v_items,a;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
stop_on_error;
reponse
success
set
allow_where_error='y'
;
reponse
success
select
sales.* from
v_sales and
bad_column='1'
and
item_id='artA'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C2 20191231 5 ID07 box1
select
sales.* from
v_sales and
bad_column in ('1'
,'2'
,'3'
) and
item_id='artA'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C2 20191231 5 ID07 box1
select
sales.* from
v_sales and
gt(bad_column,1) and
item_id='artA'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C2 20191231 5 ID07 box1
select
sales.* from
v_sales and
bad_column in ('1'
,'2'
,'3'
) and
item_id='artA'
and
bad_column in ('1'
,'2'
,'3'
) and
item_id='artA'
and
gt(bad_column,1) and
item_id='artA'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C2 20191231 5 ID07 box1
select
* from
v_items and
bad_column='bad_value'
;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
select
* from
v_items and
v_fidelity_cards.card_label='bad_value'
;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
with
a as
(select
card_label,count(card_label) from
v_fidelity_cards group by
card_label) select
* from
v_items,a;
item_id art_label dept avg_week_sales sales_price
artA the article A dept #1 10 1.500
artB the article B dept #2 10 3.200
box1 a box packaging 10 0
continue_on_error(53);
reponse
success
select
bad_column,count(*) from
v_sales group by
bad_column;
bad_column count(*)
* 3
stop_on_error;
reponse
success
set
allow_group_by_error='y'
;
reponse
success
select
count(*) from
v_sales;
count(*)
3
select
bad_column,count(*) from
v_sales group by
bad_column;
bad_column count(*)
* 3
select
item_id,bad_column,item_id,count(*) from
v_sales group by
item_id,bad_column,item_id;
item_id bad_column item_id count(*)
artA * artA 1
# * # 2
continue_on_error(53);
reponse
success
select
sum(sales_qty),sum(bad_column) from
v_sales;
reponse
error 53 (continue): <column_name> not part of <view_name> definition
stop_on_error;
reponse
success
set
allow_exp_error='y'
;
reponse
success
select
sum(sales_qty),sum(bad_column) from
v_sales;
sum(sales_qty) sum(bad_column)
15 0
--only one sb_parallel clause is authorized
continue_on_error(206);
reponse
success
select
sales.* from
v_sales
and
sb_parallel(item_id)
and
sb_parallel(sales_date);
reponse
error 206 (continue): only one sb_parallel clause is allowed
stop_on_error;
reponse
success
set
pareto_limit='1'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
select
sales.* from
v_sales
and
sb_parallel(item_id);
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C2 20200102 6 ID02 #
artB C1 20191231 4 ID03 #
artA C1 20191231 5 ID06 box1
artB C2 20200102 7 ID04 box1
artA C2 20191231 5 ID07 box1
--without the sb_parallel clause there is no relation between item_id and thread_pos
select
p(item_id) from
v_sales function
fn_custom(
'(void* c, U_INT thread_pos, U_INT iline, int key1){printf("## item_id ival: %d, thread_pos: %u\n",key1,thread_pos);return 0.;}'
,
'fn_key'
,
'fn_hello_world_before'
,'fn_hello_world_after'
)
;
item_id ival: 11, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 11, thread_pos: 0
item_id ival: 11, thread_pos: 0
txt
hello world
--with the sb_parallel clause a given item_id always receives the same thread_pos
select
p(item_id) from
v_sales function
fn_custom(
'(void* c, U_INT thread_pos, U_INT iline, int key1){printf("## item_id ival: %d, thread_pos: %u\n",key1,thread_pos);return 0.;}'
,
'fn_key'
,
'fn_hello_world_before'
,'fn_hello_world_after'
)
and
sb_parallel(item_id)
;
item_id ival: 11, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 11, thread_pos: 0
item_id ival: 12, thread_pos: 0
item_id ival: 11, thread_pos: 0
txt
hello world
loop sales(sales_date,sales_qty) function
fn_compute_info('my_info'
);
column_name abs_avg abs_min abs_max
sales_date nan nan nan
sales_qty 5.56 4.00 8.00
select
sales_qty from
v_sales callback
sort(1,'desc'
);
sales_qty
8
7
6
5
5
5
5
5
4
with
a as
(select
avg(sales_qty) as
v from
v_sales function
fn_store),
del_count as
(select
count(sales_qty) as
v from
v_sales function
fn_store and
gte(math.abs(sales_qty),`a.v*1.1`)),
kept_count as
(select
count(sales_qty) as
v from
v_sales function
fn_store and
lt(math.abs(sales_qty),`a.v*1.1`)),
select
del_count.v, kept_count.v, a.v*1.1 from
no_view, * function
fn_merge
;
del_count.v kept_count.v a.v*1.1
2 7 6.111
loop sales(sales_qty) function
fn_smart_delete('my_info'
,'1.1'
) update
(item_id);
LOOP_RESULT
DONE
refresh
dirty
view
;
reponse
success
select
sales_qty from
v_sales callback
sort(1,'desc'
);
sales_qty
6
5
5
5
5
5
4
--
refresh a particular view: set_dirty <the_view>
desc
view
callback
like(1,'v_sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
set_dirty v_sales;
reponse
success
desc
view
callback
like(1,'v_sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 y n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 y n n 10 0 13 13 0 0
refresh
dirty
view
;
reponse
success
desc
view
callback
like(1,'v_sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
--but in partition context you must specify the partitions
save
;
reponse
success
set
PARTITION_LINE_COUNT=1;
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'_'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'_'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'_'
,'box1'
);
reponse
success
refresh
dirty
view
;
reponse
success
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005 3 1 n n n 10 0 11 11 0 0
set_dirty v_sales;
reponse
success
set_dirty v_sales_#partition#_00002;
reponse
success
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 y n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 y n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00004 3 1 y n n 10 0 11 11 0 0
v_sales_#partition#_00005 3 1 y n n 10 0 11 11 0 0
refresh
dirty
view
;
reponse
success
desc
view
callback
like(1,'v_sales'
) sort(1,'asc'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00003 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00004 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00005 3 1 n n n 10 0 11 11 0 0
bounce;
--
backup 'the_directory'
--ACCEPT_DIFF_START
system 'ls -l ../STORAGE/INT| wc -l'
;
reponse
357
save
;
reponse
success
save
;
reponse
success
system 'ls -l ../STORAGE/INT| wc -l'
;
reponse
359
system 'rm -Rf ./toto'
;
reponse
system 'mkdir ./toto'
;
reponse
backup './toto'
;
reponse
BACKUP OK
system 'ls -l ./toto| wc -l'
;
reponse
322
system 'ls -l ../STORAGE/INT| wc -l'
;
reponse
359
--ACCEPT_DIFF_END
bounce;
--
--ACCEPT_DIFF_START
select
now(), sysdate(), to_char(now(),'yyyymmdd'
) from
no_view function
fn_merge;
now() sysdate() to_char(now(),'yyyymmdd')
1737017984 1737017984 20250116
--ACCEPT_DIFF_END
select
to_date('20200101'
,'yyyymmdd'
) as
d,
to_char(line.d,'%a'
),
to_char(line.d,'%j'
),
from
no_view function
fn_merge;
d to_char(line.d,'%a') to_char(line.d,'%j')
1577833216 Wed 001
create
table
my_data (date_yyyymmdd text
);
reponse
success
create
view
v_my_data as
select
* from
my_data;
reponse
success
insert
into
my_data values
('20191230'
)('20191231'
)('20200101'
)('20200102'
)('20200103'
)('20200104'
)('20200105'
);
reponse
success
insert
into
my_data values
('20200107'
)('20200108'
)('20200109'
)('20200110'
)('20200111'
)('20200112'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_my_data;
date_yyyymmdd
20191230
20191231
20200101
20200102
20200103
20200104
20200105
20200107
20200108
20200109
20200110
20200111
20200112
set
log_verbose='y'
;
reponse
success
--working with week numbers (TODO)
--#SB no_cache log_verbose
with
a as
(select
date_yyyymmdd,unique(date_yyyymmdd) as
v from
v_my_data function
fn_store group by
date_yyyymmdd)
select
col_type
.val as
yyyymmdd,
to_date(a.v,'yyyymmdd'
) as
d,
-1 as
prio_lag,
196 as
keep_count,
--20000103 is a Monday
to_date('20000103'
,'yyyymmdd'
)+line.prio_lag*24*3600 as
prio_day,
to_char(line.prio_day,'%a'
) as
prio_dow, to_char(line.d,'%a'
) as
d_dow,
math.floor((now()-line.d)/24/3600) as
delta_today,
(line.d-line.prio_day)/24/3600 as
delta,
math.floor(line.delta/7) as
delta_week,
line.delta%7 as
delta_dow,
from
no_view, a function
fn_merge
callback
sub_select('group(delta_week) min(delta_dow)'
)
add_text('keep_yn'
,'if line.delta_today>line.keep_count then return "n" end if math.floor(line.min_delta_dow-line.delta_dow)==0 then return "y" else return "n" end'
)
select_from('group(yyyymmdd) group(keep_yn) group(d_dow)'
)
sort(1,'asc'
)
;
group_yyyymmdd group_keep_yn group_d_dow
20191230 n Mon
20191231 n Tue
20200101 n Wed
20200102 n Thu
20200103 n Fri
20200104 n Sat
20200105 n Sun
20200107 n Tue
20200108 n Wed
20200109 n Thu
20200110 n Fri
20200111 n Sat
20200112 n Sun
--build a calendar table (TODO)
select
`fn_calendar('20200101'
,'20200230'
,'yyyymmdd'
,{"%Y%m%d","%A","%B","%Y"})` as
`"date"..sep().."week_day"..sep().."month_name"..sep().."full_year"`,
from
no_view function
fn_merge
export to tmp_file('calendar.csv'
)
;
--
select
..
from
...
--no_cache: obvious
--the explain tag tells SB to return the logs instead of the values
--the idea is that the logs "explain" what SB does
--note that without no_cache SB will "do nothing" (read result from cache)
select
--#SB no_cache explain
* from
v_sales
;
logs
--handy to understand error
continue_on_error(53);
reponse
success
--#SB explain
select
bad_column from
v_sales;
logs
reponse
y
success
stop_on_error;
reponse
success
--log_verbose: obvious
--explain is always used with log_verbose
select
--#SB no_cache explain log_verbose
* from
v_sales
callback
limit(10)
;
logs
--no_index: obvious
select
--#SB explain no_cache log_verbose no_index
* from
v_sales and
dept like 'dept #1'
callback
like(1,'where_index|function'
)
;
logs
--use_index: obvious
select
--#SB explain no_cache log_verbose use_index
* from
v_sales and
dept like 'dept #1'
callback
like(1,'where_index|function'
)
;
logs
index read versus no_index read (aka sequential read or full scan)
set
PARETO_LIMIT='1'
;
reponse
success
set
CPU_COUNT='1'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
--a no_index read (full scan) reads all lines sequentially and filters
select
--#SB log_verbose no_index no_cache
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
;
customer_id line_id sales#item_id
C1 ID01 artA
C2 ID02 artB
C1 ID03 artB
C1 ID06 artA
C2 ID07 artA
--an index read does this: it fetches the lines corresponding to artA, then those corresponding to artB
select
--#SB log_verbose use_index no_cache
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
;
customer_id line_id sales#item_id
C1 ID01 artA
C2 ID02 artB
C1 ID03 artB
C1 ID06 artA
C2 ID07 artA
--SB's logic to do index or sequential read is this:
--X=total #lines
--Y=#lines targeted by index
--SB will use the index if Y < X / USE_INDEX_LIMIT
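--worked example (illustrative numbers, not from the scripts below): with X=1000 lines and
--USE_INDEX_LIMIT=2, the index is chosen only when the where clause targets Y < 1000/2 = 500 lines;
--a filter hitting 300 lines uses the index, one hitting 700 lines falls back to a sequential read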
set
USE_INDEX_LIMIT=1;
reponse
success
select
--#SB log_verbose no_cache explain
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'where_index'
)
;
logs
set
USE_INDEX_LIMIT=10;
reponse
success
select
--#SB log_verbose no_cache explain
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'where_index'
)
;
logs
--if SB needs to sort the lines first (countsequence context, explained later) then USE_INDEX_WITH_SORT_LIMIT is used
--SB will use the index if Y < X / USE_INDEX_WITH_SORT_LIMIT
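--same worked example (illustrative numbers): with X=1000 lines and USE_INDEX_WITH_SORT_LIMIT=10,
--the index (plus the resort it implies) is chosen only when Y < 1000/10 = 100 lines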
set
USE_INDEX_LIMIT=1;
reponse
success
set
USE_INDEX_WITH_SORT_LIMIT=1;
reponse
success
select
--#SB log_verbose no_cache explain
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'where_index'
)
;
logs
set
USE_INDEX_WITH_SORT_LIMIT=10;
reponse
success
select
--#SB log_verbose no_cache explain
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'where_index'
)
;
logs
countsequence(the_column) counts the number of value changes during reading
countsequence can be an alternative to countdistinct
!!! countsequence will return an incorrect result if PARETO_LIMIT!=1 !!!
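--illustrative example (values not from the scripts below): reading customer_id in the order
--C1,C1,C2,C1,C2 gives countsequence=4 (four runs: C1,C1 | C2 | C1 | C2) whereas countdistinct=2;
--on lines sorted by customer_id (C1,C1,C1,C2,C2) both return 2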
select
--#SB no_cache no_index
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
;
customer_id line_id sales#item_id
C1 ID01 artA
C2 ID02 artB
C1 ID03 artB
C1 ID06 artA
C2 ID07 artA
select
--#SB no_cache no_index
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countsequence(customer_id)
4
--same result (of course) with index
select
--#SB no_cache use_index
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countsequence(customer_id)
4
--but when an index read is done, SB must sort the lines to get a correct result
select
--#SB log_verbose use_index no_cache explain
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sequence'
)
;
logs
save
;
reponse
success
select
customer_id,count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 3
C2 4
desc
view
callback
like(1,'sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 13 13 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 13 13 0 0
set
MAX_PERF_FILTER_COL_TYPE='t_customer_id'
;
reponse
success
set
MAX_PERF_FILTER_FN='e(x,'
'C1'
')'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
desc
view
callback
like(1,'sales'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_sales 3 3 n n n 10 0 12 12 0 0
v_sales_#partition#_00001 3 1 n n n 10 0 11 11 0 0
v_sales_#partition#_00002 3 3 n n n 10 0 10 10 0 0
select
customer_id,count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 3
select
sales.* from
v_sales;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C1 20191231 4 ID03 #
artA C1 20191231 5 ID06 box1
set
MAX_PERF_FILTER_COL_TYPE='-1'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
select
customer_id,count(*) from
v_sales group by
customer_id;
customer_id count(*)
C1 3
C2 4
set
SEQUENCE_COLUMNS='customer_id'
;
reponse
success
--the views must be refreshed, otherwise sql statements will fail
refresh_force
dirty
view
;
reponse
success
--the lines are now stored in a new order, which can be seen with the no_index tag
select
--#SB no_cache no_index
customer_id, line_id, sales#item_id from
v_sales and
item_id in ('artA'
,'artB'
)
;
customer_id line_id sales#item_id
C1 ID01 artA
C1 ID03 artB
C2 ID02 artB
C1 ID06 artA
C2 ID07 artA
--and countsequence(customer_id) has changed
select
--#SB no_cache
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countsequence(customer_id)
4
--index read will see lines in this order
select
--#SB no_cache use_index
item_id, customer_id from
v_sales and
item_id in ('artA'
,'artB'
)
;
item_id customer_id
artA C1
artB C1
artB C2
artA C1
artA C2
--which is why an index read needs a resort operation to get a correct countsequence result
select
--#SB no_cache explain use_index log_verbose
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sort'
)
;
logs
--but this sort operation can be long
--some dimensions are functionally linked to a column
--a common case is transaction_id and dates: you expect a given transaction (with several lines) to have a unique date
--in SB's jargon, the date dimension is a companion of transaction_id
--in such cases (when you know something about your data), you can tell SB with the SEQUENCE_COLUMN_COMPANIONS parameter
--here we will tell SB that items is a companion of customer_id
--this is incorrect, I know, but it lets you see what happens when no sorting is done
set
SEQUENCE_COLUMN_COMPANIONS='customer_id=items'
;
reponse
success
--no more sorting
select
--#SB no_cache explain use_index log_verbose
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sort'
)
;
logs
--but the result is incorrect
select
--#SB no_cache use_index
countsequence(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countsequence(customer_id)
4
-- countdistinct is greedy against high sparsity columns
-- countsequence is not
-- in some contexts both are equivalent
-- so SB will replace countdistinct(the_col) by countsequence(the_col) if:
-- SEQUENCE_COLUMNS is set to the_col
-- and REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE=y
insert
into
sales values
('artA'
,'CX'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'CY'
,'20191231'
,5,'ID11'
,'box1'
);
reponse
success
refresh_online
dirty
view
;
reponse
success
select
--#SB log_verbose no_cache explain
countdistinct(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sequence'
);
logs
set
REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE='y'
;
reponse
success
--no_index context
select
--#SB log_verbose no_cache no_index explain
countdistinct(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sequence'
);
logs
select
--#SB log_verbose no_cache no_index
countdistinct(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countdistinct(customer_id)
2
--use_index context
select
--#SB log_verbose no_cache use_index explain
countdistinct(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
callback
like(1,'sequence'
);
logs
select
--#SB log_verbose no_cache use_index
countdistinct(customer_id) from
v_sales and
item_id in ('artA'
,'artB'
)
;
countdistinct(customer_id)
2
--remember that USE_INDEX_WITH_SORT_LIMIT can be used (see above)
set
SEQUENCE_COLUMNS='-1'
;
reponse
success
refresh_force
dirty
view
;
reponse
success
continue_on_error(20);
reponse
success
drop view
v_foo;
reponse
success
drop table
foo;
reponse
success
refresh
dirty
view
;
reponse
success
stop_on_error;
reponse
success
create
table
foo(col number
,str text
);
reponse
success
create
view
v_foo as
select
* from
foo;
reponse
success
set
float_precision=1;
reponse
success
system '(printf "col,str\n1234,1234\n0.01234,0.01234\n12.43,12.43">./foo.csv)'
;
reponse
insert
into
foo select
* from
file('./foo.csv'
);
reponse
success
set
float_precision=4;
reponse
success
system '(printf "col,str\n1234,1234\n0.01234,0.01234\n12.43,12.43">./foo.csv)'
;
reponse
insert
into
foo select
* from
file('./foo.csv'
);
reponse
success
refresh
dirty
view
;
reponse
success
--numbers inserted with float_precision 1 are truncated in SB
select
* from
v_foo;
col str
1234 1234
0 0.01234
12.4000 12.43
1234 1234
0.0123 0.01234
12.4300 12.43
--float_precision param also affects the display
set
float_precision=0;
reponse
success
select
* from
v_foo;
col str
1234 1234
0 0.01234
12 12.43
1234 1234
0 0.01234
12 12.43
--scientific notation is allowed; float_precision affects only the display
system '(printf "col,str\n4.04e-02,4.04e-02">./foo.csv)'
;
reponse
insert
into
foo select
* from
file('./foo.csv'
);
reponse
success
refresh
dirty
view
;
reponse
success
select
* from
v_foo;
col str
1234 1234
0 0.01234
12 12.43
1234 1234
0 0.01234
12 12.43
0 4.04e-02
set
float_precision=4;
reponse
success
select
* from
v_foo;
col str
1234 1234
0 0.01234
12.4000 12.43
1234 1234
0.0123 0.01234
12.4300 12.43
0 4.04e-02
set
float_precision=2;
reponse
success
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_data_update.sql
--
data in stormbase can be read/updated using loop statements and C programs
loop table_name(col1, col2, ...) function function_name(param1, param2, ...) update (updated_col1, updated_col2, ...) where ...
./_SO_CODE/common.h
typedef struct sb_vals {
  // SB's version of the SB_VALS structure
  int sb_vals_version;
  // number of columns
  int count;
  // i_val of each column (loop function only)
  // NULL value: NULL_IVAL (see define above)
  U_INT *i_vals;
  // num_val of each column (loop function only)
  // NULL value: NAN (C macro) (any float for which isnan evals to true)
  float *num_vals;
  // str_val of each column (loop function only)
  // NULL value: NULL (C macro) or SB_NULL (see define above)
  char **str_vals;
  // type of each column, 2 possible values: T (text) or N (number)
  char *types;
  // obvious
  char *table_name;
  // obvious
  char **column_names;
  // obvious
  char **col_type_names;
  // number of distinct i_val of each column
  U_INT *col_i_val_counts;
  // number of lines in the table
  // (where clause is not taken into account)
  // (partitions are taken into account)
  long table_line_count;
  // (in/out) type used to update each column,
  // 3 possible values: T (text) or N (number) or I (i_val)
  char *update_types;
  // (in/out) whatever pointer you write here in the before function will be passed to the loop/after functions
  void *context;
  // (in/out) response of the loop statement
  char *result;
  // (in/out) #threads that SB will create, default is 1, of course your code must be thread safe if you put 2+
  int thread_count; //SB_LIB_VERSION >= 1
  // current thread (loop function only)
  int thread_pos; //SB_LIB_VERSION >= 1
  // if you need a mutex in the loop function, you can use this one
  // obsolete, do not use (kept for compatibility matters)
  pthread_mutex_t loop_mutex; //SB_LIB_VERSION >= 2
  // parameters passed to the loop function (parameters and columns are not the same thing!)
  char **params; //SB_LIB_VERSION >= 3
  // #parameters passed to the loop function (parameters and columns are not the same thing!)
  int params_len; //SB_LIB_VERSION >= 3
  // in/out fields (can be modified in the before function): thread_count (default 1)
  // (in/out) defines which partitions should be fetched (default ALL_PARTITIONS, can be set to DIRTY_PARTITIONS_ONLY)
  int partitions_scope; //SB_LIB_VERSION >= 4
  // (in/out) defines which lines should be fetched (default ALL_LINES, can be set to LINES_INSERTED_SINCE_LAST_REFRESH_DIRTY_ONLY)
  int lines_scope; //SB_LIB_VERSION >= 5
  // context field behavior:
  // Note that the context is global (there is only one "loop context" at a given time: "the current loop context")
  // - passed by stormbase to all loop commands
  // - the before function can set the context (any non NULL value will be considered as the current loop context)
  // - the after function can set the context to NULL and usually frees memory at the same time (NULL value will be considered as "setting the current loop context to NULL")
  // - any function can manipulate the current loop context, it is standard C manipulation
  // - if the before function sets a name on the current loop context, it can be recalled later (in computed columns). Of course in that case the after function should not free it.
  char *context_name; //SB_LIB_VERSION >= 6
  // see structure definition
  UTILITY *U; //SB_LIB_VERSION >= 7
  // (in/out) optional name of the function to be called to free the context if any (added in v1.16.81)
  // this fn must be of t_fn_free_context type
  char *fn_free_context_name; //SB_LIB_VERSION >= 8
  // (in/out) if update_types uses text update (type T), y means SB will free the pointer, default n
  // Note: many malloc/free will generate memory fragmentation at OS level, so it is better to manage a buffer in your custom code for text updates
  char text_update_sb_free; //SB_LIB_VERSION >= 10
  // (in/out) whatever pointer you write here in the before function will be passed to the loop/after functions,
  // the work_area can be considered as a second context except that it can't be recalled after the loop execution
  void *work_area; //SB_LIB_VERSION >= 13
  // (in) unique line number provided by SB during the fetch, note that line_fetch is always < table_line_count
  // the goal is to facilitate multi threading, you don't have to manage a counter with a mutex in your code
  // Note: a given line in a table may receive a different line_fetch number in distinct loop executions
  // Note: in the after fn, line_fetch will be equal to the number of lines fetched
  long line_fetch; //SB_LIB_VERSION >= 14
} SB_VALS;
// before function (returns OK or KO)
typedef int (*t_fn_line_before)(SB_VALS *read, SB_VALS *new);
// loop function (returns DELETED or UPDATED or NOT_MODIFIED)
typedef int (*t_fn_line)(SB_VALS *read, SB_VALS *new);
// after function (returns OK or KO)
typedef int (*t_fn_line_after)(SB_VALS *read, SB_VALS *new);
// free function
typedef void (*t_fn_free_context)(void *context);
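// the scripts below exercise the NOT_MODIFIED and UPDATED return values; for completeness,
// here is a hypothetical sketch (not part of the shipped examples) of a loop function
// returning DELETED, which removes the fetched line from the table:
int fn_delete_null_before(SB_VALS *read, SB_VALS *new) { return OK; }
int fn_delete_null(SB_VALS *read, SB_VALS *new) {
  // hypothetical: delete every fetched line whose first column holds a NULL text value
  if (read->types[0] == 'T' && read->str_vals[0] == NULL) return DELETED;
  return NOT_MODIFIED; // anything else is left untouched
}
int fn_delete_null_after(SB_VALS *read, SB_VALS *new) { return OK; }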
//
./_SO_CODE/Z_doc_loop_AA.c
#include "./common.h"
int fn1_before(SB_VALS* read, SB_VALS* new) {
  printf("## hello from fn1_before\n");
  read->thread_count = 2;
  return OK;
}
pthread_mutex_t my_mutex = PTHREAD_MUTEX_INITIALIZER;
int fn1(SB_VALS* read, SB_VALS* new) {
  printf("## hello from fn1 (thread_pos %d) : ", read->thread_pos);
  for (int i = 0; i < read->count; i++) {
    if (read->types[i] == 'T') {
      printf("%s=%s (ival %u)", read->column_names[i], read->str_vals[i], read->i_vals[i]);
    } else if (read->types[i] == 'N') {
      printf("%s=%.2f (ival %u)", read->column_names[i], read->num_vals[i], read->i_vals[i]);
    }
    if (i != read->count - 1) {
      printf(", ");
    }
  }
  printf("\n");
  return NOT_MODIFIED;
}
int fn1_after(SB_VALS* read, SB_VALS* new) {
  printf("## hello from fn1_after\n");
  return OK;
}
set
SO_FILE_NAME='Z_doc_loop_AA.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
--ACCEPT_DIFF_START
loop sales(item_id, sales_qty, line_id) function
fn1;
hello from fn1_before
hello from fn1 (thread_pos 0) : item_id=artA (ival 11), sales_qty=5.00 (ival 10), line_id=ID01 (ival 10)
hello from fn1 (thread_pos 1) : item_id=artB (ival 12), sales_qty=6.00 (ival 11), line_id=ID02 (ival 11)
hello from fn1 (thread_pos 0) : item_id=artB (ival 12), sales_qty=4.00 (ival 12), line_id=ID03 (ival 12)
hello from fn1 (thread_pos 1) : item_id=artB (ival 12), sales_qty=7.00 (ival 13), line_id=ID04 (ival 13)
hello from fn1 (thread_pos 1) : item_id=artC (ival 14), sales_qty=8.00 (ival 14), line_id=ID05 (ival 14)
hello from fn1 (thread_pos 1) : item_id=artA (ival 11), sales_qty=5.00 (ival 10), line_id=ID06 (ival 15)
hello from fn1 (thread_pos 0) : item_id=artA (ival 11), sales_qty=5.00 (ival 10), line_id=ID07 (ival 16)
hello from fn1_after
LOOP_RESULT
DONE
--ACCEPT_DIFF_END
./_SO_CODE/Z_doc_loop_BB.c
#include
"./common.h"
int
fn1_before(SB_VALS* read, SB_VALS* new) {
 read->thread_count
= 2;
 int
*c = malloc
(sizeof
(int
));
 *c = 99;
 read->context
= c;
 new->update_types
[0] = 'T';
 return
OK;
}
char
txt_th0[20];
char
txt_th1[20];
pthread_mutex_t my_mutex = PTHREAD_MUTEX_INITIALIZER;
int
fn1(SB_VALS* read, SB_VALS* new) {
 int
*c = read->context
;
 char
*txt = read->thread_pos
== 0 ? txt_th0 : txt_th1;
 sprintf(txt, "NEW_ID_%d"
, *c);
 *c = *c - 1;
 new->str_vals
[0] = txt;
 return
UPDATED;
}
int
fn1_after(SB_VALS* read, SB_VALS* new) {
 printf("## hello from fn1_after\n"
);
 free
(read->context
);
 return
OK;
}
set
SO_FILE_NAME='Z_doc_loop_BB.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
item_id, sales_qty, line_id from
v_sales cb
sort(1,'asc'
);
item_id sales_qty line_id
# 8 ID05
artA 5 ID01
artA 5 ID06
artA 5 ID07
artB 6 ID02
artB 4 ID03
artB 7 ID04
loop sales(item_id, sales_qty) function
fn1 update
(line_id) where
item_id='artA'
;
hello from
fn1_after
LOOP_RESULT
DONE
refresh
dirty
view
;
reponse
success
select
item_id, sales_qty, line_id from
v_sales cb
sort(1,'asc'
);
item_id sales_qty line_id
# 8 ID05
artA 5 NEW_ID_99
artA 5 NEW_ID_98
artA 5 NEW_ID_97
artB 6 ID02
artB 4 ID03
artB 7 ID04
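--Sketch (not from the run): fn1 above reuses the static txt_th0/txt_th1 buffers; an
--alternative is to malloc each value and let SB free it via text_update_sb_free
--(assuming, as for update_types, the flag is set on the "new" side):
int fn1_malloc_variant(SB_VALS* read, SB_VALS* new) {
  int *c = read->context;
  char *txt = malloc(20);
  snprintf(txt, 20, "NEW_ID_%d", *c);
  *c = *c - 1;
  new->str_vals[0] = txt;
  new->text_update_sb_free = 'y'; //SB frees txt; mind the fragmentation note above
  return UPDATED;
}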
./_SO_CODE/Z_doc_loop_AB.c
#include
"./common.h"
int
fn1_before(SB_VALS* read, SB_VALS* new) {
 printf("## hello from fn1_before\n"
);
 for(int i=0;i<read->params_len;i++){
   printf("## fn1_before param %d is %s\n"
,i+1,read->params
[i]);
 }
 read->thread_count
= 2;
 return
OK;
}
pthread_mutex_t my_mutex = PTHREAD_MUTEX_INITIALIZER;
int
fn1(SB_VALS* read, SB_VALS* new) {
 return
NOT_MODIFIED;
}
int
fn1_after(SB_VALS* read, SB_VALS* new) {
 printf("## hello from fn1_after\n"
);
 return
OK;
}
set
SO_FILE_NAME='Z_doc_loop_AB.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
loop sales(item_id, sales_qty, line_id) function
fn1('val1'
,'val2'
);
hello from
fn1_before
fn1_before param 1 is val1
fn1_before param 2 is val2
hello from
fn1_after
LOOP_RESULT
DONE
--ACCEPT_DIFF_START
./_SO_CODE/Z_doc_loop_without_mutex.c
#include
"./common.h"
int
fn1_before(SB_VALS* read, SB_VALS* new) {
 read->thread_count
= 2;
 return
OK;
}
pthread_mutex_t my_mutex = PTHREAD_MUTEX_INITIALIZER;
int
fn1(SB_VALS* read, SB_VALS* new) {
 int
r=rand()%1000000;
 printf("## fn1 A %p %d\n"
,&r,read->thread_pos
);
 usleep(r);
 printf("## fn1 B\n"
);
 return
NOT_MODIFIED;
}
int
fn1_after(SB_VALS* read, SB_VALS* new) {
 printf("## hello from fn1_after\n"
);
 return
OK;
}
set
SO_FILE_NAME='Z_doc_loop_without_mutex.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
loop sales(item_id, sales_qty) function
fn1;
fn1 A 0x700004fddd4c 0
fn1 A 0x700005060d4c 1
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
fn1 B
fn1 A 0x700004fddd4c 0
fn1 A 0x700005060d4c 1
fn1 B
fn1 A 0x700005060d4c 1
fn1 B
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
hello from
fn1_after
LOOP_RESULT
DONE
./_SO_CODE/Z_doc_loop_with_mutex.c
#include
"./common.h"
int
fn1_before(SB_VALS* read, SB_VALS* new) {
 read->thread_count
= 2;
 return
OK;
}
pthread_mutex_t my_mutex = PTHREAD_MUTEX_INITIALIZER;
int
fn1(SB_VALS* read, SB_VALS* new) {
 int
r=rand()%1000000;
 pthread_mutex_lock(&my_mutex);
 printf("## fn1 A %p %d\n"
,&r,read->thread_pos
);
 usleep(r);
 printf("## fn1 B\n"
);
 pthread_mutex_unlock(&my_mutex);
 return
NOT_MODIFIED;
}
int
fn1_after(SB_VALS* read, SB_VALS* new) {
 printf("## hello from fn1_after\n"
);
 return
OK;
}
set
SO_FILE_NAME='Z_doc_loop_with_mutex.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
loop sales(item_id, sales_qty) function
fn1;
fn1 A 0x700004fddd4c 0
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
fn1 A 0x700005060d4c 1
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
fn1 A 0x700005060d4c 1
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
fn1 A 0x700004fddd4c 0
fn1 B
hello from
fn1_after
LOOP_RESULT
DONE
--ACCEPT_DIFF_END
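--Side note (hypothetical variant, not from the run): rand() above is not guaranteed
--to be thread-safe; with thread_count>1, a per-thread state via rand_r avoids that:
int fn1_rand_r(SB_VALS* read, SB_VALS* new) {
  unsigned int seed = (unsigned int)read->line_fetch; //any per-line seed works
  int r = rand_r(&seed) % 1000000;
  usleep(r);
  return NOT_MODIFIED;
}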
select
item_id,sales#customer_id from
v_sales cb
sort(1,'asc'
);
item_id sales#customer_id
# C1
artA C1
artA C1
artA C2
artB C2
artB C1
artB C2
create
table
customer_update(item_id t_item_id, new_customer_id t_customer_id,new_sales_qty number
,item_id_new t_item_id);
reponse
success
insert
into
customer_update values
('artA'
,'C_artA'
,123.45,'??'
);
reponse
success
--fn_build_idx1
loop customer_update(item_id,new_customer_id) function
fn_build_idx1;
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
IDX1#item_id->new_customer_id n
loop sales(item_id) function
fn_update_idx1('IDX1#item_id->new_customer_id'
) update
(customer_id);
LOOP_RESULT
DONE
refresh
dirty
view
;
reponse
success
select
item_id,sales#customer_id from
v_sales cb
sort(1,'asc'
);
item_id sales#customer_id
# C1
artA C_artA
artA C_artA
artA C_artA
artB C2
artB C1
artB C2
--fn_build_idx1_num
loop customer_update(item_id,new_sales_qty) function
fn_build_idx1_num;
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
IDX1#item_id->new_customer_id n
IDX1_NUM#item_id->new_sales_qty n
loop sales(item_id) function
fn_update_idx1_num('IDX1_NUM#item_id->new_sales_qty'
) update
(sales_qty);
LOOP_RESULT
DONE
refresh
dirty
view
;
reponse
success
select
item_id,sales_qty from
v_sales cb
sort(1,'asc'
);
item_id sales_qty
# 0
artA 123.450
artA 123.450
artA 123.450
artB 0
artB 0
artB 0
--fn_copy_col
create
view
v_customer_update as
select
* from
customer_update;
reponse
success
refresh
dirty
view
;
reponse
success
select
item_id,item_id_new from
v_customer_update;
item_id item_id_new
artA ??
loop customer_update(item_id) function
fn_copy_col update
(item_id_new);
LOOP_RESULT
DONE
refresh
dirty
view
;
reponse
success
select
item_id,item_id_new from
v_customer_update;
item_id item_id_new
artA artA
loop sales(item_id, sales_qty) function
fn_skip;
reponse
loop skip
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql
stop_on_error;
reponse
success
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_computed_columns.sql
--
desc
table
items;
table_name column_name column_type col_type_name
items item_id text t_item_id
items art_label text sys#type#items#art_label
items dept text t_dept_id
items avg_week_sales number sys#type#items#avg_week_sales
items sales_price number sys#type#items#sales_price
desc
table
sales;
table_name column_name column_type col_type_name
sales item_id text t_item_id
sales customer_id text t_customer_id
sales sales_date text t_date
sales sales_qty number sys#type#sales#sales_qty
sales line_id text sys#type#sales#line_id
sales packaging_id text t_item_id
select
item_id,dept,sales_price from
v_items cb
sort(1,'asc'
);
item_id dept sales_price
artA dept #1 1.500
artB dept #2 3.200
box1 packaging 0
--type A
loop items(item_id,dept) function
fn_computed_col('get_dept'
,'A'
);
LOOP_RESULT
DONE
refresh
computed_column
;
reponse
success
--with no parameter, the source column defaults to item_id (first column of the loop)
select
line_id,item_id,packaging_id,get_dept() from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id get_dept()
ID01 artA box1 dept #1
ID02 artB # dept #2
ID03 artB # dept #2
ID04 artB box1 dept #2
ID05 # # #
ID06 artA box1 dept #1
ID07 artA box1 dept #1
--using reverse index
select
line_id,item_id,packaging_id,get_dept() from
v_sales and
get_dept()='dept #1'
cb
sort(1,'asc'
);
line_id item_id packaging_id get_dept()
ID01 artA box1 dept #1
ID06 artA box1 dept #1
ID07 artA box1 dept #1
select
line_id,item_id,packaging_id,get_dept(item_id) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id get_dept(item_id)
ID01 artA box1 dept #1
ID02 artB # dept #2
ID03 artB # dept #2
ID04 artB box1 dept #2
ID05 # # #
ID06 artA box1 dept #1
ID07 artA box1 dept #1
select
line_id,item_id,packaging_id,get_dept(packaging_id) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id get_dept(packaging_id)
ID01 artA box1 packaging
ID02 artB # #
ID03 artB # #
ID04 artB box1 packaging
ID05 # # #
ID06 artA box1 packaging
ID07 artA box1 packaging
select
item_id,sales_price from
v_items;
item_id sales_price
artA 1.500
artB 3.200
box1 0
--type B
loop items(item_id,sales_price) function
fn_computed_col('mult_by_sales_price'
,'B'
,'default=2.5 toto=1'
);
LOOP_RESULT
DONE
loop items(item_id,sales_price) function
fn_computed_col('divide_by_sales_price'
,'B'
,'1/X'
);
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
get_deptA n
mult_by_sales_priceB n
divide_by_sales_priceB n
refresh
computed_column
;
reponse
success
--with no second parameter, the source column defaults to item_id (first column of the loop)
select
line_id,item_id,packaging_id,sales_qty,mult_by_sales_price(sales_qty) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id sales_qty mult_by_sales_price(sales_qty)
ID01 artA box1 5 7.500
ID02 artB # 6 19.200
ID03 artB # 4 12.800
ID04 artB box1 7 22.400
ID05 # # 8 20
ID06 artA box1 5 7.500
ID07 artA box1 5 7.500
select
line_id,item_id,packaging_id,sales_qty,mult_by_sales_price(sales_qty,item_id) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id sales_qty mult_by_sales_price(sales_qty,item_id)
ID01 artA box1 5 7.500
ID02 artB # 6 19.200
ID03 artB # 4 12.800
ID04 artB box1 7 22.400
ID05 # # 8 20
ID06 artA box1 5 7.500
ID07 artA box1 5 7.500
select
line_id,item_id,packaging_id,sales_qty,mult_by_sales_price(sales_qty,packaging_id) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id sales_qty mult_by_sales_price(sales_qty,packaging_id)
ID01 artA box1 5 0
ID02 artB # 6 15
ID03 artB # 4 10
ID04 artB box1 7 0
ID05 # # 8 20
ID06 artA box1 5 0
ID07 artA box1 5 0
--division instead of multiplication
select
line_id,item_id,packaging_id,sales_qty,divide_by_sales_price(sales_qty) from
v_sales cb
sort(1,'asc'
);
line_id item_id packaging_id sales_qty divide_by_sales_price(sales_qty)
ID01 artA box1 5 3.333
ID02 artB # 6 1.875
ID03 artB # 4 1.250
ID04 artB box1 7 2.188
ID05 # # 8 0
ID06 artA box1 5 3.333
ID07 artA box1 5 3.333
set
COMPUTED_COLUMNS='comp_col1,comp_col2,...'
./_SO_CODE/common.h
typedef
void
(*t_fn_computed_column)(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context);
typedef
struct computed_col_need {
 int
needed_col_count;
 char
**needed_col_names;
} COMPUTED_COL_NEED;
typedef
COMPUTED_COL_NEED* (*t_fn_get_needed_columns)(char
*param_fn_get_needed_columns, char
**available_columns,
   int
available_columns_len, char
**params, int
params_len);
typedef
struct computed_col_prereq {
 //mandatory destination col_type (lower case)
 char
*destination_col_type_name;
 //optional context that will be passed to functions
 char
*needed_context_name;
 //columns needed (needed_col/fn_get_needed_columns)
 // - simple case (needed_col!=NULL) : needed_col is used if fn_get_needed_columns is NULL
 // - complex case (fn_get_needed_columns!=NULL)
 // needed_col is computed this way: needed_col=fn_get_needed_columns(param_fn_get_needed_columns,available_columns,available_columns_len)
 COMPUTED_COL_NEED *needed_col;
 char
*param_fn_get_needed_columns;
 t_fn_get_needed_columns fn_get_needed_columns;
 //put your error message here, NULL means no error
 char
*error;
 //y/n (default n): y means the computed column overrides the real column of the same name
 char
priority_over_real_column_yn;
 // in simple cases this will trigger an optimized treatment in SB
 // defaults to '?' (no optimized treatment)
 // A means select_context is a TYPE_A*
 // B means select_context is a TYPE_B*
 char
select_context_type; // SB_LIB_VERSION >= 9
} COMPUTED_COL_PREREQ;
typedef
COMPUTED_COL_PREREQ* (*t_fn_computed_column_before)(UTILITY *U);
//returns a "select_context"
typedef
void
* (*t_fn_computed_column_before_select)(UTILITY *U, char
**params, int
params_len, void
*context);
typedef
void
(*t_fn_computed_column_after_select)(UTILITY *U, void
*select_context);
typedef
struct typeA {
 U_INT *ival_needed_2_ival_dest;
 U_INT len;
} TYPE_A;
typedef
TYPE_A* (*t_fn_computed_column_before_select_typeA)(UTILITY *U, char
**params, int
params_len, void
*context);
typedef
void
(*t_fn_computed_column_after_select_typeA)(UTILITY *U, TYPE_A *select_context);
typedef
struct typeB {
 float
*ival_needed_2_coef;
 U_INT len;
} TYPE_B;
typedef
TYPE_B* (*t_fn_computed_column_before_select_typeB)(UTILITY *U, char
**params, int
params_len, void
*context);
typedef
void
(*t_fn_computed_column_after_select_typeB)(UTILITY *U, TYPE_B *select_context);
COMPUTED_COL_PREREQ* sb_computed_col_prereq_factory();
//
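--None of the examples below uses the "complex case" (fn_get_needed_columns); here is a
--minimal sketch (hypothetical name) that selects every available column ending in "_id":
COMPUTED_COL_NEED* my_get_needed_columns(char *param_fn_get_needed_columns,
    char **available_columns, int available_columns_len, char **params, int params_len) {
  COMPUTED_COL_NEED *need = malloc(sizeof(COMPUTED_COL_NEED));
  need->needed_col_count = 0;
  need->needed_col_names = malloc(available_columns_len * sizeof(char*));
  for (int i = 0; i < available_columns_len; i++) {
    int len = strlen(available_columns[i]);
    if (len > 3 && strcmp(available_columns[i] + len - 3, "_id") == 0)
      need->needed_col_names[need->needed_col_count++] = sb_clone(available_columns[i]);
  }
  return need;
}
//in the *_before fn: ret->needed_col = NULL; ret->fn_get_needed_columns = my_get_needed_columns;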
set
COMPUTED_COLUMNS='customer_id'
;
reponse
success
set
CPU_COUNT=1;
reponse
success
--the real column will be returned (priority_over_real_column_yn is not set)
./_SO_CODE/Z_doc_comp_col_not_prio.c
#include
"./common.h"
COMPUTED_COL_PREREQ* customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
);
 return
ret;
}
void
customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 U_INT *pin_out = ivals[0];
 U_INT idept = *pin_out;
 *pin_out = idept == 10 ? NULL_IVAL : 10;
}
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count = 0; //stub: no needed columns
 needed_col->needed_col_names = malloc(needed_col->needed_col_count * sizeof(char*));
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
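 //intentionally a no-op: this run sets COMPUTED_COLUMNS='customer_id', so new_customer_id is unused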
}
set
SO_FILE_NAME='Z_doc_comp_col_not_prio.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
sales#customer_id, customer_id from
v_sales;
sales#customer_id customer_id
C1 C1
C2 C2
C1 C1
C2 C2
C1 C1
C1 C1
C2 C2
--computed will be returned
select
dept,ival(dept) from
v_sales group by
dept;
dept ival(dept)
dept #1 10
dept #2 11
./_SO_CODE/Z_doc_comp_col_prio.c
#include
"./common.h"
COMPUTED_COL_PREREQ* customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
);
 ret->priority_over_real_column_yn
= 'y';
 return
ret;
}
void
customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 U_INT *pin_out = ivals[0];
 U_INT idept = *pin_out;
 *pin_out = idept == 10 ? NULL_IVAL : 10;
}
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count = 0; //stub: no needed columns
 needed_col->needed_col_names = malloc(needed_col->needed_col_count * sizeof(char*));
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
}
set
SO_FILE_NAME='Z_doc_comp_col_prio.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
sales#customer_id, customer_id from
v_sales;
sales#customer_id customer_id
C1 #
C2 NO_DATA
C1 NO_DATA
C2 NO_DATA
C1 NO_DATA
C1 #
C2 #
set
COMPUTED_COLUMNS='new_customer_id'
;
reponse
success
set
CPU_COUNT=1;
reponse
success
set
SO_FILE_NAME='Z_doc_comp_col_all_null.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
customer_id, ival(customer_id) from
v_sales group by
customer_id
callback
sort(1,'asc'
)
;
customer_id ival(customer_id)
C1 11
C2 12
select
item_id, ival(item_id) from
v_sales group by
item_id
callback
sort(1,'asc'
)
;
item_id ival(item_id)
artA 11
artB 12
--new_customer_id will always be null
./_SO_CODE/Z_doc_comp_col_all_null.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1; //only dept (not really needed)
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
); //not really needed
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 *pin_out = NULL_IVAL; //update to null value
}
set
CPU_COUNT=1;
reponse
success
desc
table
;
table_name column_count line_count has_delete has_update parent_view_hidden
items 5 3 n n n
customers 2 2 n n n
item_tags 2 2 n n n
fidelity_cards 4 2 n n n
item_customer_infos 5 3 n n n
sales 6 3 n n n
inventory 2 2 n n n
sales_#partition#_00001 6 3 n n n
sales_#partition#_00002 6 1 n n n
desc
computed_column
;
comp_col_name error
new_customer_id no error
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
new_customer_id from
v_sales group by
new_customer_id;
new_customer_id
#
--#SB no_index
select
line_id,new_customer_id,new_customer_id from
v_sales and
new_customer_id=null
cb
sort(1,'asc'
);
line_id new_customer_id new_customer_id
ID01 # #
ID02 # #
ID03 # #
ID04 # #
ID05 # #
ID06 # #
ID07 # #
--#SB use_index
select
line_id,new_customer_id,new_customer_id from
v_sales and
new_customer_id=null
cb
sort(1,'asc'
);
line_id new_customer_id new_customer_id
ID01 # #
ID02 # #
ID03 # #
ID04 # #
ID06 # #
ID07 # #
--new_customer_id will be equal to the first customer_id created (C1 with ival=11)
./_SO_CODE/Z_doc_comp_col_first_value.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1; //only dept (not really needed)
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
); //not really needed
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 *pin_out = 11; //update to ival 11
}
set
SO_FILE_NAME='Z_doc_comp_col_first_value.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
line_id,new_customer_id from
v_sales;
line_id new_customer_id
ID01 C1
ID02 C1
ID03 C1
ID04 C1
ID05 C1
ID06 C1
ID07 C1
./_SO_CODE/Z_doc_comp_col_with_pareto_col.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1; //only dept (not really needed)
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
); //not really needed
 needed_col->needed_col_names
[0] = sb_clone("line_id"
); //pareto column
 ret->destination_col_type_name
= sb_clone("sys#type#sales#line_id"
); //pareto column
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 *pin_out = 11; //update to ival 11
}
set
SO_FILE_NAME='Z_doc_comp_col_with_pareto_col.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
--the analytic function sees new_customer_id (always the same ival)
select
customer_id, new_customer_id from
v_sales;
customer_id new_customer_id
C1 ID02
C2 ID02
C1 ID02
C2 ID02
C1 ID02
C1 ID02
C2 ID02
select
p(new_customer_id) from
v_sales function
fn_custom(
'(void* c, U_INT thread_pos, U_INT iline, int key){printf("## fn_custom ival: %d\n",key);float *f=c; *f+=1; return 0.;}'
,
'fn_key'
,
fn_num_before,fn_num_after
)
;
fn_custom ival: 11
fn_custom ival: 11
fn_custom ival: 11
fn_custom ival: 11
fn_custom ival: 11
fn_custom ival: 11
fn_custom ival: 11
num
7
--new_customer_id will be equal to the customer_id passed as parameter ('A' maps to ival 10, 'B' to ival 11, anything else to null)
./_SO_CODE/Z_doc_comp_col_with_param.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1; //only dept (not really needed)
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
); //not really needed
 return
ret;
}
void
* new_customer_id_before_select(UTILITY *U, char
**params, int
params_len, void
*context) {
 U_INT *select_context = malloc
(sizeof
(U_INT));
 if
(strcmp(params[0], "A"
) == 0) {
   *select_context = 10;
 } else if
(strcmp(params[0], "B"
) == 0) {
   *select_context = 11;
 } else {
   *select_context = NULL_IVAL;
 }
 return
select_context;
}
void
new_customer_id_after_select(UTILITY *U, void
*select_context) {
 free
(select_context);
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 U_INT ival_context = *(U_INT*) context;
 *pin_out = ival_context;
}
set
SO_FILE_NAME='Z_doc_comp_col_with_param.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
line_id, new_customer_id('A'
), new_customer_id('B'
), new_customer_id('C'
) from
v_sales;
line_id new_customer_id('A') new_customer_id('B') new_customer_id('C')
ID01 NO_DATA C1 #
ID02 NO_DATA C1 #
ID03 NO_DATA C1 #
ID04 NO_DATA C1 #
ID05 NO_DATA C1 #
ID06 NO_DATA C1 #
ID07 NO_DATA C1 #
select
new_customer_id('A'
) as
col1, new_customer_id('B'
) as
col2, count(*) from
v_sales group by
new_customer_id('A'
), new_customer_id('B'
);
col1 col2 count(*)
NO_DATA C1 7
select
count(*) from
v_sales and
new_customer_id('A'
)='C1'
;
count(*)
select
new_customer_id('A'
),list(new_customer_id('B'
)) from
v_sales group by
new_customer_id('A'
);
new_customer_id('A') list(new_customer_id('B'))
NO_DATA C1
select
new_customer_id('A'
),count(*) from
v_sales group by
new_customer_id('A'
);
new_customer_id('A') count(*)
NO_DATA 7
--new_customer_id will be equal to the customer_id passed as parameter ('A' maps to ival 10, 'B' to ival 11, anything else to null)
./_SO_CODE/Z_doc_comp_col_with_param_typeA.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 1;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("item_id"
);
 needed_col->needed_col_names
[0] = sb_clone("line_id"
); //overrides item_id above with line_id (pareto column)
 ret->select_context_type
= 'A';
 return
ret;
}
TYPE_A* new_customer_id_before_select_typeA(UTILITY *U, char
**params, int
params_len, void
*context) {
 TYPE_A *select_context = malloc
(sizeof
(TYPE_A));
 select_context->len
= U->fn_get_col_type_ival_count
("t_item_id"
);
 select_context->len
= U->fn_get_col_type_ival_count
("sys#type#sales#line_id"
); //overrides the t_item_id length above with line_id's (pareto column)
 select_context->ival_needed_2_ival_dest
= malloc
(select_context->len
* sizeof
(U_INT));
 U_INT ival;
 if
(strcmp(params[0], "A"
) == 0) {
   ival = 10;
 } else if
(strcmp(params[0], "B"
) == 0) {
   ival = 11;
 } else {
   ival = NULL_IVAL;
 }
 for (int
i = 0; i < select_context->
len
; i++) {
   if
(i == 11) {
     select_context->ival_needed_2_ival_dest
[i] = ival;
   } else {
     select_context->ival_needed_2_ival_dest
[i] = NULL_IVAL;
   }
 }
 return
select_context;
}
void
new_customer_id_after_select_typeA(UTILITY *U, TYPE_A *select_context) {
 free(select_context->ival_needed_2_ival_dest); //free the mapping before the struct itself
 free(select_context);
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 printf("## CALL new_customer_id pin_out %p, should not happen in type A\n"
, pin_out);
 exit(-1);
 *pin_out = NULL_IVAL;
}
set
SO_FILE_NAME='Z_doc_comp_col_with_param_typeA.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
set
cache
='n'
;
reponse
success
select
customer_id from
v_sales;
customer_id
C1
C2
C1
C2
C1
C1
C2
select
customer_id, customer_id from
v_sales where
customer_id='C1'
;
customer_id customer_id
C1 C1
C1 C1
C1 C1
C1 C1
select
item_id, customer_id, new_customer_id('A'
) from
v_sales;
item_id customer_id new_customer_id('A')
artA C1 #
artB C2 NO_DATA
artB C1 #
artB C2 #
# C1 #
artA C1 #
artA C2 #
select
new_customer_id('A'
), new_customer_id('A'
) from
v_sales;
new_customer_id('A') new_customer_id('A')
# #
NO_DATA NO_DATA
# #
# #
# #
# #
# #
select
line_id,uniquesoft(customer_id) from
v_sales group by
line_id cb
sort(1,'asc'
);
line_id uniquesoft(customer_id)
ID01 C1
ID02 C2
ID03 C1
ID04 C2
ID05 C1
ID06 C1
ID07 C2
select
line_id,uniquesoft(new_customer_id('A'
)) from
v_sales group by
line_id cb
sort(1,'asc'
);
line_id uniquesoft(new_customer_id('A'))
ID02 NO_DATA
select
item_id, unique(new_customer_id('A'
)), max(new_customer_id('A'
)),count(*) from
v_sales
group by
item_id;
item_id unique(new_customer_id('A')) max(new_customer_id('A')) count(*)
artA # # 3
artB # NO_DATA 3
# # # 1
--#SB use_index explain
select
new_customer_id('A'
), new_customer_id('A'
) from
v_sales and
new_customer_id('A'
)='NO_DATA'
;
logs
--#SB no_index
select
new_customer_id('A'
), new_customer_id('A'
) from
v_sales and
new_customer_id('A'
)='NO_DATA'
;
new_customer_id('A') new_customer_id('A')
NO_DATA NO_DATA
select
new_customer_id('A'
), new_customer_id('B'
), new_customer_id('C'
) from
v_sales;
new_customer_id('A') new_customer_id('B') new_customer_id('C')
# # #
NO_DATA C1 #
# # #
# # #
# # #
# # #
# # #
select
line_id, new_customer_id('A'
), new_customer_id('B'
), new_customer_id('C'
) from
v_sales;
line_id new_customer_id('A') new_customer_id('B') new_customer_id('C')
ID01 # # #
ID02 NO_DATA C1 #
ID03 # # #
ID04 # # #
ID05 # # #
ID06 # # #
ID07 # # #
select
new_customer_id('A'
) as
col1, new_customer_id('B'
) as
col2, count(*) from
v_sales group by
new_customer_id('A'
), new_customer_id('B'
);
col1 col2 count(*)
# # 6
NO_DATA C1 1
select
count(*) from
v_sales and
new_customer_id('A'
)='C1'
;
count(*)
select
new_customer_id('A'
),list(new_customer_id('B'
)) from
v_sales group by
new_customer_id('A'
);
new_customer_id('A') list(new_customer_id('B'))
#
NO_DATA C1
select
new_customer_id('A'
),count(*) from
v_sales group by
new_customer_id('A'
);
new_customer_id('A') count(*)
# 6
NO_DATA 1
set
COMPUTED_COLUMNS='new_qty,new_qty2'
;
reponse
success
refresh
dirty
view
;
reponse
success
--new_qty and new_qty2 multiply sales_qty by a per-ival coefficient (2 when ival>=10, else -2)
./_SO_CODE/Z_doc_comp_col_typeB.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_qty_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("sys#type#sales#sales_qty"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 2;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("sales_qty"
);
 needed_col->needed_col_names
[1] = sb_clone("item_id"
);
 ret->select_context_type
= 'B';
 return
ret;
}
TYPE_B* new_qty_before_select_typeB(UTILITY *U, char
**params, int
params_len, void
*context) {
 TYPE_B *select_context = malloc
(sizeof
(TYPE_B));
 select_context->len
= U->fn_get_col_type_ival_count
("t_item_id"
);
 select_context->ival_needed_2_coef
= malloc
(select_context->len
* sizeof
(float
));
 for (int
i = 0; i < select_context->
len
; i++) {
   select_context->ival_needed_2_coef
[i] = i >= 10 ? 2 : -2;
 }
 return
select_context;
}
void
new_qty(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
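 //type B: SB applies the per-ival coefficients built in new_qty_before_select_typeB, so nothing to do here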
}
COMPUTED_COL_PREREQ* new_qty2_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("sys#type#sales#sales_qty"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 2;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("sales_qty"
);
 needed_col->needed_col_names
[1] = sb_clone("line_id"
);
 ret->select_context_type
= 'B';
 return
ret;
}
TYPE_B* new_qty2_before_select_typeB(UTILITY *U, char
**params, int
params_len, void
*context) {
 TYPE_B *select_context = malloc
(sizeof
(TYPE_B));
 select_context->len
= U->fn_get_col_type_ival_count
("sys#type#sales#line_id"
);
 select_context->ival_needed_2_coef
= malloc
(select_context->len
* sizeof
(float
));
 for (int
i = 0; i < select_context->
len
; i++) {
   select_context->ival_needed_2_coef
[i] = i >= 10 ? 2 : -2;
 }
 return
select_context;
}
void
new_qty2(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
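 //type B: as for new_qty, the coefficient table does the work; the body stays empty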
}
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count = 0; //stub: no needed columns
 needed_col->needed_col_names = malloc(needed_col->needed_col_count * sizeof(char*));
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
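 //intentionally a no-op: this run sets COMPUTED_COLUMNS='new_qty,new_qty2'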
}
set
SO_FILE_NAME='Z_doc_comp_col_typeB.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
--using dimension ival (non pareto ival)
select
item_id, ival(item_id), sum(sales_qty), sum(new_qty) from
v_sales group by
item_id;
item_id ival(item_id) sum(sales_qty) sum(new_qty)
artA 11 15 30
artB 12 17 34
# # 8 -16
--using pareto column ival (line_id)
select
line_id, ival(line_id), sum(sales_qty), sum(new_qty2) from
v_sales group by
line_id;
line_id ival(line_id) sum(sales_qty) sum(new_qty2)
ID01 10 5 10
ID02 11 6 12
ID03 12 4 8
ID04 13 7 14
ID05 14 8 16
ID06 15 5 10
ID07 16 5 10
set
COMPUTED_COLUMNS='new_customer_id'
;
reponse
success
./_SO_CODE/Z_doc_comp_col_two_values.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 3; //customer_id and item_id are needed, we keep department for the moment
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("dept"
); //still not really needed
 needed_col->needed_col_names
[1] = sb_clone("customer_id"
);
 needed_col->needed_col_names
[2] = sb_clone("item_id"
);
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 U_INT *pival_customer_id = ivals[1]; //customer_id is passed after dept
 U_INT *pival_item_id = ivals[2]; //item_id is passed after customer_id
 if
(*pival_item_id == 10) {
   *pin_out = *pival_customer_id; //update to customer_id
 } else {
   *pin_out = NULL_IVAL; //update to null value
 }
}
set
SO_FILE_NAME='Z_doc_comp_col_two_values.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
select
line_id,customer_id,item_id from
v_sales;
line_id customer_id item_id
ID01 C1 artA
ID02 C2 artB
ID03 C1 artB
ID04 C2 artB
ID05 C1 #
ID06 C1 artA
ID07 C2 artA
select
line_id,new_customer_id from
v_sales;
line_id new_customer_id
ID01 #
ID02 #
ID03 #
ID04 #
ID05 #
ID06 #
ID07 #
select
line_id,new_customer_id,customer_id from
v_sales and
customer_id='C2'
;
line_id new_customer_id customer_id
ID02 # C2
ID04 # C2
ID07 # C2
select
line_id,new_customer_id from
v_sales and
new_customer_id='C2'
;
line_id new_customer_id
./_SO_CODE/Z_doc_comp_col_two_values_better.c
#include
"./common.h"
COMPUTED_COL_PREREQ* new_customer_id_before(UTILITY *U) {
 COMPUTED_COL_PREREQ *ret = sb_computed_col_prereq_factory();
 ret->destination_col_type_name
= sb_clone("t_customer_id"
);
 COMPUTED_COL_NEED *needed_col = malloc
(sizeof
(COMPUTED_COL_NEED));
 ret->needed_col
= needed_col;
 needed_col->needed_col_count
= 2;
 needed_col->needed_col_names
= malloc
(needed_col->needed_col_count
* sizeof
(char
*));
 needed_col->needed_col_names
[0] = sb_clone("customer_id"
);
 needed_col->needed_col_names
[1] = sb_clone("item_id"
);
 return
ret;
}
void
new_customer_id(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context) {
 //position 0 is "in/out"
 U_INT *pin_out = ivals[0];
 U_INT *pival_customer_id = ivals[0];
 U_INT *pival_item_id = ivals[1];
 if
(*pival_item_id == 10) {
   //already good
 } else {
   *pival_customer_id = NULL_IVAL; //update to null value
 }
}
set
SO_FILE_NAME='Z_doc_comp_col_two_values_better.so'
;
reponse
success
refresh
dirty
view
;
reponse
success
--#SB log_verbose
select
line_id, new_customer_id from
v_sales
;
line_id new_customer_id
ID01 #
ID02 #
ID03 #
ID04 #
ID05 #
ID06 #
ID07 #
select
line_id from
v_sales
and
new_customer_id='C1'
;
line_id
select
line_id from
v_sales
and
new_customer_id='C2'
;
line_id
select
line_id, maxstr(new_customer_id) from
v_sales group by
line_id
;
line_id maxstr(new_customer_id)
select
item_id, list(new_customer_id) from
v_sales group by
item_id
;
item_id list(new_customer_id)
artA
artB
#
select
item_id, list(new_customer_id), list(customer_id) from
v_sales group by
item_id
;
item_id list(new_customer_id) list(customer_id)
artA C1,C2
artB C1,C2
# C1
./_SO_CODE/common.h
typedef
struct idx1 {
 U_INT len;
 U_INT *ival1_to_ival2;
 char
*origin_col_type_name; //SB_LIB_VERSION 12
 char
*origin_col_name; //SB_LIB_VERSION 12
 char
*destination_col_type_name;
 COMPUTED_COL_NEED *needed_col;
} IDX1;
typedef
struct idx1_num {
 U_INT len;
 float
*ival1_to_num;
 char
*origin_col_type_name;
 char
*origin_col_name;
 char
*destination_col_type_name;
 char
ratio_yn;
} IDX1_NUM;
typedef
struct idx2 {
 U_INT **ival1_to_ival2_to_ival3;
 U_INT ival1_len;
 char
*destination_col_type_name;
 COMPUTED_COL_NEED *needed_col;
} IDX2;
#define YYYYMMDD unsigned int
typedef
struct line_period {
 U_INT ival;
 YYYYMMDD d1;
 YYYYMMDD d2;
} SB_PERIOD;
typedef
struct line_periods {
 SB_PERIOD *lines;
 int
count;
} SB_PERIODS;
typedef
struct idx2_period {
 U_INT ival1_len;
 U_INT ival2_len;
 SB_PERIODS ***ival1_to_ival2_to_ival3_periods;
 char
*destination_col_type_name;
 COMPUTED_COL_NEED *needed_col;
 YYYYMMDD *ival_to_d;
 int
ival_to_d_len;
 char
*d_col_type_name;
} IDX2_PERIOD;
COMPUTED_COL_PREREQ* sb_idx1_before(UTILITY *U, char
*col1, char
*col2);
COMPUTED_COL_PREREQ* sb_idx2_before(UTILITY *U, char
*col1, char
*col2, char
*col3);
COMPUTED_COL_PREREQ* sb_idx2_period_before(UTILITY *U, char
*col1, char
*col2, char
*col3, char
*col4);
void
sb_idx1(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context);
void
sb_idx2(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context);
void
sb_idx2_period(U_INT **ivals, float
**num_vals, int
ivals_len, void
*context);
//
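--Conceptual sketch (hypothetical fn; the real sb_idx1 ships with SB): an IDX1 lookup
--maps the incoming ival through ival1_to_ival2, returning null when out of range:
void my_idx1(U_INT **ivals, float **num_vals, int ivals_len, void *context) {
  IDX1 *idx = context;
  U_INT in = *ivals[0];
  *ivals[0] = (in < idx->len) ? idx->ival1_to_ival2[in] : NULL_IVAL;
}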
set
COMPUTED_COLUMNS='last_fidelity_card,last_customer_info,valid_customer_info,best_customer_info'
;
reponse
success
select
item_id, customer_id, sales_date, line_id from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id
artA C1 20191231 ID01
artB C2 20200102 ID02
artB C1 20191231 ID03
artB C2 20200102 ID04
# C1 20200102 ID05
artA C1 20191231 ID06
artA C2 20191231 ID07
select
* from
v_fidelity_cards;
customer_id card_label valid_from valid_until
C1 SILVER 20191201 20191231
C1 GOLD 20201201 20201231
select
* from
v_item_customer_infos;
customer_id item_id info valid_from valid_until
C1 artA FREQUENT BUYER of artA in 2019 20190101 20191231
C1 artB FREQUENT BUYER of artB in 2020 20200101 20201231
C2 artB FREQUENT BUYER of artB in 2020 20200101 20201231
--1 column join with fn_build_idx1
--last fidelity card per customer_id
./_SO_CODE/Z_doc_comp_col2_last_fidelity_card.c
#include
"./common.h"
COMPUTED_COL_PREREQ* last_fidelity_card_before(UTILITY *U) {
 return
sb_idx1_before(U, "customer_id"
, "card_label"
);
}
void
last_fidelity_card(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx1(ivals, num_vals, ivals_len, context);
}
void
fn_free(void
*c){
 free
(c);
}
set
SO_FILE_NAME='Z_doc_comp_col2_last_fidelity_card.so'
;
reponse
success
loop fidelity_cards(customer_id,card_label) function
fn_build_idx1;
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
get_deptA n
mult_by_sales_priceB n
divide_by_sales_priceB n
IDX1#customer_id->card_label n
refresh
computed_column
;
reponse
success
desc
computed_column
;
comp_col_name error
last_fidelity_card no error
last_customer_info function not found: last_customer_info
valid_customer_info function not found: valid_customer_info
best_customer_info function not found: best_customer_info
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
item_id, customer_id, sales_date, line_id, last_fidelity_card from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id last_fidelity_card
artA C1 20191231 ID01 GOLD
artB C2 20200102 ID02 #
artB C1 20191231 ID03 GOLD
artB C2 20200102 ID04 #
# C1 20200102 ID05 GOLD
artA C1 20191231 ID06 GOLD
artA C2 20191231 ID07 #
select
customer_id, list(last_fidelity_card) from
v_sales group by
customer_id callback
sort(1,'asc'
);
customer_id list(last_fidelity_card)
C1 GOLD
C2
select
sales.* from
v_sales where
last_fidelity_card like 'GO'
;
item_id customer_id sales_date sales_qty line_id packaging_id
artA C1 20191231 5 ID01 box1
artB C1 20191231 4 ID03 #
# C1 20200102 8 ID05 #
artA C1 20191231 5 ID06 box1
select
count(*), list(customer_id) from
v_sales and
last_fidelity_card='GOLD'
;
count(*) list(customer_id)
4 C1
--2 columns join with fn_build_idx2
--last customer_info per item_id/customer_id
./_SO_CODE/Z_doc_comp_col2_last_customer_info.c
#include
"./common.h"
COMPUTED_COL_PREREQ* last_customer_info_before(UTILITY *U) {
 return
sb_idx2_before(U, "customer_id"
, "item_id"
, "info"
);
}
void
last_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx2(ivals, num_vals, ivals_len, context);
}
void
fn_free(void
*c){
 free
(c);
}
set
SO_FILE_NAME='Z_doc_comp_col2_last_customer_info.so'
;
reponse
success
loop item_customer_infos(customer_id,item_id,info) function
fn_build_idx2;
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
get_deptA n
mult_by_sales_priceB n
divide_by_sales_priceB n
IDX1#customer_id->card_label n
IDX2#customer_id->item_id->info n
refresh
computed_column
;
reponse
success
desc
computed_column
;
comp_col_name error
last_fidelity_card function not found: last_fidelity_card
last_customer_info no error
valid_customer_info function not found: valid_customer_info
best_customer_info function not found: best_customer_info
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
item_id, customer_id, sales_date, line_id, last_customer_info from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id last_customer_info
artA C1 20191231 ID01 FREQUENT BUYER of artA in 2019
artB C2 20200102 ID02 FREQUENT BUYER of artB in 2020
artB C1 20191231 ID03 FREQUENT BUYER of artB in 2020
artB C2 20200102 ID04 FREQUENT BUYER of artB in 2020
# C1 20200102 ID05 #
artA C1 20191231 ID06 FREQUENT BUYER of artA in 2019
artA C2 20191231 ID07 #
--2 columns join + period check with fn_build_idx2_period
--valid customer_info per item_id/customer_id, valid at the sales_date
./_SO_CODE/Z_doc_comp_col2_valid_customer_info.c
#include
"./common.h"
COMPUTED_COL_PREREQ* valid_customer_info_before(UTILITY *U) {
 return
sb_idx2_period_before(U, "customer_id"
, "item_id"
, "sales_date"
, "info"
);
}
void
valid_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx2_period(ivals, num_vals, ivals_len, context);
}
void
fn_free(void
*c){
 free
(c);
}
set
SO_FILE_NAME='Z_doc_comp_col2_valid_customer_info.so'
;
reponse
success
loop item_customer_infos(customer_id,item_id,info,valid_from,valid_until) function
fn_build_idx2_period('sales_date'
);
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
get_deptA n
mult_by_sales_priceB n
divide_by_sales_priceB n
IDX1#customer_id->card_label n
IDX2#customer_id->item_id->info n
IDX2_PERIOD#customer_id->item_id->sales_date->info n
refresh
computed_column
;
reponse
success
desc
computed_column
;
comp_col_name error
last_fidelity_card function not found: last_fidelity_card
last_customer_info function not found: last_customer_info
valid_customer_info no error
best_customer_info function not found: best_customer_info
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
item_id, customer_id, sales_date, line_id, valid_customer_info from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id valid_customer_info
artA C1 20191231 ID01 FREQUENT BUYER of artA in 2019
artB C2 20200102 ID02 FREQUENT BUYER of artB in 2020
artB C1 20191231 ID03 #
artB C2 20200102 ID04 FREQUENT BUYER of artB in 2020
# C1 20200102 ID05 #
artA C1 20191231 ID06 FREQUENT BUYER of artA in 2019
artA C2 20191231 ID07 #
--best_customer_info: valid_customer_info, else last_customer_info, else last_fidelity_card
./_SO_CODE/Z_doc_comp_col2_best_customer_info.c
#include
"./common.h"
typedef
struct my_context {
 IDX1 *idx1;
 IDX2 *idx2;
 IDX2_PERIOD *idx2_p;
} MY_CONTEXT;
void
fn_free(void
*c){
 free
(c);
}
int
fn_build_my_context_before(SB_VALS* read, SB_VALS* new) {
 MY_CONTEXT *c = malloc
(sizeof
(MY_CONTEXT));
 c->idx1 = read->U->fn_get_context("IDX1#customer_id->card_label");
 c->idx2 = read->U->fn_get_context("IDX2#customer_id->item_id->info");
 c->idx2_p = read->U->fn_get_context("IDX2_PERIOD#customer_id->item_id->sales_date->info");
 if
(c->idx1
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX1#customer_id->card_label not found");
   return
KO;
 }
 if
(c->idx2
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX2#customer_id->item_id->info not found");
   return
KO;
 }
 if
(c->idx2_p
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX2_PERIOD#customer_id->item_id->sales_date->info not found");
   return
KO;
 }
 read->context
= c;
 read->context_name
= sb_clone("my_context"
);
 read->fn_free_context_name
= sb_clone("fn_free"
);
 return
OK;
}
int
fn_build_my_context(SB_VALS* read, SB_VALS* new) {
 return
NOT_MODIFIED;
}
COMPUTED_COL_PREREQ* best_customer_info_before(UTILITY *U) {
 COMPUTED_COL_PREREQ*ret = sb_computed_col_prereq_factory();
 ret->needed_context_name
= sb_clone("my_context"
);
 MY_CONTEXT* idx = U->fn_get_context
(ret->needed_context_name
);
 if
(idx == NULL) {
   ret->error
= sb_concat3("context "
, ret->needed_context_name
, " not found"
);
   return
ret;
 }
 ret->destination_col_type_name
= sb_clone(idx->idx2_p
->destination_col_type_name
);
 ret->needed_col
= sb_clone_need_col(idx->idx2_p
->needed_col
);
 return
ret;
}
void
best_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 MY_CONTEXT *c = context;
 U_INT customer_id = *ivals[0];
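 //each sb_idx* call overwrites ivals[0], so the saved ival is restored before each fallback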
 sb_idx2_period(ivals, num_vals, ivals_len, c->idx2_p
);
 if
(*ivals[0] == NULL_IVAL) {
   *ivals[0] = customer_id;
   sb_idx2(ivals, num_vals, ivals_len, c->idx2
);
   if
(*ivals[0] == NULL_IVAL) {
     *ivals[0] = customer_id;
     sb_idx1(ivals, num_vals, ivals_len, c->idx1
);
   }
 }
}
set
SO_FILE_NAME='Z_doc_comp_col2_best_customer_info.so'
;
reponse
success
--create an empty table to trigger fn_build_my_context
create
table
empty_table (txt text
);
reponse
success
loop empty_table(txt) function
fn_build_my_context;
LOOP_RESULT
DONE
desc
context;
context_name is_drop_yn
get_deptA n
mult_by_sales_priceB n
divide_by_sales_priceB n
IDX1#customer_id->card_label n
IDX2#customer_id->item_id->info n
IDX2_PERIOD#customer_id->item_id->sales_date->info n
my_context n
refresh
computed_column
;
reponse
success
desc
computed_column
;
comp_col_name error
last_fidelity_card function not found: last_fidelity_card
last_customer_info function not found: last_customer_info
valid_customer_info function not found: valid_customer_info
best_customer_info no error
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
item_id, customer_id, sales_date, line_id, best_customer_info from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id best_customer_info
artA C1 20191231 ID01 FREQUENT BUYER of artA in 2019
artB C2 20200102 ID02 FREQUENT BUYER of artB in 2020
artB C1 20191231 ID03 FREQUENT BUYER of artB in 2020
artB C2 20200102 ID04 FREQUENT BUYER of artB in 2020
# C1 20200102 ID05 GOLD
artA C1 20191231 ID06 FREQUENT BUYER of artA in 2019
artA C2 20191231 ID07 #
--all computed columns
./_SO_CODE/Z_doc_comp_col2_all.c
#include
"./common.h"
COMPUTED_COL_PREREQ* last_fidelity_card_before(UTILITY *U) {
 return
sb_idx1_before(U, "customer_id"
, "card_label"
);
}
void
last_fidelity_card(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx1(ivals, num_vals, ivals_len, context);
}
COMPUTED_COL_PREREQ* last_customer_info_before(UTILITY *U) {
 return
sb_idx2_before(U, "customer_id"
, "item_id"
, "info"
);
}
void
last_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx2(ivals, num_vals, ivals_len, context);
}
COMPUTED_COL_PREREQ* valid_customer_info_before(UTILITY *U) {
 return
sb_idx2_period_before(U, "customer_id"
, "item_id"
, "sales_date"
, "info"
);
}
void
valid_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 sb_idx2_period(ivals, num_vals, ivals_len, context);
}
typedef
struct my_context {
 IDX1 *idx1;
 IDX2 *idx2;
 IDX2_PERIOD *idx2_p;
} MY_CONTEXT;
void
fn_free(void
*c){
 free
(c);
}
int
fn_build_my_context_before(SB_VALS* read, SB_VALS* new) {
 MY_CONTEXT *c = malloc
(sizeof
(MY_CONTEXT));
 c->idx1 = read->U->fn_get_context("IDX1#customer_id->card_label");
 c->idx2 = read->U->fn_get_context("IDX2#customer_id->item_id->info");
 c->idx2_p = read->U->fn_get_context("IDX2_PERIOD#customer_id->item_id->sales_date->info");
 if
(c->idx1
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX1#customer_id->card_label not found");
   return
KO;
 }
 if
(c->idx2
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX2#customer_id->item_id->info not found");
   return
KO;
 }
 if
(c->idx2_p
== NULL) {
   read->U->fn_log("fn_build_my_context_before: context IDX2_PERIOD#customer_id->item_id->sales_date->info not found");
   return
KO;
 }
 read->context
= c;
 read->context_name
= sb_clone("my_context"
);
 read->fn_free_context_name
= sb_clone("fn_free"
);
 return
OK;
}
int
fn_build_my_context(SB_VALS* read, SB_VALS* new) {
 return
NOT_MODIFIED;
}
COMPUTED_COL_PREREQ* best_customer_info_before(UTILITY *U) {
 COMPUTED_COL_PREREQ*ret = sb_computed_col_prereq_factory();
 ret->needed_context_name
= sb_clone("my_context"
);
 MY_CONTEXT* idx = U->fn_get_context
(ret->needed_context_name
);
 if
(idx == NULL) {
   ret->error
= sb_concat3("context "
, ret->needed_context_name
, " not found"
);
   return
ret;
 }
 ret->destination_col_type_name
= sb_clone(idx->idx2_p
->destination_col_type_name
);
 ret->needed_col
= sb_clone_need_col(idx->idx2_p
->needed_col
);
 return
ret;
}
void
best_customer_info(U_INT**ivals, float
**num_vals, int
ivals_len, void
*context) {
 MY_CONTEXT *c = context;
 U_INT customer_id = *ivals[0];
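 //each sb_idx* call overwrites ivals[0], so the saved ival is restored before each fallback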
 sb_idx2_period(ivals, num_vals, ivals_len, c->idx2_p
);
 if
(*ivals[0] == NULL_IVAL) {
   *ivals[0] = customer_id;
   sb_idx2(ivals, num_vals, ivals_len, c->idx2
);
   if
(*ivals[0] == NULL_IVAL) {
     *ivals[0] = customer_id;
     sb_idx1(ivals, num_vals, ivals_len, c->idx1
);
   }
 }
}
set
SO_FILE_NAME='Z_doc_comp_col2_all.so'
;
reponse
success
refresh
computed_column
;
reponse
success
desc
computed_column
;
comp_col_name error
last_fidelity_card no error
last_customer_info no error
valid_customer_info no error
best_customer_info no error
get_dept no error
mult_by_sales_price no error
divide_by_sales_price no error
select
item_id, customer_id, sales_date, line_id, last_fidelity_card, last_customer_info, valid_customer_info, best_customer_info from
v_sales callback
sort(4,'asc'
);
item_id customer_id sales_date line_id last_fidelity_card last_customer_info valid_customer_info best_customer_info
artA C1 20191231 ID01 GOLD FREQUENT BUYER of artA in 2019 FREQUENT BUYER of artA in 2019 FREQUENT BUYER of artA in 2019
artB C2 20200102 ID02 # FREQUENT BUYER of artB in 2020 FREQUENT BUYER of artB in 2020 FREQUENT BUYER of artB in 2020
artB C1 20191231 ID03 GOLD FREQUENT BUYER of artB in 2020 # FREQUENT BUYER of artB in 2020
artB C2 20200102 ID04 # FREQUENT BUYER of artB in 2020 FREQUENT BUYER of artB in 2020 FREQUENT BUYER of artB in 2020
# C1 20200102 ID05 GOLD # # GOLD
artA C1 20191231 ID06 GOLD FREQUENT BUYER of artA in 2019 FREQUENT BUYER of artA in 2019 FREQUENT BUYER of artA in 2019
artA C2 20191231 ID07 # # # #
--index on last_fidelity_card can be used but not on last_customer_info
--#SB use_index
select
line_id from
v_sales
and
last_fidelity_card='GOLD'
and
last_customer_info='FREQUENT BUYER of artA in 2019'
;
line_id
ID01
ID06
-- refresh computed_column;
-- refresh cache;
-- ###########################
-- RUNNING shutdown.sql
shutdown
;
-- ###########################
-- RUNNING stop_on_error.sql (with ./cluster_sql.sh)
stop_on_error;
warning
CLI command sent: stop_on_error
-- ###########################
-- RUNNING cd ../02_sql; ./cluster.sh init;
/*
killing cluster (pid:21812
21816
21817
21818
21819
21820)
done
cluster is not running on this server
init -> true
foreground -> true
[0] CLUSTER master port: 2220 (0)
[0] CLUSTER slave count: 2 (0)
[0] FIRST_NODE ____comment#5____
[0] FIRST_NODE connecting to localhost : 2219 (0)
[0] SECOND_NODE ____comment#6____
[0] SECOND_NODE connecting to localhost : 2219 (0)
[0] SECOND_NODE -> authentication; (0)
[0] FIRST_NODE -> authentication; (0)
[0] SECOND_NODE <- data ( 104 ms ) (0)
[0] SECOND_NODE -> exec show_data_model(); (0)
[0] SECOND_NODE <- data ( 3 ms ) (0)
[0] FIRST_NODE <- data ( 203 ms ) (0)
[0] FIRST_NODE -> exec show_data_model(); (0)
[0] FIRST_NODE <- data ( 2 ms ) (0)
[0] FIRST_NODE col_type t_item_id exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL item_id FROM v_item_tags GROUP BY item_id; (0)
[0] SECOND_NODE col_type t_item_id exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL item_id FROM v_item_tags GROUP BY item_id; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 14 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 4 ms ) (0)
[0] SECOND_NODE #2 len: 14 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK t_item_id 1/9 (0)
[0] FIRST_NODE col_type sys#type#items#art_label exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL art_label FROM v_items GROUP BY art_label; (0)
[0] SECOND_NODE col_type sys#type#items#art_label exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL art_label FROM v_items GROUP BY art_label; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 12 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 3 ms ) (0)
[0] SECOND_NODE #2 len: 12 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK sys#type#items#art_label 2/9 (0)
[0] FIRST_NODE col_type t_dept_id exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL dept FROM v_items GROUP BY dept; (0)
[0] SECOND_NODE col_type t_dept_id exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL dept FROM v_items GROUP BY dept; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 12 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 4 ms ) (0)
[0] SECOND_NODE #2 len: 12 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK t_dept_id 3/9 (0)
[0] FIRST_NODE col_type t_customer_id exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL customer_id FROM v_sales GROUP BY customer_id; (0)
[0] SECOND_NODE col_type t_customer_id exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL customer_id FROM v_sales GROUP BY customer_id; (0)
[0] FIRST_NODE <- data ( 3 ms ) (0)
[0] FIRST_NODE #2 len: 13 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 4 ms ) (0)
[0] SECOND_NODE #2 len: 13 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK t_customer_id 4/9 (0)
[0] FIRST_NODE col_type t_date exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL sales_date FROM v_sales GROUP BY sales_date; (0)
[0] SECOND_NODE col_type t_date exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL sales_date FROM v_sales GROUP BY sales_date; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 17 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 4 ms ) (0)
[0] SECOND_NODE #2 len: 17 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK t_date 5/9 (0)
[0] FIRST_NODE col_type t_customer_info exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL info FROM v_item_customer_infos GROUP BY info; (0)
[0] SECOND_NODE col_type t_customer_info exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL info FROM v_item_customer_infos GROUP BY info; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 14 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 4 ms ) (0)
[0] SECOND_NODE #2 len: 14 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK t_customer_info 6/9 (0)
[0] FIRST_NODE col_type sys#type#customers#customer_name exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL customer_name FROM v_sales GROUP BY customer_name; (0)
[0] SECOND_NODE col_type sys#type#customers#customer_name exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL customer_name FROM v_sales GROUP BY customer_name; (0)
[0] FIRST_NODE <- data ( 3 ms ) (0)
[0] FIRST_NODE #2 len: 12 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 3 ms ) (0)
[0] SECOND_NODE #2 len: 12 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK sys#type#customers#customer_name 7/9 (0)
[0] FIRST_NODE col_type sys#type#sales#line_id exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL line_id FROM v_sales GROUP BY line_id; (0)
[0] SECOND_NODE col_type sys#type#sales#line_id exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL line_id FROM v_sales GROUP BY line_id; (0)
[0] SECOND_NODE <- data ( 3 ms ) (0)
[0] SECOND_NODE #2 len: 17 (0)
[0] SECOND_NODE #3 (0)
[0] FIRST_NODE <- data ( 3 ms ) (0)
[0] FIRST_NODE #2 len: 17 (0)
[0] FIRST_NODE #3 (0)
[0] CLUSTER colType OK sys#type#sales#line_id 8/9 (0)
[0] FIRST_NODE col_type sys#type#item_tags#tag exec sql #1 (0)
[0] FIRST_NODE -> SELECT_ALL tag FROM v_item_tags GROUP BY tag; (0)
[0] SECOND_NODE col_type sys#type#item_tags#tag exec sql #1 (0)
[0] SECOND_NODE -> SELECT_ALL tag FROM v_item_tags GROUP BY tag; (0)
[0] FIRST_NODE <- data ( 4 ms ) (0)
[0] FIRST_NODE #2 len: 12 (0)
[0] FIRST_NODE #3 (0)
[0] SECOND_NODE <- data ( 3 ms ) (0)
[0] SECOND_NODE #2 len: 12 (0)
[0] SECOND_NODE #3 (0)
[0] CLUSTER colType OK sys#type#item_tags#tag 9/9 (0)
*/
-- ###########################
-- RUNNING cd ../02_sql; ./cluster.sh bounce;
/*
cluster is not running on this server
archiving previous log file
purging log archive (7 days)
starting stormbase cluster .. OK: cluster listening on tcp port 2220
*/
-- ###########################
-- RUNNING status.sh
/*
PID: 25586, CMD: STORMBASE, TCP_PORT: 2219
PID: 25645, CMD: CLUSTER, TCP_PORT: 2220
PID: 25649, CMD: CLUSTER, TCP_PORT: 2220
PID: 25650, CMD: CLUSTER, TCP_PORT: 2220
*/
-- ###########################
-- RUNNING doc_cluster.sql (with ./cluster_sql.sh)
--
../02_sql/cluster_readme.txt
##############
## SB cluster
##############
1/ create file cluster.conf, sample below
# cluster port
TCP_PORT:2220
# node 1
ID1:node1 (optional, defaults to N1)
HOST1:stormbase1_ip
TCP_PORT1:stormbase1_port
# node 2
ID2:node2
HOST2:stormbase2_ip
TCP_PORT2:stormbase2_port
# repartition keys
REPART_COL_TYPES:t_art_site_key,t_art_cinv
OR
REPART_COL_TYPES:/(t_art_site_key|t_art_cinv)/
# number of worker processes, defaults to 5
NB_WORKERS:a_number
# if a sql takes more than x seconds, execution is cancelled and error 173 is sent; defaults to 100 seconds
SQL_TIMEOUT:a_number_in_seconds
# all SB nodes must have the same data model (otherwise init will return an error), defaults to y
SAME_DATA_MODEL:y/n
# if a sql matches the regex, it is sent to node #1 only; defaults to null
REGEX_FIRST_NODE_ONLY:a_javascript_regex
2/ init cluster
./cluster.sh init
Note. each time a cluster node receives a refresh, the cluster must be re-initialized
3/ start cluster
./cluster.sh start <|foreground>
Note. logs are in cluster.log
##############
## SB with PGSQL driver
##############
1/ create file cluster.conf, sample below
create a classic SB cluster conf file (possibly with only one SB node)
2/ init companion
./cluster.sh init_pg_companion
PG_COMPANION_PORT:5432
PG_COMPANION_HOST:51.255.89.11
PG_COMPANION_USER:postgres
PG_COMPANION_PWD:toto
3/ start cluster
./cluster.sh start <|foreground>
../02_sql/cluster.conf
TCP_PORT:2220
ID1:FIRST_NODE
HOST1:localhost
TCP_PORT1:2219
ID2:SECOND_NODE
HOST2:localhost
TCP_PORT2:2219
REGEX_FIRST_NODE_ONLY:/(create|insert|delete|refresh|set)\s/
NB_WORKERS:2
SQL_TIMEOUT:3
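--the REGEX_FIRST_NODE_ONLY entry above routes DML (create/insert/delete/refresh/set) to node #1 only;
--below is a minimal javascript sketch of that routing decision, for illustration only (an assumption
--about the behavior described in the readme, not the cluster's actual code):
const REGEX_FIRST_NODE_ONLY = /(create|insert|delete|refresh|set)\s/;
//returns true when the sql must be executed on the first node only,
//false when it can be broadcast to all nodes
function isFirstNodeOnly(sql) {
  return REGEX_FIRST_NODE_ONLY.test(sql);
}
console.log(isFirstNodeOnly("insert into items values ('artZ','z','d',1,1);")); //true
console.log(isFirstNodeOnly("select count(*) from v_sales;")); //false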
--#SB_CLUSTER debug
select
count(*) from
v_sales;
COUNT(*)
14
desc
table
callback
where
(1,'sales'
);
table_name column_count line_count has_delete has_update parent_view_hidden node_id
sales 5 7 n n n FIRST_NODE
sales 5 7 n n n SECOND_NODE
--#SB_CLUSTER desc('parameter')
nothing;
param_name current_value
TCP_PORT 2220
ID1 FIRST_NODE
HOST1 localhost
TCP_PORT1 2219
ID2 SECOND_NODE
HOST2 localhost
TCP_PORT2 2219
REPART_COL_TYPES -
REGEX_FIRST_NODE_ONLY /(create!insert!delete!refresh!set)\s/
NB_WORKERS 2
SQL_TIMEOUT 3
SAME_DATA_MODEL y
--#SB_CLUSTER set('same_data_model','n')
nothing;
response
done
--#SB_CLUSTER desc('parameter','same_data_model')
nothing;
param_name current_value
SAME_DATA_MODEL n
../02_sql/custom.js
module.exports = {};
//fnSetNodes lets custom code exclude cluster nodes for a given sql:
//set nodeId2Execute[node_id] to false to skip that node
module.exports.fnSetNodes = function (sql, nodeId2Execute) {
  //sqls referencing v_item_tags are not sent to FIRST_NODE
  if (sql.match(/(\s|^)v_item_tags(\s|;)/im)) {
    nodeId2Execute["FIRST_NODE"] = false;
  }
  //sqls referencing v_items are not sent to SECOND_NODE
  if (sql.match(/(\s|^)v_items(\s|;)/im)) {
    nodeId2Execute["SECOND_NODE"] = false;
  }
};
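--a minimal sketch of the fnSetNodes contract, for illustration only (calling the hook by hand,
--assuming custom.js is in the current directory; this is not the cluster's actual calling code):
const custom = require("./custom.js");
//all nodes are candidates before the hook runs
const nodeId2Execute = { FIRST_NODE: true, SECOND_NODE: true };
custom.fnSetNodes("select count(*) from v_item_tags;", nodeId2Execute);
//FIRST_NODE is now false: the sql will only run on SECOND_NODE
console.log(nodeId2Execute);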
desc
view
callback
where
(1,'v_item_tags'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count node_id
v_item_tags 1 2 n n n 10 0 12 12 0 0 FIRST_NODE
v_item_tags 1 2 n n n 10 0 12 12 0 0 SECOND_NODE
select
count(*) from
v_item_tags;
COUNT(*)
2
desc
view
callback
where
(1,'v_items'
);
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count node_id
v_items 1 2 n n n 10 0 12 12 0 0 FIRST_NODE
v_items 1 2 n n n 10 0 12 12 0 0 SECOND_NODE
select
count(*) from
v_items;
COUNT(*)
2
desc
table
sales;
table_name column_name column_type col_type_name node_id
sales item_id text t_item_id FIRST_NODE
sales customer_id text t_customer_id FIRST_NODE
sales sales_date text t_date FIRST_NODE
sales sales_qty number sys#type#sales#sales_qty FIRST_NODE
sales line_id text sys#type#sales#line_id FIRST_NODE
sales item_id text t_item_id SECOND_NODE
sales customer_id text t_customer_id SECOND_NODE
sales sales_date text t_date SECOND_NODE
sales sales_qty number sys#type#sales#sales_qty SECOND_NODE
sales line_id text sys#type#sales#line_id SECOND_NODE
--under construction
--#SB_CLUSTER broadcast merge('group,sum')
--#SB_CLUSTER broadcast merge('group,sum,sum') compute(4,'ratio(line.sum_stk,line.sum_sales)') compute(5,'ratio(365,line.doi)')
continue_on_disconnect;
--this sql is stopped because of the timeout (SQL_TIMEOUT is set to 3 seconds above)
--#SB_CLUSTER js_eval(require("child_process").execSync("sleep 10"); return "abcd")
nothing;
error
173: connection closed
stop_on_disconnect;
--but the cluster is still working (the killed thread has been re-forked)
select
count(*) from
v_sales;
COUNT(*)
14
;
2220
-- ###########################
-- RUNNING cd ../02_sql; ./cluster.sh init_pg_companion;
/*
killing cluster (pid:29741
29745
29746
29747
29748
29749)
done
cluster is not running on this server
init_pg_companion -> true
foreground -> true
[0] CLUSTER master port: 2220 (0)
[0] CLUSTER slave count: 1 (0)
[0] N1 --------- NEW CONNECTION ------------ CACHE SIZE : 0 / 0 (0)
[0] N1 connecting to localhost : 2219 (0)
[0] N1 -> authentication; (0)
[0] N1 <- data ( 103 ms ) (0)
[0] N1 -> exec show_data_model(); (0)
[0] N1 <- data ( 1 ms ) (0)
proxy server connect undefined
-------> conn1
-------> conn2
-------> get tables
-------> drop table v_fidelity_cards
-------> drop table v_item_customer_infos
-------> drop table v_item_tags
-------> drop table v_items
-------> drop table v_sales
-------> fn
-------> create table v_fidelity_cards
table v_item_customer_infos created C|CREATE TABLE[0]
-------> create table v_items
table v_item_customer_infos created C|CREATE TABLE[0]
-------> create table v_sales
table v_item_customer_infos created C|CREATE TABLE[0]
-------> create table v_item_tags
table v_item_customer_infos created C|CREATE TABLE[0]
-------> create table v_item_customer_infos
table v_item_customer_infos created C|CREATE TABLE[0]
-------> commit
-------> insert v_fidelity_cards, 3 lines
-------> insert v_items, 3 lines
-------> insert v_sales, 3 lines
-------> insert v_item_tags, 3 lines
no more calls
*/
-- ###########################
-- RUNNING cd ../02_sql; ./cluster.sh bounce;
/*
cluster is not running on this server
archiving previous log file
purging log archive (7 days)
starting stormbase cluster .. OK: cluster listening on tcp port 2220
*/
-- ###########################
-- RUNNING status.sh
/*
PID: 30157, CMD: STORMBASE, TCP_PORT: 2219
PID: 30244, CMD: CLUSTER, TCP_PORT: 2220
PID: 30248, CMD: CLUSTER, TCP_PORT: 2220
PID: 30249, CMD: CLUSTER, TCP_PORT: 2220
PID: 30250, CMD: CLUSTER, TCP_PORT: 2220
PID: 30251, CMD: CLUSTER, TCP_PORT: 2220
PID: 30252, CMD: CLUSTER, TCP_PORT: 2220
*/
-- ###########################
-- RUNNING doc_pg_companion.sql (with ./pg_sql.sh)
--
--file:../02_sql/cluster.conf
--#SB_CLUSTER debug
select
count(line_id) from
v_sales;
count
-------
3
(1 row)
--#SB_CLUSTER debug
select
count(line_id) from
v_sales;
count
-------
7
(1 row)
select
item_id,customer_id,sales_date,sales_qty,line_id from
v_sales;
item_id | customer_id | sales_date | sales_qty | line_id
---------+-------------+------------+-----------+---------
artA | C1 | 20191231 | 5 | ID01
artB | C2 | 20200102 | 6 | ID02
artB | C1 | 20191231 | 4 | ID03
artB | C2 | 20200102 | 7 | ID04
artA | C1 | 20191231 | 5 | ID06
| C1 | 20200102 | 8 | ID05
artA | C2 | 20191231 | 5 | ID07
(7 rows)
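--for reference, a minimal node.js sketch of the same query through the PG companion, using the standard
--"pg" module (host/port/credentials are the hypothetical PG_COMPANION_* sample values shown earlier;
--the companion speaks the postgres wire protocol, so any postgres client should work the same way):
const { Client } = require("pg");
async function main() {
  const client = new Client({ host: "localhost", port: 5432, user: "postgres", password: "toto" });
  await client.connect();
  const res = await client.query("select count(line_id) from v_sales;");
  console.log(res.rows); //e.g. [ { count: '7' } ]
  await client.end();
}
main();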
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_transac.sql
--
create
table
table_name(col1 text
, col2 text
, ...)
create
table
tb_message(id text
, json text
, status text
);
table_created
OK
insert
into
table_name values
('txt1'
, 'txt2'
, ...)
insert
into
tb_message values
('AAA'
,'text AAA'
,1);
line_inserted
OK
insert
into
tb_message values
('BBB'
,'text BBB'
,'1'
);
line_inserted
OK
insert
into
tb_message values
('CCC'
,'text BBB1'
,'1'
);
line_inserted
OK
select
<*|col1, col2, ...> from
table_name
<|where
col1='txt1'
and
col2='txt2'
...>
select
* from
tb_message;
id json status
AAA text AAA 1
BBB text BBB 1
CCC text BBB1 1
select
id, json, id, status from
tb_message where
id='CCC'
and
status=2;
id json id status
update
table_name set
col1='txt1'
, col2='txt2'
<|where
col1='txt1'
and
col2='txt2'
...>
update
tb_message set
status='2'
, json='new'
where
id='AAA'
;
#rows updated
1
update
tb_message set
status='2'
, json='newCCC'
where
id='CCC'
;
#rows updated
1
select
* from
tb_message;
id json status
AAA new 2
BBB text BBB 1
CCC newCCC 2
select
id, json, id, status from
tb_message where
id='CCC'
and
status=2;
id json id status
CCC newCCC CCC 2
delete
table_name
<|where
col1='txt1'
and
col2='txt2'
...>
delete
tb_message where
id='AAA'
;
#rows deleted
1
select
* from
tb_message;
id json status
BBB text BBB 1
CCC newCCC 2
delete
tb_message;
#rows deleted
2
select
* from
tb_message;
id json status
delete
tb_message;
#rows deleted
0
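--the statements above implement a typical message-queue pattern (insert with status 1, poll,
--acknowledge with status 2, then purge); here is a minimal javascript sketch, for illustration only,
--that builds the corresponding sql strings (execution is left to whatever client you use, e.g. sql.sh):
//poll: fetch unprocessed messages
function pollSql() {
  return "select id, json from tb_message where status='1';";
}
//acknowledge: mark one message as processed
//(assumption: ids contain no quotes, so no escaping is done here)
function ackSql(id) {
  return "update tb_message set status='2' where id='" + id + "';";
}
console.log(pollSql()); //select id, json from tb_message where status='1';
console.log(ackSql("BBB")); //update tb_message set status='2' where id='BBB';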
shutdown
;
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_data_init.sql
create
col_type
t_site_id as
text
;
reponse
success
create
col_type
t_dept_id as
text
;
reponse
success
create
col_type
t_item_id as
text
;
reponse
success
create
col_type
t_customer_id as
text
;
reponse
success
create
col_type
t_date as
text
;
reponse
success
create
col_type
t_customer_info as
text
;
reponse
success
create
col_type
end_user
as
text
;
reponse
success
create
merge table
items( item_id t_item_id, art_label text
, dept t_dept_id, avg_week_sales number
, sales_price number
);
reponse
success
create
merge table
customers( customer_id t_customer_id, customer_name text
);
reponse
success
create
table
item_tags( item_id t_item_id, tag text
);
reponse
success
create
table
fidelity_cards( customer_id t_customer_id, card_label t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
table
item_customer_infos( customer_id t_customer_id, item_id t_item_id, info t_customer_info, valid_from t_date, valid_until t_date);
reponse
success
create
big table
sales( item_id t_item_id, customer_id t_customer_id, sales_date t_date, sales_qty number
, line_id text
, packaging_id t_item_id);
reponse
success
create
big table
inventory( item_id t_item_id, inv_qty number
);
reponse
success
create
view
v_items as
select
* from
items;
reponse
success
create
view
v_item_tags as
select
* from
item_tags;
reponse
success
create
view
v_fidelity_cards as
select
* from
fidelity_cards;
reponse
success
create
view
v_item_customer_infos as
select
* from
item_customer_infos;
reponse
success
create
view
v_sales as
select
* from
sales, items, customers where
items.item_id=sales.item_id and
customers.customer_id=sales.customer_id;
reponse
success
create
view
v_inventory as
select
* from
inventory, items where
items.item_id=inventory.item_id;
reponse
success
insert
into
items values
('artA'
,'the article A'
,'dept #1'
,10,1.5);
reponse
success
insert
into
items values
('artB'
,'the article B'
,'dept #2'
,10,3.2);
reponse
success
insert
into
items values
('box1'
,'a box'
,'packaging'
,10,0);
reponse
success
insert
into
customers values
('C1'
,'customer #1'
)('C2'
,'customer #2'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #1'
);
reponse
success
insert
into
item_tags values
('artA'
,'tag #2'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'SILVER'
,'20191201'
,'20191231'
);
reponse
success
insert
into
fidelity_cards values
('C1'
,'GOLD'
,'20201201'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artA'
,'FREQUENT BUYER of artA in 2019'
,'20190101'
,'20191231'
);
reponse
success
insert
into
item_customer_infos values
('C1'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
item_customer_infos values
('C2'
,'artB'
,'FREQUENT BUYER of artB in 2020'
,'20200101'
,'20201231'
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID01'
,'box1'
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,6,'ID02'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C1'
,'20191231'
,4,'ID03'
,''
);
reponse
success
insert
into
sales values
('artB'
,'C2'
,'20200102'
,7,'ID04'
,'box1'
);
reponse
success
insert
into
sales values
('artC'
,'C1'
,'20200102'
,8,'ID05'
,''
);
reponse
success
insert
into
sales values
('artA'
,'C1'
,'20191231'
,5,'ID06'
,'box1'
);
reponse
success
insert
into
sales values
('artA'
,'C2'
,'20191231'
,5,'ID07'
,'box1'
);
reponse
success
insert
into
inventory values
('artA'
,32);
reponse
success
insert
into
inventory values
('artC'
,12);
reponse
success
refresh
dirty
view
;
reponse
success
-- ###########################
-- RUNNING doc_version.sql
--
./_SRC/version.h
#define STORMBASE_VERSION "v1.17.10_F29"
// v1.5.09 bug fix of core dumps
// v1.5.10 remove call f_check_disk_space (except the one at startup)
// v1.5.11 add if( insert_altered == TRUE )
// v1.5.12 coredump if query during refresh, see do_not_save
// v1.5.13 bug introduced by v1.5.11
// v1.5.14 more debug for orphans
// v1.5.15 bug at citymart, when select and unfrag at the same time
// v1.5.16 INITIAL_LOAD
// v1.5.17 UNIQUE_SOFT
// v1.5.18 remove last call to f_check_disk_space (bug City Mart)
// v1.5.19 remove disk check also
// v1.5.20 just a git test
// v1.5.21 bug fix in uniquesoft
// v1.5.22 bug fix City Mart error 29-00
// v1.5.23 AUDIT mode
// v1.5.24 Forgot to divide by column count, which generated too many threads
// v1.5.25 Fix rare parsing errors
// v1.5.26 Add TOTO function
// v1.5.27 Increase GLOBAL_COL_TYPE_CHECK_LIST_LEN
// v1.5.28 Debug error ERROR 29-131 in function : new_object
// v1.5.29 Allow orphans in tables linked to big tables, if and only if they are empty
// v1.5.30 Fix for ERROR 29-131 in function : new_object
// v1.5.31 Cleaning of unused CELL file at startup
// v1.5.32 Improve where check when I have same where done before without end_user check
// v1.5.33 Small evo
// v1.5.34 Bug fix in where management, exact match where not detected (always considered as partial matches)
// v1.5.35 NEW_COLUMN_NULL_STRING, default "NO_DATA"
// v1.5.36 Group by size not correct if where group by on disk
// v1.5.37 Error in TURBO creation because of null management
// v1.5.38 Bug in license check if max line > 4G
// v1.5.39 Bug cache size was zero after cache maintenance
// v1.5.40 Big bug in where clause, were considered as same : where c1 in ('v3','v1') / where c1 in ('v3')
// v1.5.41 Change cache management, priority to group by
// v1.5.42 Bug in where cache (take partial match before ready)
// v1.5.43 Bug City Mart when 30 sessions hit, core dump in f_create_col_type_check
// v1.5.44 Bug with turbo create ok but refresh ko when nothing was added in the table
// v1.5.45 Bug when several nn views in where clause
// v1.5.46 Evo, nn view, loop on nn_view instead of main view to find col_type in common
// v1.5.47 Non-reproducible bug
// v1.5.48 Non-reproducible bug
// v1.5.49 Bug Turbo, I get -1.9e-05 in the export, which fails during insert (change in stormbase.lua)
// v1.5.50 Bug in refresh if more than 30 active sessions
// v1.5.51 Bad timing, bad error message, ALLOW_ORPHAN
// v1.5.52 CPU_COUNT param
// v1.5.53 Bug in LUA sort after v1.5.49
// v1.5.54 SB_NOT_IN_BETA & pwhere->pwhere_col_pos_to_is_not
// v1.5.55 EYC_OPTIM
// v1.5.56 EYC_OPTIM #2
// v1.5.57 EYC_OPTIM #3
// v1.5.58 MAX_PERFORMANCE
// v1.5.62 MAX_PERFORMANCE
// v1.5.63 SB_NOT_IN_BETA -> SB_NOT_IN (nin)
// v1.5.64 Several things ...
// v1.5.65 Bug fix in f_permission_has_changed
// v1.5.66 Many things
// v1.5.67 Many things & fix core dump REMA
// v1.6.00 First official version with MAX_PERF
// v1.6.02 Bug fix MAX_PERF
// v1.6.17.5 core CM
// v1.6.19 several bugs in where
// v1.6.20 refresh permissions integrated to refresh dirty views
// v1.6.21 less log
// v1.6.22 MAX_PERF->lval_count can't be used when loading max_perf from file (=0)
// v1.6.23 evaluate gby_booster_len better when booster not used
// v1.6.23.1 account for null value
// v1.6.24 bug fix and dev started for where indexes
// v1.6.25 dev end for where index + bug fix
// v1.6.25.1 where index stable
// v1.6.25.2 free missing
// v1.6.25.3 City Mart error 75722
// v1.6.25.4 trace for error 75722 investigate
// v1.6.25.5 trace for error 75722 investigate (origin of sessions)
// v1.6.25.6 move irel2 limit from 20 to 30
// v1.6.26 bug fix (core REMA UAT)
// v1.6.27_ bench inter
// v1.7.00 bench inter first stable version
// v1.7.01.1 memory leaks
// v1.7.02 use temp cell array for file load, to avoid fragmentation
// v1.7.03 core with delete in TEST_BENCH_INTER=y
// v1.7.04 PARETO_LIMIT LOG_VERBOSE
// v1.7.10 manage simple select (w/o group by)
// v1.7.10.1 bug in select sum(col) sqls
// v1.7.10.2 bug REMA, profile sql falls in wait, no more connections accepted
// v1.7.10.3 bugs
// bug #1 when a pareto column is in group by several times, memory pb in valgrind
// bug #2 double free error if group by with index
// bug #3 invalid result for unique & uniqsoft (linked to pareto values)
// v1.7.10.4 bug, NN view where clause : select other view drop twice
// v1.7.10.5 core with this SQL: SELECT MOY,sum() FROM V_SALES WHERE e(Y0,'2016') GROUP BY MOY CALLBACK limit(10000);
// v1.7.11 TEST_BENCH_INTER = 'y' by DEFAULT
// v1.7.12 memory problem
// v1.7.13 incorrect result for select without group by
// v1.7.14 memory check
// v1.7.15 memory check #2
// v1.7.16 core dump with WHERE end_user = 'NO_PROFILE_FOR_THIS_USER_APP'
// v1.7.17 core dump with select returning more than 2 million rows
// v1.7.18 in flat select, treat callback limit(x) during select execution, not after
// v1.7.19 bug with having and callback clause
// v1.7.20 manage nulls (sum=0 are displayed)
// v1.7.21 get rid of "port already in use issue"
// v1.7.22 bug fix after v1.7.20
// v1.8.00 preparation for cluster version, IVAL
// v1.8.01
//1/ core in unlucky cases,
//2/ manage top/bottom query on high sparsity group by w/o indexes
//3/ big bug on countdistinct(col of big table)
// v1.8.02 preparation for cluster version, ivallist/ivalbin
// v1.8.03 core when doing query with many columns (15) many rows (1000000)
// v1.8.04 move step2 limit from 30 to 40
// v1.8.05 bug (core or 0) when expression used several times in select
// v1.8.06 core in drop select
// v1.8.07 core at REMA in f_mem_get_object_id
// v1.8.08 correct v1.8.07
// v1.8.09 OOM with big countdistinct
// v1.8.10 add logs, core REMA when refresh without cache drop
// v1.8.11 col_type_check issue at LGEP
// v1.8.12 allow default value for orphan
// v1.8.13 free where index memory before save
// v1.8.14
// countdistinct should not count the NULL_STRING
// return error 140 for SQL during refresh dirty views
// v1.8.15
// memory leak in data model exec
// DEBUG_MEM & CELL_BLOCK_TMP_ID
// v1.8.16 bug and memory leak in where_info
// v1.8.17 manage maxstr and minstr
// v1.8.18 countdistinct memory limit with MAX_COUNT_DISTINCT_THREAD_SIZE
// v1.8.19 Fix tentative for bug "_irel_ok_count != irel_ok_count" at Rema
// v1.8.20 Bug fix in "delete where nin"
// v1.8.21 Bug in count distinct with group by index
// v1.8.22 Bug if where col in ('A','A')
// v1.9.00 REFRESH_ONLINE (main development)
// v1.9.00 also remove TEST_BENCH_INTER
// v1.9.00 also remove "select from empty view error" in max_perf context
// v1.9.00 also fix a mutex related bug (problem at REMA) in f_select (see f_get_where_info_status(where_info))
// v1.9.00 also countd_first_ivals, reduce memory consumption for countdistinct on high sparsity columns (problem at LGEP)
// v1.9.00 also new where syntax : c like 'something' (equivalent to, but faster than, regex(c,'.*something.*'))
// v1.9.00 also manage sort up to 5 columns : sort( col1 , dir1, col2 , dir2, col3 , dir3, col4 , dir4, col5 , dir5 )
// v1.9.00 also removes small memory leaks
// v1.9.01 few bug fixes
// v1.9.02 control thread every 60 sec (6 sec before)
// v1.9.03 bug fix in refresh view sql creation
// v1.9.04 bug fix select on parent view was cached too early
// v1.9.05 removed automatic refresh dirty at startup (collision with clear_max_perf at Monoprix)
// v1.9.06 rework fix v1.9.04
// v1.9.07 bug fix linked to clear_max_perf.sh and refresh_online
// v1.9.08 bug in NN views
// v1.9.09 small memory leak fix
// v1.9.10 bug at startup when there is a big_online based view and a non big_online based view (with active child view) seen at REMA
// v1.9.11 core at REMA
// v1.9.12 core at REMA
// v1.9.13 bugs
// v1.9.14 change select status after tcp response (to avoid conflict with cache dropping)
// v1.9.15 manage not exists with NN join. Syntax: "where view!.col ..."
// v1.9.16 bug fix maxstr
// v1.9.17 problems NN views
// v1.9.18 problems with where view!.col when using index
// v1.9.19 bug fix "_irel_ok_count != irel_ok_count" error (REMA)
// v1.10.00
// review non big/merge views
// manage "with ... as ( ..."
// v1.10.01 bug fix "insert values" with : in it did not work
// v1.10.02 core with non big/merge views, bug with countdistinct in non big/merge views
// v1.10.03 core at REMA with online_views
// v1.10.04 insert optim for big merge tables
// v1.10.05 push max number of selects from 1E6 to 10E6
// v1.10.06 with pareto first rare line is seen as empty (regression after v1.10.02)
// v1.10.07 SB_DISTINCT_LIST
// v1.10.08 analytic sql rework
// v1.10.09 bug fix countdistinct (memory REMA)
// v1.10.10 manage sort with padding, for instance sort(1,'desc5')
// v1.10.11
//change default parameters values
//desc <|table_name|view_name> <|1|2>
//bug fix core REMA
//display integers w/o digits
//bug fix (invalid syntax in with trapping)
// v1.10.12
// rework on analytic functions
// support select ... and .. syntax (without where)
// v1.10.13
// avoid this kind of display with CLI: 6.000002
// manage select *
// v1.10.14
// allow refresh online with non big/merge table (try)
// function select don't look at child views
// U_INT_LIMIT
// v1.10.15 core at REMA in live refresh
// v1.10.16 core using with + profile
// v1.10.17 fn_lua
// v1.10.18 fn_lua rework
// v1.10.19 analytic rework
// v1.10.20 ALLOW_ORPHAN=y by default
// v1.10.21 new sort: sort(2,'asc5-') = asc, left pad of 5, use char before - for sort
// v1.10.22
// SB_NO_VIEW_OBJ
// cap memory to MAX_GBY_BOOSTER_LEN for select using index (20M) before (ITM)
// when ambiguity on column (art_cinv) change dimension priority
// v1.10.23
// more mutex protection (core REMA)
// v1.10.24
// recycle max_perf (core REMA)
// flagging a view as dirty is now independent from ALLOW_ORPHAN (otherwise daily_mvt becomes dirty after live refresh)
// v1.10.25
// bug fix on with
// v1.10.26
// bug fix describe
// bug delete where app_origin='GOLD'
// v1.10.27 don't check max session in f_create_col_type_check (created a deadlock at REMA, cf. SQL using "with clause")
// v1.11.00
// analytic big rework
// =null management
// rework on refresh protection
// several other things
// v1.11.01
// optim of big group by & count distincts
// v1.11.02 stormbase.lua limit case in ratio/fn_stock
// v1.11.03 bug fix in =null management with index
// v1.12.00 transactional initial version
// v1.13.00 PARTITION_LINE_COUNT
// v1.13.01 rework v1.7.10.3 bug #1, with a better fix
// v1.13.02 non big/merge view with sequential read optim
// v1.13.03 protect data_model_exec execution (anticipation of pb in HA mode at REMA)
// v1.13.04 bug fix error 140 returned if refresh dirty view w/o modification
// v1.13.05 fix blocking locking introduced by v1.13.03
// v1.13.06 parse error 146 fix in with clause
// v1.13.07 rework partition (delete, data model modif etc...)
// v1.13.08
// core at REMA (when exec show_table and refresh dirty in parallel)
// v1.14.00 initial version with dynamic C code
// v1.14.01 fn_pivot optim
// v1.14.02 core at REMA
// v1.14.03 core at REMA
// v1.14.04 core at REMA
// v1.14.05 core at REMA
// v1.14.06 core at REMA
// v1.14.07 '' management
// v1.14.08 core at Inter
// v1.14.09 PIN_MEMORY parameter, bug fix fn_store
// v1.14.10 optimization round
// v1.14.10 manage p(view.column) from no_view syntax
// v1.14.10 manage group by with_name.col_name
// v1.14.11 manage * in permissions
// v1.14.12 bug fix in permissions (unknown users were super users)
// v1.14.13 manage insert into table (col1, ..) values (...)
// v1.14.14 manage batch insert: insert into table (col1, ..) values (...) (...)
// v1.14.15 manage count(*)
// v1.14.16 manage driver version, and send column types to client
// v1.14.17 manage count(distinct col)
// v1.14.18 non big online table are not recalculated at startup
// v1.14.18 IREL_TO_ILINES_MAX_LENGTH ILINE_TO_IVALS_MAX_LENGTH, bug fix with big select *
// v1.14.19 bug fix SELECT_ALL
// v1.14.20 new rule on nn join, fact table and non empty dim have priority over empty dim
// v1.14.21 display problem of uniquesoft result
// v1.14.22 bug in f_max_perf_run_sequential_no_pareto_no_group_by_no_where_no_fn_loop_one_count_no_compression
// v1.14.23 add column in empty view generates empty files in MAX_PERF, next boot of SB fails, now this case is managed
// v1.14.24 rework // v1.14.23
// v1.14.25 dropping an altered object is no longer allowed (generates bugs)
// v1.14.26 update (start)
// v1.14.27 core at monop (thread_count in fn_init_fn_res is capped to CPU_COUNT+1)
// v1.14.28 bug fix, "save" without a "refresh dirty view" before will corrupt _DESC_Z file
// v1.14.29 removed dynamic library libnsl.so (not compatible with RedHat 8)
// v1.14.30 optim select using booster, cache col_type checks
// v1.14.31 add signature check to dynamic C function
// v1.14.32 big bug impacting delete with several columns in where clause
// v1.14.33 secure consistency in MAX_PERF
// v1.14.34 batch insert values optim
// v1.14.35 dates are converted to float by fn_merge
// v1.14.36 bug in col_type_check cache
// v1.14.37 bug in parsing of complex with (more than 50 () )
// v1.14.38 secure consistency in MAX_PERF #2
// v1.15.00 MAX_PERF_FILTER v0
// v1.15.01 compute max_perf view per view (in order to save volatile memory)
// v1.15.02 bug fix in parsing
// v1.15.03 bug fix when migration from pre v1.15.00, review IN_MEMORY_BIG_TABLES management
// v1.15.04 rework v1.15.03
// v1.15.05 rework fn_having (add sum and sumpareto)
// v1.15.06 allow having several columns with the same col_type in a table
// v1.15.07 add log
// v1.15.08 err 53 if dim is aliased (tableau)
// v1.15.09 auto convert from min/max to minstr/maxstr (tableau)
// v1.15.10 pass line_count to loop function
// v1.15.11 error message after fopen
// v1.15.12 refresh_online + full delete + where clause matching nothing issue
// v1.15.13 bug fix in loop result
// v1.15.14 bug fix in formula on text expressions
// v1.15.15 small change to make test faster
// v1.15.16
// add sb_vals_version for compatibility check between SB and .so code
// manage multi thread of loop
// v1.15.17 SB_VALS_VERSION 2
// v1.15.18 manage empty file insert without sending error 61
// v1.15.19 bug fix in transac (impossible to create a 2nd table)
// v1.15.20 manage kill signals
// v1.15.21 core at REMA (sql with non existing profile)
// v1.15.22 bug in min and max
// v1.15.23 core with list on number column
// v1.15.24 rework v1.15.21 and add CHECK_CORRUPTION_INDEX
////////////// Jan 2020
// v1.15.25
// analytic function improvement
// v1.15.26 strange bug Inter (pre-prod)
// v1.15.27 loop multi thread update
// v1.15.28 loop multi thread update #2
// v1.15.29 bug inter, many INDEX:GROUP_BY.INDEX.TMP.MAX_PERF indexes had been saved by mistake, so I had to increase GLOBAL_INDEX_LIST_LEN (for a bad reason)
// v1.15.30 bug fix in loop multi thread
// v1.15.31 optim in loop multi thread
// v1.15.32 remove printf, context lost issue (session change)
////////////// Feb 2020
// v1.15.33 problem if more than 10000 end users
// v1.15.34 bug fix in multi thread loop
// v1.15.35 bug fix in permission (very rare case, INTER)
// v1.15.36 bug fix in multi thread loop with where clause
// v1.15.37 allow table.col in select
// v1.15.38 optim refresh (multi thread, etc..)
// v1.15.39 change index col_type priority (1->table_count, then 0)
// v1.15.40 optim insert
// v1.15.41 improve lua error management
// v1.15.42 fix regression in partial view introduced in v1.15.38
// v1.15.43 MAX_PERF_FILTER_FN not working if MAX_PERF_COMPUTE_BIG_TABLES_JOIN set to n
// v1.15.44 NOT_IN_MEMORY_COLUMNS
// v1.15.45 MAX_PERF_USE_IVAL_OFTEN_LIMIT (pareto_conv_reverse & pareto_conv_normal not computed for some columns)
// v1.15.46 UNFRAG_BLOCK_PER_BLOCK
// v1.15.47 NOT_INDEXED_DIMENSIONS
// v1.15.48 bug fix
// v1.15.49 exp_iline_gby_counts U_INT -> long
// v1.15.50 loop/pass current values update
// v1.15.51 manage null in f_log_s (happen with lua errors)
// v1.15.52 manage \n\r
// v1.15.53 avoid "lua_pcall error for" error message (unnecessary formula check on group by columns)
// v1.15.54 SB_LAST, fn_key
////////////// March 2020
// v1.15.55 add refresh log in non verbose mode, improve other logs, bug in sb_last
// v1.15.56 null & NAN management in updates
// v1.15.57 fn_pivot improve, fn_custom out_multi_thread_autorized
// v1.15.58 sb_parallel
// v1.15.59 free dyn fn before and after
// v1.15.60 mem leak round
// v1.15.61 initial insert value, example INSERT_FORCE_VALUE.t_art_site_key:-1
// v1.15.62 allow update even if delete and also in refresh online
// v1.15.63 fix potential collision in save
// v1.15.64 bug fix table line count in loop/partition context
// v1.15.65 pre loop to load array data into memory
// v1.15.66 flush cache of "no_view" select after refresh dirty view
// v1.15.67 paritions_scope
// v1.15.68 check return code of pthread_create
// v1.15.69 fix typo in partitions_scope
// v1.15.70 limit INSERT_FORCE_VALUE to big tables and big_online tables
// v1.15.71 ERR_185
// v1.15.72 ERR_186 (select refused, too many active sessions)
// v1.15.73 mem leak round
// v1.15.74 thread/mutex round
// v1.15.75 line_count_after_last_refresh_dirty/rework loops (deleted lines not treated, possible to treat new lines only)
// v1.15.76 fix problem introduced in v1.15.48
// v1.15.77 occasional segfault for bad sql
// v1.15.78
// core at REMA
// v1.15.79 max_perf, drop the objects no longer computed (because of NOT_INDEXED_DIMENSIONS, NOT_IN_MEMORY_COLUMNS etc..),
// otherwise it is still used by selects
// v1.15.80 max_perf, if a big table column is not in memory replace it by joined dim column
// v1.15.81 fix lua exec with error generate segfault
// v1.15.82 ACTIVITY_SECURITY
// v1.15.83 array not in memory (iline) error between insert and refresh dirty
// v1.15.84 avoid memory peak in refresh
// v1.15.85 --#SB_f_get_max_perf tag
// v1.15.86 manage quotes in loop
// v1.15.87 REFRESH_FORCE
// v1.15.88 rework // v1.15.83
////////////// April 2020
// v1.15.89
// same as // v1.15.80 for select *
// v1.15.90
// ival refactor (fn_toto)
// v1.15.91
// rework permission view priority (cap_zone has site)
// v1.15.92
// bug fix SET_NULL_COLUMN
////////////// May 2020
// v1.15.93
// fn_refactor_col_type
// v1.16.00
// computed columns v0
// v1.16.01
// bug fix in table drop/create manipulation, refresh computed_columns, refresh cache
// v1.16.02
// like operator becomes case insensitive
// v1.16.03
// analytic functions results were always strings, never numbers
// v1.16.04
// incorrect caching in with clause based on other with clause
// v1.16.05
// allow fn_store function to be col_type
// v1.16.06
// change col_type_check cache logic
// v1.16.07
// bug fix
// v1.16.08
// SESSION_SECURITY
// v1.16.09
// use index in permission and all type of where clause
// v1.16.10
// --#SB_log_verbose_tag tag
// v1.16.11
// no more cache with SB tags
// v1.16.12
// optim countdistinct
// v1.16.13
// regression introduced by "non efficient index management"
// v1.16.14
// fix regression of v1.16.13
// v1.16.15
// index/where clause collision
// v1.16.16
// --#SB improve
// v1.16.17
// bug fix
// v1.16.18
// _MAX_COUNT_DISTINCT_THREAD_SIZE
// v1.16.19
// SEQUENCE_COLUMNS
// v1.16.20
// table#col syntax not working with partitions
// v1.16.21
// bug in avg
// v1.16.22
// error 43-138 if with (or with_something) is after select
// v1.16.23
// small bugs
// v1.16.24 manage lua compilation issue (avoid seg fault if this happens)
// v1.16.25
// bug fix in parsing
// v1.16.26
// piline_gby_to_op_col_pos_result_list -> RES_FLOAT_ARRAY (moved to double precision in aggregation)
////////////// June 2020
// v1.16.27
// core in SEQUENCE_COLUMNS context
// v1.16.28
// allow `lua formula` in all select, create tags #DYN_SQL# and #LOAD#
// v1.16.29
// avoid non fatal error mess display when testing the lua formula
// v1.16.30
// mem leak round
// v1.16.31
// bug fix ACTION store_code/dc_code with same ct in big table -> where clause problem using index
// v1.16.32
// support set param='val'
// v1.16.33
// fn_pivot, improve orphan management
// v1.16.34
// bug fixes in analytic functions
// v1.16.35
// index not computed (not compatible) for big/merge with sequence col, more verbose
// v1.16.36
// remove useless code for flat select
// v1.16.37
// SB tag explain
// v1.16.38
// params USE_INDEX_LIMIT, USE_INDEX_WITH_SORT_LIMIT, REPLACE_COUNTDISTINCT_BY_COUNTSEQUENCE
// v1.16.39
// remove obsolete code
// v1.16.40
// allow code mistakes for tmp arrays
// v1.16.41
// perf problem in sequence context
// v1.16.42
// seg fault in refresh_online + sequence context
// v1.16.43
// computed col rework, SO_FILE_NAME
////////////// July 2020
// v1.16.44
// bug: view partition include refresh_online view on partition
// v1.16.45
// manage callback in desc
// v1.16.46
// bounce command
// v1.16.47
// partition and refresh_online collision + bug fix core when insert triggers a new partition and next refresh is online in sequence context
// v1.16.48
// bounce command #2
// v1.16.49
// refresh_online view should not be flagged as dirty (needed for doc)
// v1.16.50
// potential bug when adding a column
// v1.16.51
// other bugs
// v1.16.52
// with clause not working after v1.16.47
// v1.16.53
// bugs/improvements allow fn_merge without with
// v1.16.54
// delete "invisible bug" fix
// v1.16.55
// SB_SYSTEM
// v1.16.56
// refresh dirty table
////////////// August 2020
// v1.16.57
// small things
// v1.16.58
// partition #10 does not trigger partition #11
// v1.16.59
// PERPETUAL license -> 1000 years
// v1.16.60
// SLEEP_AFTER_SQL (y/n)
// v1.16.61
// SB cluster, multi process mode (param NB_WORKERS in cluster.conf) & timeout management (param SQL_TIMEOUT in cluster.conf)
////////////// September 2020
// v1.16.62
// SB GENERATE_REPLAY for external sql only
// v1.16.63
// rework desc col_type
// v1.16.64
// mem leak round
// v1.16.65
// small things
// v1.16.66
// memory not cleaned in lua when table too big is allocated -> MAX_FN_DISPLAY_COL 1000
// v1.16.67
// keep lua_State attached to session (mem leak before)
// v1.16.68
// rework no cache mode
// v1.16.69
// manage some problems linked to restart after corruption
// v1.16.70
// backup command and _DESC.bck write
// v1.16.71
// save.lock
// v1.16.72
// investigate seg fault DG
// v1.16.73
// seg fault DG during purge
// v1.16.74
// computed columns rework, SB_VALS_VERSION 7,
// v1.16.75
// no delete if where clause returns zero rows, set_dirty,
// improve save.lock, prompt sure? in clear shells, manage save.lock in start.sh,
// change date format in sql.sh, add contexts to desc
// v1.16.76
// SEQUENCE_COLUMN moved back to pareto
// v1.16.77
// column type lost after v1.16.67 (all sorts were incorrect)
////////////// October 2020
// v1.16.78
// computed_column (without s), real column gets priority over computed col, INIT_FILE_PATH
// v1.16.79
// rework INIT_FILE_PATH, small bug fix for countsequence (keep logic order for partitions and online_refresh)
// v1.16.80
// change USE_INDEX_WITH_SORT_LIMIT default (before: 2, now: 10), new param SEQUENCE_COLUMN_COMPANIONS, mem leak round
// v1.16.81
// sql.sh regression: time not in local timezone,
// sql.sh no more socket close after 100 executions (kept without sql.sh)
// manage continue_on_error/stop_on_error as SQL (in C code, not in nodejs driver)
// fn_free_context_name sb_vals_version = 8
// mem check on computed columns
// rework transac sql
////////////// November 2020
// v1.16.82
// index on computed columns
// v1.16.83
// manage quote inside comments
// v1.16.84
// no log in init.sb (to avoid lines in doc after refresh dirty)
// v1.16.85
// NOT_IN_MEMORY_BIG_TABLES
// v1.16.86
// computed columns with param, big code change to allow this
////////////// December 2020
// v1.16.87
// computed columns not compatible with unique operator, bug in loop with where .. in (..)
// v1.16.88
// optim round + code clean, bug fix min->minstr conversion
// v1.16.89
// the rare lines are no longer inserted (with null values) in the often lines
// v1.16.90
// add fn_get_col_type_ival_count to U
// v1.16.91
// in loop: create thread_count*thread_count fn calls, to make sure I always have thread_count running
// , otherwise some threads (on old lines for instance) may finish long before others
////////////// Jan 2021
// v1.16.92
// free sql_params in shutdown,
// , background done in start.sh and no longer in server.c (rewrite of start.sh)
// , use longjmp+return from main instead of exit (exit not working with LeakSanitizer)
// , improve housekeeping in transac sql
// , desc parameter <|verbose>
// , manage path/to/file insert with merge tables
// , explain sb_tag can be used even if there is an error in the sql execution
// , bug fix: hot change of log_verbose were not working
// , trap dlopen error
// v1.16.93
// replace | by ! in values returned (problem with table in html doc otherwise)
// , cluster: desc/set, custom.js
// , SPARSE_TEXT rework
// , drop COL_TYPE_INDEX
// , stormbase_common fix double call of error callback
// , fix potential corruption in defrag+save process
////////////// Feb 2021
// v1.16.94
// change big_online behavior, the full deletes are directly applied to table
// (not during refresh), and tables are no longer volatile
// new STORAGE_FORMAT_VERSION (5)
// , error 205 (traps a theoretical case that should not happen in real life)
// , bug fix if clear_max_perf in refresh_online context
// v1.16.95
// bad description in stormbase.lua
// , explain sb_tag not working with driver calls
// , bug fix in sb_parallel
// , sql.sh --file:xx returns 0 instead of -1 if xx is not found
// , incorrect warning message when tcp TCP_PORT_TRANSAC is in use
// , error/segfault in fn_merge when col_type is empty
// , avoid lua exception in to_date when null value is passed
// , improve version.h
// , request_temporary_license.sh
// , license.c issue
// , bug fix in select comp_col('paramVal1'), comp_col('paramVal2') .. group by comp_col('paramVal1'), comp_col('paramVal2')
// , merge v1.16.94_HF1 segfault in multi user context (REMA)
////////////// Mar 2021
// v1.16.96
// upgrade nodejs,
// improve analytics documentation,
// change default param values TCP_PORT/PARTITION_LINE_COUNT,
// stop.sh refused during save,
// new param CELL_BLOCK_INCREMENT_SIZE_B for dev,
// improve some error messages,
// delete old cell files instead of setting size to zero,
// add non regression test on fragmentation,
// cb shortcut for callback,
// allow column_name in all callbacks,
// allow '..`...',
// select_context_type in COMPUTED_COL_PREREQ (SB_LIB_VERSION 9),
// add sb_lib_version check,
// optimize select when same computed column appears several times,
// bug fix: loop freezes if SB is started in foreground mode,
// ALLOW_WHERE_ERROR, ALLOW_GROUP_BY_ERROR
// add fn_get_col_type_ival to UTILITY
// add UNKNOWN_IVAL in common.h
////////////// Apr 2021
// v1.16.97
// tableau compatibility: rework 02_sql/cluster.js 02_sql/lib/tcp_proxy.js 02_sql/lib/pgsql.js
// ESCAPE_SEPARATOR_INSIDE_DOUBLE_QUOTE (y/n)
// char text_update_sb_free; //SB_LIB_VERSION >= 10
// exp_ival3s removed
// new parameter: MAX_PERF_INDEX_ONLY
// rework SPARSE_TEXT type
// new command: set_sparse_text
// new command: defrag
// new parameter: DEBUG
// improve error logging in csv insert
// new parameter: SKIP_LINE_KO
////////////// May 2021
// v1.16.98
// TRUNC_NUMBER param to support 4.04e-06 etc
// seg fault when p(computed_column)
// remove pareto conversion from analytic.c (done in max_perf_pre.c), note: big code change in analytic.c
// bug fix in computed columns
////////////// June 2021
// v1.16.99
// allow this: set 'insert_force_value.the_col_type'='something';
// bug fix: in computed col type A when needed column is compressed
// bug fix: loop not done in full delete context
// ALLOW_ERROR param compatible with computed_columns (needed columns)
// callback can be replaced by |
// new callback fn grep
// bug fix in desc col_type (ival_count was text)
// secure thread creation during insert
// security (warning) for insertion of values longer than 1000 chars -> treated as null
// several bug fix with select_slow
// cell arrays with file for sparse_text (pcarr->f_yn)
// pthread_create issue with 60K+ files in MAX_PERF (REMA)
// use infinite loop instead of recursion in socket management (fn f_call) to avoid "Bus error"
// java jdbc driver, remove socket close/create every 100 calls (no longer needed because "Bus error" is fixed)
// new command set_text (number to text)
// optimize MAX_PERF init (VIEM_POS_2_IN_MEMORY_PARAMS)
// review comp col sharing (_f_comp_col_share_yn)
// bug fix comp col type A were not working with select_slow
// bug fix delete of empty table/partition was treated as full delete
// bug fix on with clause when col_type is empty
////////////// July 2021
// v1.17.00
// code clean
// core in where clause (dev case only)
// seg fault fix in partition/data model change/select * context
// fix regression introduced v1.16.99 in "with clause"/partition context
// bug fix: select_slow was not working with "with clause"
// seg fault in dev context (ARRAY_BLOCK size 2), because of NULL_IVAL
// optim query on col_type with high sparsity (ival_to_small_ival)
// limit number of threads for small selects
// GBY_ival_idx_to_gby_ivals_10 not used with high sparsity
// v1.17.01
// more logs in load
// parameter SPARSE_TEXT_KEEP + bug fix in SPARSE_TEXT
// TRUNC_NUMBER param is obsolete (1-e3 notation supported in all cases)
// support loop table(*) syntax
// add sb_export in common.c
// bug fix: where clause "=null" on join column matched nothing (because index was used), now fact table is checked instead of using the index
// desc .. sql
// fix memory overlap issue (detected by valgrind)
////////////// September 2021
// v1.17.02
// fix regression of v1.17.00 on select_all
// ALLOW_WHERE_ERROR is not working with nn views
// no #lines limit for col_type checks without where clause
// better management of minstr/maxstr (using countd_first_ivals in merge operations when possible)
// bug fix, where clause issue in partition+select_slow context
////////////// October 2021
// v1.17.03
// bug fix in size.sh
// computed column type B
// add computed col parameters to fn_get_needed_columns (SB_LIB_VERSION 11)
// bug fix: sum(comp_col('param1','param2')) not working
// v1.17.04
// seg fault in countdistinct/select_slow context when #ivals > #lines in table
// bug fix in partition/"computed col with param" context
// small mem leaks fix
// disable index use for where clause containing null:
// if there is an orphan in a dimension, where col=null was returning orphan rows without index (or for irels rarely used)
// but nothing with index on irels often used
// seg fault on where clause in partition context
// manage unset with INSERT_FORCE_VALUE parameter
// computed col type B not working on pareto columns
// regression IN_MEMORY_BIG_TABLES params not taken into account
// seg fault with unique+countdistinct on same type A computed column
// pb only happened because pareto conversion was applied twice
////////////// December 2021
// v1.17.05
// lock not released after refresh permission if no permission changes
// debug mode in permission + refresh_force mode
// introduce PERMISSION_SUPER_USER_IVAL to avoid computing array permission in wildcard context
// IN_MEMORY_BIG_TABLES does not apply to child partitions
// add fn_update_idx1
// bug fix, with clause problem in partition context (problem is general but visible only in select_slow context)
// new parameter QUERY_TABLE_ALLOWED (tables can be queried directly)
// new parameter SKIP_REFRESH_FOR_NOT_IN_MEMORY_BIG_TABLES (views on big tables are not refreshed)
// trap system() error
// load init file before max_perf during startup (otherwise after a clear_max_perf in_memory parameters overload is not taken into account)
// change limit for select_all (20E6 -> MAX_GBY_BOOSTER_LEN/10)
// allows specifying the column on which the with clause should be applied (when several columns have the same col_type in the view) (see alias_group_by_col)
// seg fault with #--SB explain, in specific context (very rare)
// for number columns and in some cases only, null values (zero len strings read from file) were inserted as zero
// new parameter ALLOW_EXP_ERROR
// group by clause can be omitted
// bug fix in export callback function, a line with OK was added in the export file
// bug fix in computed columns param fn('aaa,bbb') was treated as fn('aaa','bbb')
// new callback functions: open_join and keep_columns
////////////// March 2022
// v1.17.06
// get_sb_opt management in compile.sh
// add ./_SO_LINUX to LD_LIBRARY_PATH (other needed .so can be put in _SO_LINUX)
// select without group by not working when more than 2 group by columns
// OOM in open_join context
// null number columns were displayed as # in open_join context, now they are zero
// improve #DYN_SQL# and #LOAD# management
// union callback function
// change GENERATE_REPLAY behavior, generates 2 files now :
// - stormbase_replay1.sql (before dyn sql)
// - stormbase_replay2.sql (after dyn sql)
// - tag #LOAD_KEEP is also introduced
// remove some logs "warning view in memory with non merge dimension (same line count) ..."
// fix seg fault with many group by columns
// show lua error even when log_verbose is n
// bug fix in union
// string formatting done too early in the callback process
// remove callback compilation check before execution
// line. can be omitted in add_number/text formulas
// new callback function rename
// "nan" strings are now treated as zeros for number columns
// bug fix round(0,2) was returning 0.5 instead of 0
// problems with non in memory tables (indexes were lost)
// rework of monitor.sh
// bug fix when using batch insert values with numbers
// improve UTILITY (SB_LIB_VERSION 12)
// standard update function moved to common.c (in order to provide code samples)
// automatic computed column creation from fn_computed_col (only A type for the moment)
// COMB_COUNT moved from 5 to 6
// work_area added to SB_VALS (SB_LIB_VERSION 13)
// display ms in log if DEBUG=y (see STARTUP_TIME_MS)
// bug fix in unsigned int limit check
// ./sql.sh change retry delay from 1 minute to 10 minutes
// fn_update_idx1_num
// bug fix open_join in partition context not working -> SB crash
// set param_name=default
// max_perf not calculated for empty views
// -> select changed to select_slow on empty views
// -> select not run against empty partition/child view to avoid bug in select_slow context
// change MAX_GBY_BOOSTER_LEN from int to long
// v1.17.06 useless init in fn_store/fn_merge context
// v1.17.06 RESULT_PARAM optim, members moved to MAX_PERF_FN_PARAM
// v1.17.06 rewrite of index/booster algo
// v1.17.06 combined index (replaces select w/o booster)
// v1.17.06 no select against empty partitions
// v1.17.06 optim where clause on ival with high cardinality in index context
// v1.17.06 put back "optimize MAX_PERF init (VIEM_POS_2_IN_MEMORY_PARAMS)", view_pos issue recycle fixed
// v1.17.06 new param ACTIVATE_NEW_DEV, used for combined index activation in this version
// v1.17.06 select on not in memory views return empty --> now returns a clear non blocking message
// v1.17.06 set_dirty will also flag partitions as dirty
// v1.17.06 add SKIP return code in loops
// v1.17.06 "where_index with null value, index can't be used" check, kept only for "col in or col =" where clauses
// v1.17.06 allow semicolon in quoted string (with sql.sh), ex. system 'echo "aaa;"';
// v1.17.06 allow spaces before "with" keyword
// v1.17.06 small mem leaks
////////////// August 2022
// v1.17.07
// v1.17.07 hash group by (POC)
// v1.17.07 bug fix, disable this optim for computed columns where origin and dest col_type are not same:
// v1.17.06 optim where clause on ival with high cardinality in index context
// v1.17.07 bug fix, incorrect result in fn_store+no_booster context
// v1.17.07 skipping index on computed column type A if >106
// v1.17.07 new standard update function fn_copy_col
// v1.17.07 reverse index on computed columns created with fn_computed_col
// v1.17.07 top/bottom with zero (or invalid integer) return error (seg fault before)
// v1.17.07 ALLOW_WHERE_ERROR was not managed for computed col created with fn_computed_col
// F for final
// v1.17.07_F seg fault in ALLOW_WHERE_ERROR context and "double col_type check"
// v1.17.07_F improve fn_build_idx1_num
// v1.17.07_F computed columns created by fn_computed_col -> column name in param becomes case insensitive
// v1.17.07_F free VIEM_POS_2_IN_MEMORY_PARAMS after view drop (otherwise VIEM_POS_2_IN_MEMORY_PARAMS_LEN limit is reached in refresh_online context)
// v1.17.07_F regression in refresh dirty view context (because views are no longer dropped)
// v1.17.07_F bug fix in delete+refresh dirty table+select_slow context -> rel_matrix and big table are out of sync
// v1.17.07_F col_type_check wait for cache
// v1.17.07_F seg fault in export+empty table context
// v1.17.07_F seg fault in 2 SB on same INT/MAX_PERF context, rework of PIN_MEMORY parameter
// v1.17.07_F9 problem for empty table with empty file insert in refresh online context
// v1.17.07_F10 seg fault in select (context not understood)
////////////// November 2022
// v1.17.08
// v1.17.08 allow changing column order in tables, and removing columns
// v1.17.08 no_view potential problem (detected by valgrind)
// v1.17.08_F1 allow changing column order in tables, and removing columns (rework)
// v1.17.08_F1 sb_hgb_context_free
// v1.17.08_F2 seg fault with hash_group_by when table is empty
// v1.17.08_F3 escape "-" in regex lua function
// v1.17.08_F4 add lua function is_not
// v1.17.08_F5 fix regression on with clause using function
// v1.17.08_F6 avoid seg fault when last partition of with/fn_store is select_slow+view dirty (DHZ DFAI)
// v1.17.08_F7 seg fault in flat select with limit clause (CAS DFAI)
// v1.17.08_F8 minor modifications on common.h and common.c
// v1.17.08_F9 manage tables bigger than 2GB lines in hash_group_by
// v1.17.08_F10 bug fix, context (hard to reproduce): view empty/is_mp_dirty=n, partition not empty /is_mp_dirty=y, with/fn_store/fn_merge
// -> sql returns error 151 instead of seg fault
// v1.17.08_F11 bug fix, table signature change was not detected when we change a column name
// v1.17.08_F12 bug fix, problem loading error message in stormbase_common.js (when called from external program)
////////////// March 2023
// v1.17.09 add SB_VALS.line_fetch (SB_LIB_VERSION 14)
// v1.17.09 bug fix SB_VALS.work_area not passed to partition loop
// v1.17.09 multi threading of fn_hash_group_by
// v1.17.09 max_perf index on non in memory big tables are no longer loaded in PIN_MEMORY context (reduce memory for SPN)
// v1.17.09 reduce memory usage in this context: loop on full scope with where clause
// v1.17.09 add is_drop_yn info to context and fn_get_context_is_drop_yn to U
// v1.17.09 U can be accessed directly from custom code
// v1.17.09_F1 add parameter retry to sql.sh (stormbase_sql.js)
// v1.17.09_F2 improve performance of hash group by (hashes generated by fn_hash_modular were too small in many cases)
// v1.17.09_F3 Bug fix: seg fault when view with non merge dimension+delete on table+refresh dirty table+select on view
// v1.17.09_F4 Bug fix: error 157 when same column appeared twice in sub select
// v1.17.09_F5 Bug fix: seg fault in cb_c col_type_check using count
// v1.17.09_F6 Add warning in standard update function (to avoid seg faults when usage is incorrect)
// v1.17.09_F6 Add hwm (high water mark) monitoring in monitor.sh
// v1.17.09_F7 Add MEM_OS_AVAILABLE in monitor_high_water_mark.txt (monitor.sh)
// v1.17.09_F8 Very rare seg fault (DHZ store prod) when select during computed col free
////////////// August 2023
// v1.17.10 Support for SB_CONF_PATH, SB_LICENSE_PATH, /sb_host_etc
// v1.17.10 Bug fix: data not exported with export.sh when two tables have similar names (foo and foo_something for instance)
// v1.17.10_F1 remove compilation warnings
// v1.17.10_F2 minor changes in common.c to avoid compilation warnings
// v1.17.10_F2 Add TABLE_CSV management to UTILITY
// v1.17.10_F2 Bug fix: manage SB_CONF_PATH in shells
////////////// October 2023
// v1.17.10_F3 set priority_over_real_column_yn = 'y' for computed columns type A and B
// v1.17.10_F4 rollback set priority_over_real_column_yn = 'y' for computed columns type A and B
// v1.17.10_F4 Bug fix: export.sh, bad management of table/view partitions
// v1.17.10_F5 Bug fix: allow / and . in computed columns parameters
// v1.17.10_F6 fn_build_idx1 allow setting context name with one parameter
// v1.17.10_F6 add fn_export (same as sb_export) to common.c, sb_export kept for compatibility
// v1.17.10_F6 add fn_cartesian_from_contexts from common.c
// v1.17.10_F7 allow setting SB_LICENSE_PATH as a parameter
// v1.17.10_F8 bug fix: seg fault in fn_cartesian_from_contexts when a context is empty
// v1.17.10_F9 introduce CONTINUE_IVAL in computed columns
// v1.17.10_F9 fn_eval initial beta version
// v1.17.10_F10 CONTINUE_IVAL bug fix
// v1.17.10_F11 optimize refresh in high cardinality col_type context
// v1.17.10_F11 force clean_periodic after refresh_online (otherwise the 5 possible online views may all be in use within the same minute and the next refresh_online will fail)
// v1.17.10_F12 fix regression (seg fault) from "optimize refresh in high cardinality col_type context"
// v1.17.10_F13 push the limit for error 268 from 1 billion to 1.5 billion (REMA issue)
// v1.17.10_F14 add mutex on call to clean_periodic from SQL (collision with background thread + seg fault)
// v1.17.10_F15 change clean_periodic so that the previous view is not dropped (the check on active sql is not 100% reliable)
// v1.17.10_F16 seg fault when pre select fn returns NULL and computed col is not type A or B
// v1.17.10_F17 bug fix in monitor.sh regarding partition management
// v1.17.10_F18 allow non string parameters in btwe_s
// v1.17.10_F19 return sql error instead of stopping SB when an in clause contains thousands of values (and does not fit in a single memory cell)
// v1.17.10_F20 seg fault when calling several "number computed columns" in one sql
// v1.17.10_F21 bug fix: possible incorrect results in dimensions in multi partitions context (problems are a bit different in select and select_slow but both may be incorrect)
// v1.17.10_F22 rework v1.17.10_F19 return sql error instead of stopping SB when an in clause contains thousands of values (and does not fit in a single memory cell)
// v1.17.10_F23 housekeeping in shell scripts
// v1.17.10_F24 small bug fix in get_server_info.sh
// v1.17.10_F25 bug fix: seg fault for files with \r (CR) only as end of line
// v1.17.10_F26 fix optimization introduced in v1.17.10_F11 (happens in MAX_PERF_FILTER context only)
// v1.17.10_F27 error code 222 and file stormbase.debug are introduced to manage dynamic sql errors
// v1.17.10_F28 bug fix: cache is now managed using the original sql (with the #LOAD# etc..)
// v1.17.10_F29 evo: add execute_SQL_sync in stormbase_common.js
-- ###########################
-- RUNNING shutdown.sql
shutdown;
-- ###########################
-- RUNNING clear.sh
/*
sure mode, no confirmation prompt
clearing directory INTERNAL_FILES_DIR (../STORAGE/INT) ...
done
clearing directory MAX_PERF_FILES_DIR (../STORAGE/MAX_PERF) ...
done
clearing directory TRANSAC_FILES_DIR (../STORAGE/TRANSAC) ...
done
*/
-- ###########################
-- RUNNING doc_bench.sql
--
stop_on_error;
reponse
success
create col_type t_location_id as text;
reponse
success
create col_type t_pickup_location_id as text;
reponse
success
create col_type t_dropoff_location_id as text;
reponse
success
create col_type t_date as text;
reponse
success
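--Note: pickup and dropoff location ids get distinct col_types on purpose: each one
--joins its own dimension table in v_trips below, while t_location_id is shared by
--the *_id2 columns of both dimensions. A hypothetical mismatched join such as
--  where pickup_location.pickup_location_id=trips.dropoff_location_id
--should be rejected, since t_pickup_location_id and t_dropoff_location_id differ.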
create big table trips (
 cab_type_id text,
 vendor_id text,
 pickup_date t_date,
 pickup_time text,
 dropoff_date text,
 dropoff_time text,
 store_and_fwd_flag text,
 rate_code_id text,
 pickup_longitude number,
 pickup_latitude number,
 dropoff_longitude number,
 dropoff_latitude number,
 passenger_count number,
 trip_distance number,
 fare_amount number,
 extra number,
 mta_tax number,
 tip_amount number,
 tolls_amount number,
 ehail_fee number,
 improvement_surcharge number,
 total_amount number,
 payment_type text,
 trip_type text,
 pickup_nyct2010_gid text,
 dropoff_nyct2010_gid text,
 pickup_location_id t_pickup_location_id,
 dropoff_location_id t_dropoff_location_id
);
reponse
success
create merge table pickup_location(
 pickup_location_id t_pickup_location_id,
 pickup_location_id2 t_location_id
);
reponse
success
create merge table dropoff_location(
 dropoff_location_id t_dropoff_location_id,
 dropoff_location_id2 t_location_id
);
reponse
success
create merge table calendar(
 date t_date,
 week_day text,
 month_name text,
 full_year text
);
reponse
success
create view v_trips as
 select * from trips, pickup_location, dropoff_location, calendar
 where pickup_location.pickup_location_id=trips.pickup_location_id
 and dropoff_location.dropoff_location_id=trips.dropoff_location_id
 and calendar.date=trips.pickup_date
;
reponse
success
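--v_trips is the query entry point for the benchmark. As an illustration only (these
--statements are not part of the recorded run, and assume standard group by aggregate
--syntax is supported), the classic NYC-taxi benchmark queries would look like:
--  select cab_type_id, count(*) from v_trips group by cab_type_id;
--  select passenger_count, avg(total_amount) from v_trips group by passenger_count;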
--OS tuning before loading: disable swap and transparent huge pages.
swapoff -a
--On CentOS (as root):
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
--On RedHat (as root):
echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled
echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag
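--To verify the setting took effect (standard Linux sysfs; the bracketed value is
--the active one):
--  cat /sys/kernel/mm/transparent_hugepage/enabled
--  always madvise [never]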
set file_separator=';';
reponse
success
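--With file_separator set to ';', the loader expects one row per line with
--semicolon-separated values; a pickup_location.csv row would plausibly look like
--(hypothetical sample, consistent with the select output below):
--  129;129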
insert into pickup_location select * from file('pickup_location.csv');
reponse
success
insert into dropoff_location select * from file('dropoff_location.csv');
reponse
success
insert into calendar select * from file('calendar.csv');
reponse
success
insert into trips select * from file('trips.csv') where rownum<10;
reponse
success
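--Note: the where rownum<10 clause loads only the first 9 lines of trips.csv, which
--is why desc table reports line_count 9 for trips below; the full multi-year load
--appears in insert_all.sql further down.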
refresh dirty views;
reponse
success
save;
reponse
success
desc table;
table_name column_count line_count has_delete has_update parent_view_hidden
trips 28 9 n n n
pickup_location 2 202 n n n
dropoff_location 2 255 n n n
calendar 4 4 n n n
desc view;
view_name table_count rel_count is_dirty is_mp_dirty is_partial first_iline mp_rare_count mp_often_count mp_count r_online_count mp_r_online_count
v_trips 4 9 n n n 10 5 14 19 0 0
select * from v_trips;
cab_type_id vendor_id pickup_date pickup_time dropoff_date dropoff_time store_and_fwd_flag rate_code_id pickup_longitude pickup_latitude dropoff_longitude dropoff_latitude passenger_count trip_distance fare_amount extra mta_tax tip_amount tolls_amount ehail_fee improvement_surcharge total_amount payment_type trip_type pickup_nyct2010_gid dropoff_nyct2010_gid pickup_location_id dropoff_location_id pickup_location_id pickup_location_id2 dropoff_location_id dropoff_location_id2 date week_day month_name full_year
2 2 20160804 05:00 20160804 05:16 N 1 0 0 0 0 1 2.130 12 0.500 0.500 0 0 0 0.300 13.300 2 1 # # 129 223 129 129 223 223 20160804 Thursday August 2016
2 2 20160804 05:13 20160804 05:14 N 1 0 0 0 0 1 0.120 2.500 0.500 0.500 10 0 0 0.300 13.800 1 1 # # 244 244 244 244 244 244 20160804 Thursday August 2016
2 2 20160804 05:31 20160804 05:45 N 1 0 0 0 0 1 2.440 12 0.500 0.500 0 0 0 0.300 13.300 1 1 # # 181 228 181 181 228 228 20160804 Thursday August 2016
2 2 20160804 05:08 20160804 05:14 N 1 0 0 0 0 1 0.830 6 0.500 0.500 0 0 0 0.300 7.300 2 1 # # 41 166 41 41 166 166 20160804 Thursday August 2016
2 2 20160804 05:46 20160804 06:05 N 1 0 0 0 0 1 5.620 18.500 0.500 0.500 0 0 0 0.300 19.800 2 1 # # 7 164 7 7 164 164 20160804 Thursday August 2016
2 2 20160804 05:59 20160804 05:59 N 5 0 0 0 0 1 0 1 0 0 0 0 0 0 1 1 2 # # 21 21 21 21 21 21 20160804 Thursday August 2016
2 2 20160804 05:19 20160804 05:30 N 1 0 0 0 0 1 2.140 10 0.500 0.500 0 0 0 0.300 11.300 2 1 # # 112 225 112 112 225 225 20160804 Thursday August 2016
2 2 20160804 05:42 20160804 05:50 N 1 0 0 0 0 1 1.340 7.500 0.500 0.500 0 0 0 0.300 8.800 2 1 # # 82 173 82 82 173 173 20160804 Thursday August 2016
2 2 20160804 05:17 20160804 05:21 N 1 0 0 0 0 1 1.420 6 0.500 0.500 1.460 0 0 0.300 8.760 1 1 # # 166 116 166 166 116 116 20160804 Thursday August 2016
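--Note how the view output is denormalized: pickup_location_id and dropoff_location_id
--each appear twice (once from trips, once from their dimension), and the calendar
--attributes (week_day, month_name, full_year) are resolved through pickup_date.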
--important parameters
../../../40_BENCH/nyc_files/stormbase.conf
MAX_PERF_USE_COMPRESSION:y
CSV_FILES_DIR:/home/postgres/CSV
FILE_SEPARATOR:;
--script
../../../40_BENCH/nyc_files/insert_all.sql
create col_type t_location_id as text;
create col_type t_pickup_location_id as text;
create col_type t_dropoff_location_id as text;
create col_type t_date as text;
create big table trips (
 cab_type_id text,
 vendor_id text,
 pickup_date t_date,
 pickup_time text,
 dropoff_date text,
 dropoff_time text,
 store_and_fwd_flag text,
 rate_code_id text,
 pickup_longitude number,
 pickup_latitude number,
 dropoff_longitude number,
 dropoff_latitude number,
 passenger_count number,
 trip_distance number,
 fare_amount number,
 extra number,
 mta_tax number,
 tip_amount number,
 tolls_amount number,
 ehail_fee number,
 improvement_surcharge number,
 total_amount number,
 payment_type text,
 trip_type text,
 pickup_nyct2010_gid text,
 dropoff_nyct2010_gid text,
 pickup_location_id t_pickup_location_id,
 dropoff_location_id t_dropoff_location_id
);
create merge table pickup_location(
 pickup_location_id t_pickup_location_id,
 pickup_location_id2 t_location_id
);
create merge table dropoff_location(
 dropoff_location_id t_dropoff_location_id,
 dropoff_location_id2 t_location_id
);
create merge table calendar(
date t_date,
week_day text,
month_name text,
full_year text
);
create view v_calendar as select * from calendar;
create view v_pickup_location as select * from pickup_location;
create view v_dropoff_location as select * from dropoff_location;
create view v_trips as
 select * from trips, pickup_location, dropoff_location, calendar
 where pickup_location.pickup_location_id=trips.pickup_location_id
 and dropoff_location.dropoff_location_id=trips.dropoff_location_id
 and calendar.date=trips.pickup_date
;
insert into pickup_location select * from file('pickup_location.csv');
insert into dropoff_location select * from file('dropoff_location.csv');
insert into calendar select * from file('calendar.csv');
insert into trips select * from file('trips_2009.csv');
insert into trips select * from file('trips_2010.csv');
insert into trips select * from file('trips_2011.csv');
insert into trips select * from file('trips_2012.csv');
insert into trips select * from file('trips_2013.csv');
insert into trips select * from file('trips_2014.csv');
insert into trips select * from file('trips_2015.csv');
insert into trips select * from file('trips_2016.csv');
insert into trips select * from file('trips_2017.csv');
insert into trips select * from file('trips_2018.csv');
save;
refresh dirty view;
save;
--logs
../../../40_BENCH/nyc_files/insert_all.log
3319
[2021-01-22 09:40:24] --------- NEW CONNECTION ------------ CACHE SIZE : 0 / 0
[2021-01-22 09:40:24] connecting to localhost : 3319
[2021-01-22 09:40:24] -> authentication;
[2021-01-22 09:40:24] <- data ( 2 ms )
[2021-01-22 09:40:24] -> create col_type t_location_id as text;
[2021-01-22 09:40:24] <- data ( 101 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] -> create col_type t_pickup_location_id as text;
[2021-01-22 09:40:24] <- data ( 100 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] -> create col_type t_dropoff_location_id as text;
[2021-01-22 09:40:24] <- data ( 101 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] -> create col_type t_date as text;
[2021-01-22 09:40:24] <- data ( 101 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] ->
create big table trips (
 cab_type_id text,
 vendor_id text,
 pickup_date t_date,
 pickup_time text,
 dropoff_date text,
 dropoff_time text,
 store_and_fwd_flag text,
 rate_code_id text,
 pickup_longitude number,
 pickup_latitude number,
 dropoff_longitude number,
 dropoff_latitude number,
 passenger_count number,
 trip_distance number,
 fare_amount number,
 extra number,
 mta_tax number,
 tip_amount number,
 tolls_amount number,
 ehail_fee number,
 improvement_surcharge number,
 total_amount number,
 payment_type text,
 trip_type text,
 pickup_nyct2010_gid text,
 dropoff_nyct2010_gid text,
 pickup_location_id t_pickup_location_id,
 dropoff_location_id t_dropoff_location_id
);
[2021-01-22 09:40:24] <- data ( 175 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] ->
create merge table pickup_location(
 pickup_location_id t_pickup_location_id,
 pickup_location_id2 t_location_id
);
[2021-01-22 09:40:24] <- data ( 102 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] ->
create merge table dropoff_location(
 dropoff_location_id t_dropoff_location_id,
 dropoff_location_id2 t_location_id
);
[2021-01-22 09:40:24] <- data ( 101 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] ->
create merge table calendar(
date t_date,
week_day text,
month_name text,
full_year text
);
[2021-01-22 09:40:24] <- data ( 103 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:24] -> create view v_calendar as select * from calendar;
[2021-01-22 09:40:25] <- data ( 102 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> create view v_pickup_location as select * from pickup_location;
[2021-01-22 09:40:25] <- data ( 102 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> create view v_dropoff_location as select * from dropoff_location;
[2021-01-22 09:40:25] <- data ( 103 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] ->
create view v_trips as
 select * from trips, pickup_location, dropoff_location, calendar
 where pickup_location.pickup_location_id=trips.pickup_location_id
 and dropoff_location.dropoff_location_id=trips.dropoff_location_id
 and calendar.date=trips.pickup_date;
[2021-01-22 09:40:25] <- data ( 104 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> insert into pickup_location select * from file('pickup_location.csv');
[2021-01-22 09:40:25] <- data ( 102 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> insert into dropoff_location select * from file('dropoff_location.csv');
[2021-01-22 09:40:25] <- data ( 103 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> insert into calendar select * from file('calendar.csv');
[2021-01-22 09:40:25] <- data ( 262 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:40:25] -> insert into trips select * from file('trips_2009.csv');
[2021-01-22 09:52:10] <- data ( 704998 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 09:52:10] -> insert into trips select * from file('trips_2010.csv');
[2021-01-22 10:03:04] <- data ( 653457 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 10:03:04] -> insert into trips select * from file('trips_2011.csv');
[2021-01-22 10:16:52] <- data ( 828521 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 10:16:52] -> insert into trips select * from file('trips_2012.csv');
[2021-01-22 10:30:13] <- data ( 801036 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 10:30:13] -> insert into trips select * from file('trips_2013.csv');
[2021-01-22 10:42:40] <- data ( 746230 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 10:42:40] -> insert into trips select * from file('trips_2014.csv');
[2021-01-22 10:55:55] <- data ( 795400 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 10:55:55] -> insert into trips select * from file('trips_2015.csv');
[2021-01-22 11:07:49] <- data ( 714364 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 11:07:49] -> insert into trips select * from file('trips_2016.csv');
[2021-01-22 11:16:18] <- data ( 508225 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 11:16:18] -> insert into trips select * from file('trips_2017.csv');
[2021-01-22 11:21:55] <- data ( 337340 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 11:21:55] -> insert into trips select * from file('trips_2018.csv');
[2021-01-22 11:26:57] <- data ( 301737 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 11:26:57] -> save;
[2021-01-22 12:04:44] <- data ( 2266823 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 12:04:44] -> refresh dirty view;
SIGHUP received (ignored)
[2021-01-22 12:35:21] <- data ( 1837801 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
[2021-01-22 12:35:22] -> save;
[2021-01-22 12:50:19] <- data ( 897616 ms )
| reponse|
|--------------------------------------------------|
| success|
ROWCOUNT : 1
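--Load summary (derived from the timestamps above): the ten yearly trips inserts took
--about 1h47 (09:40 -> 11:27), the first save ~38 min, refresh dirty view ~31 min and
--the final save ~15 min, i.e. roughly 3h10 end to end.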
--size info
../../../40_BENCH/nyc_files/size.log
bash-4.2$ ./size.sh
CSV_FILES_DIR /home/postgres/CSV 260782276 KB
INTERNAL_FILES_DIR ../INT 197165912 KB
MAX_PERF_FILES_DIR ../MAX_PERF 91560016 KB
TMP_FILES_DIR ../TMP 4 KB
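--Storage note: INTERNAL_FILES_DIR (~197 GB) plus MAX_PERF_FILES_DIR (~92 GB) totals
--about 289 GB for ~261 GB of raw CSV, i.e. roughly 1.1x the CSV size with
--MAX_PERF_USE_COMPRESSION:y.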
-- ###########################
-- RUNNING shutdown.sql
shutdown;