use ExtUtils::MakeMaker qw(prompt WriteMakefile);
my @ALLOWED_ARGS = ('CONFDIR','DOCDIR','DESTDIR','QUIET','INSTALLDIRS','INSTALL_BASE','PREFIX');
# Parse command line arguments and store them as environment variables
while ($_ = shift) {
my ($k,$v) = split(/=/, $_, 2);
if (grep(/^$k$/, @ALLOWED_ARGS)) {
$ENV{$k} = $v;
}
}
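# For example (hypothetical paths), a packager could run:
#   perl Makefile.PL CONFDIR=/etc/ora2pg DOCDIR=/usr/share/doc/ora2pg DESTDIR=/tmp/build
# and each KEY=value pair listed in @ALLOWED_ARGS ends up in %ENV for use below.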
# Default install path
my $CONFDIR = $ENV{CONFDIR} || '/etc/ora2pg';
my $RPM_CONFDIR = $CONFDIR;
my $DOCDIR = $ENV{DOCDIR} || '/usr/local/share/doc/ora2pg';
my $DEST_CONF_FILE = 'ora2pg.conf.dist';
my $DATA_LIMIT_DEFAULT = 10000;
if ($^O =~ /MSWin32|dos/i) {
$DATA_LIMIT_DEFAULT = 2000;
}
my $PREFIX = $ENV{DESTDIR} || $ENV{PREFIX} || $ENV{INSTALL_BASE} || '';
$PREFIX =~ s/\/$//;
$ENV{INSTALLDIRS} ||= 'site';
# Try to set the default configuration directory following $PREFIX
if ($^O =~ /MSWin32|dos/i) {
# Force default path
$CONFDIR = 'C:\ora2pg';
$DOCDIR = 'C:\ora2pg';
} elsif ($PREFIX) {
if (!$ENV{CONFDIR}) {
$CONFDIR = $PREFIX . '/etc/ora2pg';
} else {
$CONFDIR = $PREFIX . '/' . $ENV{CONFDIR};
}
if (!$ENV{DOCDIR}) {
$DOCDIR = $PREFIX . '/doc/ora2pg';
} else {
$DOCDIR = $PREFIX . '/' . $ENV{DOCDIR};
}
}
# Try to find all binaries used by Ora2Pg
my $bzip2 = '';
if ($^O !~ /MSWin32|dos/i) {
# assign to the outer $bzip2 so the BZIP2 directive below gets a real path
$bzip2 = `which bzip2`;
chomp($bzip2);
$bzip2 ||= '/usr/bin/bzip2';
}
my $oracle_home = $ENV{ORACLE_HOME} || '/usr/local/oracle/10g';
# Setup OK, generating the default ora2pg.conf config file
unless(open(OUTCFG, ">$DEST_CONF_FILE")) {
print "\nError: can't write config file $DEST_CONF_FILE, $!\n";
exit 0;
}
print OUTCFG qq{
#################### Ora2Pg Configuration file #####################
# Support for including a common config file that may contain any
# of the following configuration directives.
#IMPORT common.conf
#------------------------------------------------------------------------------
# INPUT SECTION (Oracle connection or input file)
#------------------------------------------------------------------------------
# Set this directive to a file containing PL/SQL Oracle code, like a function,
# procedure or a full package body, to prevent Ora2Pg from connecting to an
# Oracle database and just apply its conversion tool to the content of the
# file. This can only be used with the following export types: PROCEDURE,
# FUNCTION or PACKAGE. If you don't know what you are doing, don't use this directive.
#INPUT_FILE ora_plsql_src.sql
# Set the Oracle home directory
ORACLE_HOME $oracle_home
# Set Oracle database connection (datasource, user, password)
ORACLE_DSN dbi:Oracle:host=mydb.mydom.fr;sid=SIDNAME;port=1521
ORACLE_USER system
ORACLE_PWD manager
# Set this to 1 if you connect as a simple user and can not extract things
# from the DBA_... tables. It will use the ALL_... tables instead. This will not work
# with GRANT export, you should use an Oracle DBA username as ORACLE_USER.
USER_GRANTS 0
# Trace all to stderr
DEBUG 0
# This directive can be used to send an initial command to Oracle, just after
# the connection. For example to unlock a policy before reading objects or
# to set some session parameters. This directive can be used multiple times.
#ORA_INITIAL_COMMAND
#------------------------------------------------------------------------------
# SCHEMA SECTION (Oracle schema to export and use of schema in PostgreSQL)
#------------------------------------------------------------------------------
# Export Oracle schema to PostgreSQL schema
EXPORT_SCHEMA 0
# Oracle schema/owner to use
#SCHEMA SCHEMA_NAME
# Enable/disable the CREATE SCHEMA SQL order at the start of the output file.
# It is enabled by default and only concerns the TABLE export type.
CREATE_SCHEMA 1
# Enable this directive to force Oracle to compile the schema before exporting code.
# When this directive is enabled and SCHEMA is set to a specific schema name,
# only invalid objects in this schema will be recompiled. If SCHEMA is not set
# then all schemas will be recompiled. To force recompilation of invalid objects in a
# specific schema, set COMPILE_SCHEMA to the schema name you want to recompile.
# This will ask Oracle to validate the PL/SQL that could have been invalidated
# after an export/import for example. The 'VALID' or 'INVALID' status applies to
# functions, procedures, packages and user defined types.
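# For example, to recompile only the invalid objects of one schema (hypothetical
# schema name, adjust to your environment):
#COMPILE_SCHEMA HR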
COMPILE_SCHEMA 1
# By default if you set EXPORT_SCHEMA to 1 the PostgreSQL search_path will be
# set to the exported schema name given as value of the SCHEMA directive. You can
# define/force the PostgreSQL schema to use with this directive.
#
# The value can be a comma delimited list of schemas, but not when using the TABLE
# export type because in this case it will generate the CREATE SCHEMA statement
# and it doesn't support multiple schema names. For example, if you set PG_SCHEMA
# to something like "user_schema, public", the search path will be set like this:
# SET search_path = user_schema, public;
# forcing the use of another schema (here user_schema) than the one from the Oracle
# schema set in the SCHEMA directive. You can also set the default search_path
# for the PostgreSQL user you are using to connect to the destination database
# by using:
# ALTER ROLE username SET search_path TO user_schema, public;
# in this case you don't have to set PG_SCHEMA.
#PG_SCHEMA
# Use this directive to add a specific schema to the search path to look
# for PostGis functions.
#POSTGIS_SCHEMA
# Allows adding a comma separated list of system users to exclude from the
# Oracle extraction. Oracle has many of them depending on the modules
# installed. By default it will suppress all objects owned by the following
# system users:
# 'SYSTEM','CTXSYS','DBSNMP','EXFSYS','LBACSYS','MDSYS','MGMT_VIEW',
# 'OLAPSYS','ORDDATA','OWBSYS','ORDPLUGINS','ORDSYS','OUTLN',
# 'SI_INFORMTN_SCHEMA','SYS','SYSMAN','WK_TEST','WKSYS','WKPROXY',
# 'WMSYS','XDB','APEX_PUBLIC_USER','DIP','FLOWS_020100','FLOWS_030000',
# 'FLOWS_040100','FLOWS_010600','FLOWS_FILES','MDDATA','ORACLE_OCM',
# 'SPATIAL_CSW_ADMIN_USR','SPATIAL_WFS_ADMIN_USR','XS\$NULL','PERFSTAT',
# 'SQLTXPLAIN','DMSYS','TSMSYS','WKSYS','APEX_040000','APEX_040200',
# 'DVSYS','OJVMSYS','GSMADMIN_INTERNAL','APPQOSSYS','DVSYS','DVF',
# 'AUDSYS','APEX_030200','MGMT_VIEW','ODM','ODM_MTR','TRACESRV','MTMSYS',
# 'OWBSYS_AUDIT','WEBSYS','WK_PROXY','OSE\$HTTP\$ADMIN',
# 'AURORA\$JIS\$UTILITY\$','AURORA\$ORB\$UNAUTHENTICATED',
# 'DBMS_PRIVILEGE_CAPTURE','CSMIG','MGDSYS','SDE','DBSFWUSER'
# Any other users set in this directive will be added to this list.
#SYSUSERS OE,HR
# List of schemas to get functions/procedures meta information from that are used
# in the current schema export. When replacing calls to functions with OUT
# parameters, if a function is declared in another package then the function
# call rewriting can not be done because Ora2Pg only knows about functions
# declared in the current schema. By setting a comma separated list of schemas
# as value of this directive, Ora2Pg will look in these packages for
# all function/procedure/package declarations before proceeding to the current
# schema export.
#LOOK_FORWARD_FUNCTION SCOTT,OE
# Force Ora2Pg to not look for function declarations. Note that this will prevent
# Ora2Pg from rewriting function calls if needed. Do not enable it unless
# looking forward at functions breaks other exports.
NO_FUNCTION_METADATA 0
#------------------------------------------------------------------------------
# ENCODING SECTION (Define client encoding at Oracle and PostgreSQL side)
#------------------------------------------------------------------------------
# Enforce default language setting following the Oracle database encoding. This
# may be used with multibyte characters like UTF8. Here are the default values
# used by Ora2Pg, you should not change them unless you have problems with this
# encoding. This will set \$ENV{NLS_LANG} to the given value.
#NLS_LANG AMERICAN_AMERICA.AL32UTF8
# This will set \$ENV{NLS_NCHAR} to the given value.
#NLS_NCHAR AL32UTF8
# By default PostgreSQL client encoding is automatically set to UTF8 to avoid
# encoding issue. If you have changed the value of NLS_LANG you might have to
# change the encoding of the PostgreSQL client.
#CLIENT_ENCODING UTF8
# To force utf8 encoding of the exported PL/SQL code, enable this directive.
# Could be helpful in some rare conditions.
FORCE_PLSQL_ENCODING 0
#------------------------------------------------------------------------------
# EXPORT SECTION (Export type and filters)
#------------------------------------------------------------------------------
# Type of export. Values can be the following keyword:
# TABLE Export tables, constraints, indexes, ...
# PACKAGE Export packages
# INSERT Export data from table as INSERT statement
# COPY Export data from table as COPY statement
# VIEW Export views
# GRANT Export grants
# SEQUENCE Export sequences
# TRIGGER Export triggers
# FUNCTION Export functions
# PROCEDURE Export procedures
# TABLESPACE Export tablespace (PostgreSQL >= 8 only)
# TYPE Export user defined Oracle types
# PARTITION Export range or list partition (PostgreSQL >= v8.4)
# FDW Export table as foreign data wrapper tables
# MVIEW Export materialized view as snapshot refresh view
# QUERY Convert Oracle SQL queries from a file.
# KETTLE Generate XML ktr template files to be used by Kettle.
# DBLINK Generate oracle foreign data wrapper server to use as dblink.
# SYNONYM Export Oracle's synonyms as views on other schema's objects.
# DIRECTORY Export Oracle's directories as external_file extension objects.
# LOAD Dispatch a list of queries over multiple PostgreSQL connections.
# TEST Perform a diff between the Oracle and PostgreSQL databases.
# TEST_COUNT Perform only a row count between Oracle and PostgreSQL tables.
# TEST_VIEW Perform a count on both sides of the number of rows returned by views.
# TEST_DATA Perform data validation checks on rows at both sides.
# SEQUENCE_VALUES Export DDL to set the last values of sequences.
TYPE TABLE
# Set this to 1 if you don't want to export comments associated with tables and
# column definitions. By default comments are exported.
DISABLE_COMMENT 0
# Set which objects to export. By default Ora2Pg exports all objects.
# Value must be a list of object names or regexps separated by spaces. Note
# that regexps will not work with 8i databases, use the % placeholder instead,
# Ora2Pg will then use the LIKE operator. There is also some extended use of
# this directive, see chapter "Limiting object to export" in the documentation.
#ALLOW TABLE_TEST
# Set which objects to exclude from the export process. By default none. Value
# must be a list of object names or regexps separated by spaces. Note that regexps
# will not work with 8i databases, use the % placeholder instead, Ora2Pg will use
# the NOT LIKE operator. There is also some extended use of this directive,
# see chapter "Limiting object to export" in the documentation.
#EXCLUDE OTHER_TABLES
# By default Ora2Pg exclude from export some Oracle "garbage" tables that should
# never be part of an export. This behavior generates a lot of REGEXP_LIKE
# expressions which are slowing down the export when looking at tables. To disable
# this behavior enable this directive, you will have to exclude or clean up later
# by yourself the unwanted tables. The regexp used to exclude the table are
# defined in the array @EXCLUDED_TABLES in lib/Ora2Pg.pm. Note this is behavior
# is independant to the EXCLUDE configuration directive.
NO_EXCLUDED_TABLE 0
# Set which views to export as tables. By default none. Value must be a list of
# view names or regexps separated by spaces. If the object name is a view and the
# export type is TABLE, the view will be exported as a create table statement.
# If the export type is COPY or INSERT, the corresponding data will be exported.
#VIEW_AS_TABLE VIEW_NAME
# Set which materialized views to export as tables. By default none. Value must
# be a list of materialized view names or regexps separated by spaces. If the
# object name is a materialized view and the export type is TABLE, the view
# will be exported as a create table statement. If the export type is COPY or
# INSERT, the corresponding data will be exported.
#MVIEW_AS_TABLE VIEW_NAME
# By default Ora2Pg tries to order views to avoid errors at import time with
# nested views. With a huge number of views this can take a very long time,
# you can bypass this ordering by enabling this directive.
NO_VIEW_ORDERING 0
# When exporting GRANTs you can specify a comma separated list of objects
# for which privileges will be exported. Default is to export for all objects.
# Here are the possible values: TABLE, VIEW, MATERIALIZED VIEW, SEQUENCE,
# PROCEDURE, FUNCTION, PACKAGE BODY, TYPE, SYNONYM, DIRECTORY. Only one object
# type is allowed at a time. For example set it to TABLE if you just want to
# export privileges on tables. You can use the -g option to overwrite it.
# When used, this directive prevents the export of users unless it is set to
# USER. In this case only user definitions are exported.
#GRANT_OBJECT TABLE
# By default Ora2Pg will export your external tables as file_fdw tables. If
# you don't want to export those tables at all, set the directive to 0.
EXTERNAL_TO_FDW 1
# Add a TRUNCATE TABLE instruction before loading data on COPY and INSERT
# export. When activated, the instruction will be added only if there's no
# global DELETE clause or one specific to the current table (see below).
TRUNCATE_TABLE 0
# Support for including a DELETE FROM ... WHERE clause filter before importing
# data, to delete some rows instead of truncating tables.
# Value is constructed as follows: TABLE_NAME[DELETE_WHERE_CLAUSE], or
# if you have only one where clause for all tables just put the delete
# clause as single value. Both are possible too. Here are some examples:
#DELETE 1=1 # Apply to all tables and delete all tuples
#DELETE TABLE_TEST[ID1='001'] # Apply only on table TABLE_TEST
#DELETE TABLE_TEST[ID1='001' OR ID1='002'] DATE_CREATE > '2001-01-01' TABLE_INFO[NAME='test']
# The last one applies two different delete where clauses on tables TABLE_TEST and
# TABLE_INFO and a generic delete where clause on DATE_CREATE to all other tables.
# If TRUNCATE_TABLE is enabled it will be applied to all tables not covered by
# the DELETE definition.
# When enabled this directive forces Ora2Pg to export all tables, index
# constraints, and indexes using the tablespace names defined in the Oracle database.
# This works only with tablespaces that are not TEMP, USERS and SYSTEM.
USE_TABLESPACE 0
# Enable this directive to reorder columns and minimize the footprint
# on disk, so that more rows fit on a data page, which is the most important
# factor for speed. Default is the same order as in the Oracle table definition,
# which should be enough for most usages.
REORDERING_COLUMNS 0
# Support for including a WHERE clause filter when dumping the contents
# of tables. Value is constructed as follows: TABLE_NAME[WHERE_CLAUSE], or
# if you have only one where clause for all tables just put the where
# clause as value. Both are possible too. Here are some examples:
#WHERE 1=1 # Apply to all tables
#WHERE TABLE_TEST[ID1='001'] # Apply only on table TABLE_TEST
#WHERE TABLE_TEST[ID1='001' OR ID1='002'] DATE_CREATE > '2001-01-01' TABLE_INFO[NAME='test']
# The last one applies two different where clauses on tables TABLE_TEST and
# TABLE_INFO and a generic where clause on DATE_CREATE to all other tables.
# Sometimes you may want to extract data from an Oracle table but you need a
# custom query for that. Not just a "SELECT * FROM table" like Ora2Pg does
# but a more complex query. This directive allows you to override the query
# used by Ora2Pg to extract data. The format is TABLENAME[SQL_QUERY].
# If you have multiple tables to extract by replacing the Ora2Pg query, you can
# define multiple REPLACE_QUERY lines.
#REPLACE_QUERY EMPLOYEES[SELECT e.id,e.firstname,e.lastname FROM EMPLOYEES e JOIN EMP_UPDT u ON (e.id=u.id AND u.cdate>'2014-08-01 00:00:00')]
# To add a DROP <OBJECT> IF EXISTS before creating the object, enable
# this directive. Can be useful in an iterative work. Default is disabled.
DROP_IF_EXISTS 0
# PostgreSQL does not support Global Temporary Tables natively but you can use
# the pgtt extension to emulate this behavior. Enable this directive to export
# global temporary tables.
EXPORT_GTT 0
# By default the pgtt extension is loaded using the superuser privilege, set it
# to 1 if you run the generated SQL scripts as a non-superuser. It will
# use LOAD '\$libdir/plugins/pgtt' instead of LOAD 'pgtt'.
PGTT_NOSUPERUSER 0
#------------------------------------------------------------------------------
# FULL TEXT SEARCH SECTION (Control full text search export behaviors)
#------------------------------------------------------------------------------
# Force Ora2Pg to translate Oracle Text indexes into PostgreSQL indexes using
# the pg_trgm extension. Default is to translate CONTEXT indexes into FTS indexes
# and CTXCAT indexes using pg_trgm. Most of the time using pg_trgm is enough,
# which is what this directive is for.
#
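# For example (hypothetical table and column), a text index translated with
# pg_trgm would look like:
#
# CREATE INDEX t_document_title_trgm_idx ON t_document
# USING gin (title gin_trgm_ops);
#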
CONTEXT_AS_TRGM 0
# By default Ora2Pg creates a function-based index to translate Oracle Text
# indexes.
# CREATE INDEX ON t_document
# USING gin(to_tsvector('french', title));
# You will have to rewrite the CONTAINS() clause using to_tsvector(), example:
# SELECT id,title FROM t_document
# WHERE to_tsvector(title) @@ to_tsquery('search_word');
#
# To force Ora2Pg to create an extra tsvector column with a dedicated trigger
# for FTS indexes, disable this directive. In this case, Ora2Pg will add the
# column as follows: ALTER TABLE t_document ADD COLUMN tsv_title tsvector;
# Then update the column to compute FTS vectors if data have been loaded before:
# UPDATE t_document SET tsv_title =
# to_tsvector('french', coalesce(title,''));
# To automatically update the column when a modification in the title column
# appears, Ora2Pg adds the following trigger:
#
# CREATE FUNCTION tsv_t_document_title() RETURNS trigger AS \$\$
# BEGIN
# IF TG_OP = 'INSERT' OR new.title != old.title THEN
# new.tsv_title :=
# to_tsvector('french', coalesce(new.title,''));
# END IF;
# return new;
# END
# \$\$ LANGUAGE plpgsql;
# CREATE TRIGGER trig_tsv_t_document_title BEFORE INSERT OR UPDATE
# ON t_document
# FOR EACH ROW EXECUTE PROCEDURE tsv_t_document_title();
#
# When the Oracle Text index is defined over multiple columns, Ora2Pg will use
# setweight() to set a weight in the order of the column declaration.
#
FTS_INDEX_ONLY 1
# Use this directive to force the text search configuration to use. When it is not
# set, Ora2Pg will autodetect the stemmer used by Oracle for each index and
# fall back to pg_catalog.english if nothing is found.
#
#FTS_CONFIG pg_catalog.french
# If you want to perform your text search in an accent insensitive way, enable
# this directive. Ora2Pg will create a helper function over unaccent() and
# create the pg_trgm indexes using this function. With FTS Ora2Pg will
# redefine your text search configuration, for example:
#
# CREATE TEXT SEARCH CONFIGURATION fr (COPY = pg_catalog.french);
# ALTER TEXT SEARCH CONFIGURATION fr
# ALTER MAPPING FOR hword, hword_part, word WITH unaccent, french_stem;
#
# When enabled, Ora2pg will create the wrapper function:
#
# CREATE OR REPLACE FUNCTION unaccent_immutable(text)
# RETURNS text AS
# \$\$
# SELECT public.unaccent('public.unaccent', \$1)
# \$\$ LANGUAGE sql IMMUTABLE
# COST 1;
#
# Indexes are exported as follows:
#
# CREATE INDEX t_document_title_unaccent_trgm_idx ON t_document
# USING gin (unaccent_immutable(title) gin_trgm_ops);
#
# In your queries you will need to use the same function in the search to
# be able to use the function-based index. Example:
#
# SELECT * FROM t_document
# WHERE unaccent_immutable(title) LIKE '%donnees%';
#
USE_UNACCENT 0
# Same as above but call lower() in the unaccent_immutable() function:
#
# CREATE OR REPLACE FUNCTION unaccent_immutable(text)
# RETURNS text AS
# \$\$
# SELECT lower(public.unaccent('public.unaccent', \$1));
# \$\$ LANGUAGE sql IMMUTABLE;
#
USE_LOWER_UNACCENT 0
#------------------------------------------------------------------------------
# DATA DIFF SECTION (only delete and insert actually changed rows)
#------------------------------------------------------------------------------
# EXPERIMENTAL! Not yet working correctly with partitioned tables, parallelism,
# and direct Postgres connection! Test before using in production!
# This feature affects SQL output for data (INSERT or COPY).
# The deletion and (re-)importing of data is redirected to temporary tables
# (with configurable suffix) and matching entries (i.e. quasi-unchanged rows)
# eliminated before actual application of the DELETE, UPDATE and INSERT.
# Optional functions can be specified that are called before or after the
# actual DELETE, UPDATE and INSERT per table, or after all tables have been
# processed.
#
# Enable DATADIFF functionality
DATADIFF 0
# Use UPDATE where changed columns can be matched by the primary key
# (otherwise rows are DELETEd and re-INSERTed, which may interfere with
# inverse foreign key relationships!)
DATADIFF_UPDATE_BY_PKEY 0
# Suffix for temporary tables holding rows to be deleted and to be inserted.
# Pay attention to your table names:
# 1) There had better be no two tables with names such that name1 + suffix = name2
# 2) length(suffix) + length(tablename) < NAMEDATALEN (usually 64)
DATADIFF_DEL_SUFFIX _del
DATADIFF_UPD_SUFFIX _upd
DATADIFF_INS_SUFFIX _ins
# Allow setting the work_mem and temp_buffers parameters
# to keep temp tables in memory and have efficient sorting, etc.
DATADIFF_WORK_MEM 256 MB
DATADIFF_TEMP_BUFFERS 512 MB
# The following are names of functions that will be called (via SELECT)
# after the temporary tables have been reduced (by removing matching rows)
# and right before or right after the actual DELETE and INSERT are performed.
# They must take four arguments, which should ideally be of type "regclass",
# representing the real table, the "deletions", the "updates", and the
# "insertions" temp table names, respectively. They are called before
# re-activation of triggers, indexes, etc. (if configured).
#DATADIFF_BEFORE my_datadiff_handler_function
#DATADIFF_AFTER my_datadiff_handler_function
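# As an illustration only (hypothetical body, reusing the handler name above),
# such a function could be declared as:
#
# CREATE FUNCTION my_datadiff_handler_function(regclass, regclass, regclass, regclass)
# RETURNS void AS \$\$
# BEGIN
#     RAISE NOTICE 'datadiff applied on table %', \$1;
# END;
# \$\$ LANGUAGE plpgsql;
#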
# Another function can be called (via SELECT) right before the entire COMMIT
# (i.e., after re-activation of indexes, triggers, etc.), which will be
# passed Postgres ARRAYs of the table names of the real tables, the
# "deletions", the "updates" and the "insertions" temp tables, respectively,
# with the same array index positions belonging together. So this function should
# take four arguments of type regclass[].
#DATADIFF_AFTER_ALL my_datadiff_bunch_handler_function
# If in doubt, use schema-qualified function names here.
# The search_path will have been set to PG_SCHEMA if EXPORT_SCHEMA == 1
# (as defined by you in those config parameters, see above),
# i.e., the "public" schema is not contained if EXPORT_SCHEMA == 1
#------------------------------------------------------------------------------
# CONSTRAINT SECTION (Control constraints export and import behaviors)
#------------------------------------------------------------------------------
# Support for turning off certain schema features on the PostgreSQL side
# during schema export. Values can be: fkeys, pkeys, ukeys, indexes, checks
# separated by a space character.
# fkeys : turn off foreign key constraints
# pkeys : turn off primary keys
# ukeys : turn off unique column constraints
# indexes : turn off all other index types
# checks : turn off check constraints
#SKIP fkeys pkeys ukeys indexes checks
# By default the names of primary and unique keys in the source Oracle database
# are ignored and key names are autogenerated in the target PostgreSQL database
# with the PostgreSQL internal default naming rules. If you want to preserve
# Oracle primary and unique key names set this option to 1.
# Please note that if USE_TABLESPACE is set to 1, this option is forced to 1
# to preserve correct primary and unique key allocation to tablespaces.
KEEP_PKEY_NAMES 0
# Enable this directive if you want to add primary key definitions inside the
# create table statements. If disabled (the default) primary key definitions
# will be added with an alter table statement. Enable it if you are exporting
# to a Greenplum PostgreSQL database.
PKEY_IN_CREATE 0
# This directive allows you to add an ON UPDATE CASCADE option to a foreign
# key when an ON DELETE CASCADE is defined, or always. Oracle does not support
# this feature, you have to use a trigger to emulate ON UPDATE CASCADE.
# As PostgreSQL has this feature, you can choose how to add the foreign
# key option. There are three values for this directive: never, the default,
# which means that foreign keys will be declared exactly like in Oracle;
# delete, which means that the ON UPDATE CASCADE option
# will be added only if an ON DELETE CASCADE is already defined on the
# foreign keys; and always, which will force all foreign keys to be
# defined using the update option.
FKEY_ADD_UPDATE never
# When exporting tables, Ora2Pg normally exports constraints as they are;
# if they are non-deferrable they are exported as non-deferrable.
# However, non-deferrable constraints will probably cause problems when
# attempting to import data to PostgreSQL. The following option set to 1
# will cause all foreign key constraints to be exported as deferrable
FKEY_DEFERRABLE 0
# In addition when exporting data the DEFER_FKEY option set to 1 will add
# a command to defer all foreign key constraints during data export and
# the import will be done in a single transaction. This will work only if
# foreign keys have been exported as deferrable and you are not using direct
# import to PostgreSQL (PG_DSN is not defined). Constraints will then be
# checked at the end of the transaction. This directive can also be enabled
# if you want to force all foreign keys to be created as deferrable and
# initially deferred during schema export (TABLE export type).
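# A simplified sketch (illustration only) of what the deferred-constraints data
# transaction looks like:
#
# BEGIN;
# SET CONSTRAINTS ALL DEFERRED;
# -- data import
# COMMIT;
#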
DEFER_FKEY 0
# If deferring foreign keys is not possible due to the amount of data in a
# single transaction, you've not exported foreign keys as deferrable or you
# are using direct import to PostgreSQL, you can use the DROP_FKEY directive.
# It will drop all foreign keys before all data import and recreate them at
# the end of the import.
DROP_FKEY 0
#------------------------------------------------------------------------------
# TRIGGERS AND SEQUENCES SECTION (Control triggers and sequences behaviors)
#------------------------------------------------------------------------------
# Disables alter of sequences on all tables in COPY or INSERT mode.
# Set to 1 if you want to disable the update of sequences during data migration.
DISABLE_SEQUENCE 0
# Disables triggers on all tables in COPY or INSERT mode. Available modes
# are USER (user defined triggers) and ALL (includes RI system
# triggers). Default is 0: do not add SQL statements to disable triggers.
# If you want to disable triggers during data migration, set the value to
# USER if you are connected as a non-superuser and ALL if you are connected
# as a PostgreSQL superuser. A value of 1 is equal to USER.
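# A simplified sketch (hypothetical table name) of the kind of statements added
# around the data import:
#
# ALTER TABLE mytable DISABLE TRIGGER USER;
# -- data import
# ALTER TABLE mytable ENABLE TRIGGER USER;
#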
DISABLE_TRIGGERS 0
#------------------------------------------------------------------------------
# OBJECT MODIFICATION SECTION (Control objects structure or name modifications)
#------------------------------------------------------------------------------
# You may wish to just extract data from some fields, the following directives
# will help you to do that. Works only with export types INSERT or COPY.
# Modify output from the following tables (fields separated by space or comma)
#MODIFY_STRUCT TABLE_TEST(dico,dossier)
# When there are a lot of columns dropped in the target database compared to the
# Oracle database, being able to exclude columns from data export will simplify
# the configuration compared to MODIFY_STRUCT. The following directive can be
# used to define the columns per table that must be excluded from data export.
#EXCLUDE_COLUMNS TABLE_TEST(dropcol1,dropcol2)
# You may wish to change table names during data extraction, especially for
# replication use. Give a list of tables separated by spaces as follows.
#REPLACE_TABLES ORIG_TB_NAME1:NEW_TB_NAME1 ORIG_TB_NAME2:NEW_TB_NAME2
# You may wish to change column names during export. Give a list of tables
# and columns separated by commas as follows.
#REPLACE_COLS TB_NAME(ORIG_COLNAME1:NEW_COLNAME1,ORIG_COLNAME2:NEW_COLNAME2)
# By default all object names are converted to lower case, if you
# want to preserve Oracle object names as-is set this to 1. Not recommended
# unless you always quote all tables and columns in all your scripts.
PRESERVE_CASE 0
# Add the given value as suffix to index names. Useful if you have indexes
# with the same names as tables. Not so common but it can help.
#INDEXES_SUFFIX _idx
# Enable this directive to rename all indexes using tablename_columns_names.
# Could be very useful for databases that have the same index name multiple times
# or that use the same name as a table, which is not allowed by PostgreSQL.
# Disabled by default.
INDEXES_RENAMING 0
# Operator classes text_pattern_ops, varchar_pattern_ops, and bpchar_pattern_ops
# support B-tree indexes on the corresponding types. The difference from the
# default operator classes is that the values are compared strictly character by
# character rather than according to the locale-specific collation rules. This
# makes these operator classes suitable for use by queries involving pattern
# matching expressions (LIKE or POSIX regular expressions) when the database
# does not use the standard "C" locale. If you enable, with value 1, this will
# force Ora2Pg to export all indexes defined on varchar2() and char() columns
# using those operators. If you set it to a value greater than 1 it will only
# change indexes on columns where the charactere limit is greater or equal than
# this value. For example, set it to 128 to create these kind of indexes on
# columns of type varchar2(N) where N >= 128.
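# For example (hypothetical table and column), an index exported with an
# operator class would look like:
#
# CREATE INDEX t_document_title_idx ON t_document (title varchar_pattern_ops);
#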
USE_INDEX_OPCLASS 0
# Enable this directive if you want your partition tables to be
# renamed. Disabled by default. If you have multiple partitioned tables,
# when exported to PostgreSQL some partitions could have the same name
# but different parent tables. This is not allowed, table names must be
# unique, in this case enable this directive.
RENAME_PARTITION 0
# If you don't want to reproduce the partitioning like in Oracle and want to
# export all partitioned Oracle data into the main single table in PostgreSQL
# enable this directive. Ora2Pg will export all data into the main table name.
# Default is to use partitioning, Ora2Pg will export data from each partition
# and import them into the PostgreSQL dedicated partition table.
DISABLE_PARTITION 0
# How to export partition by reference. Possible values are none, duplicate or
# the number of hash partition to create. Default is 'none' to not export the
# partitions by reference.
PARTITION_BY_REFERENCE none
# Activating this directive will force Ora2Pg to add WITH (OIDS) when creating
# tables or views as tables. Default is same as PostgreSQL, disabled.
WITH_OID 0
# Allow escaping of column names using Oracle reserved words.
ORA_RESERVED_WORDS audit,comment,references
# Enable this directive if you have table or column names that are reserved
# words for PostgreSQL. Ora2Pg will double quote the names of these objects.
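# For example (hypothetical table name), a table named user would then be
# exported as: CREATE TABLE "user" (...);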
USE_RESERVED_WORDS 0
# By default Ora2Pg exports Oracle tables with the NOLOGGING attribute as
# UNLOGGED tables. You may want to fully disable this feature because
# you will lose all data from unlogged tables in case of a PostgreSQL crash.
# Set it to 1 to export all tables as normal tables.
DISABLE_UNLOGGED 0
# Increase varchar max character constraints to support PostgreSQL two bytes
# character encoding when the source database applies the length constraint
# on characters not bytes. Default disabled.
DOUBLE_MAX_VARCHAR 0
#------------------------------------------------------------------------------
# OUTPUT SECTION (Control output to file or PostgreSQL database)
#------------------------------------------------------------------------------
# Define the following directive to send the export directly to a PostgreSQL
# database, this will disable file output. Note that these directives are only
# used for data export, other exports need to be imported manually through the
# use of psql or any other PostgreSQL client.
#PG_DSN dbi:Pg:dbname=test_db;host=localhost;port=5432
#PG_USER test
#PG_PWD test
# By default all output is dumped to STDOUT if not sent directly to a PostgreSQL
# database (see above). Give a filename to save the export to it. If you want
# a Gzip'd compressed file just add the extension .gz to the filename (you
# need the Perl module Compress::Zlib from CPAN). Add extension .bz2 to use Bzip2
# compression.
OUTPUT output.sql
# Base directory where all dumped files must be written
#OUTPUT_DIR /var/tmp
# Path to the bzip2 program. See OUTPUT directive above.
BZIP2 $bzip2
# Allow object constraints to be saved in a separate file during schema export.
# The file will be named CONSTRAINTS_OUTPUT, where OUTPUT is the value of the
# corresponding configuration directive. You can use a .gz or .bz2 extension to
# enable compression. Default is to save all data in the OUTPUT file. This
# directive is usable only with the TABLE export type.
FILE_PER_CONSTRAINT 0
# Allow indexes to be saved in a separate file during schema export. The file
# will be named INDEXES_OUTPUT, where OUTPUT is the value of the corresponding
# configuration directive. You can use a .gz or .bz2 file extension to
# enable compression. Default is to save all data in the OUTPUT file. This
# directive is usable only with TABLE or TABLESPACE export types. With the
# TABLESPACE export, it is used to write "ALTER INDEX ... TABLESPACE ..." into
# a separate file named TBSP_INDEXES_OUTPUT that can be loaded at the end of the
# migration after the index creation to move the indexes.
FILE_PER_INDEX 0
# Allow foreign key declaration to be saved in a separate file during
# schema export. By default foreign keys are exported into the main
# output file or in the CONSTRAINT_output.sql file. When enabled foreign
# keys will be exported into a file named FKEYS_output.sql
FILE_PER_FKEYS 0
# Allow data export to be saved in one file per table/view. The files
# will be named as tablename_OUTPUT, where OUTPUT is the value of the
# corresponding configuration directive. You can use a .gz or .bz2
# extension to enable compression. Default is to save all data in one
# file. This is usable only during INSERT or COPY export types.
FILE_PER_TABLE 0
# Allow function export to be saved in one file per function/procedure.
# The files will be named as funcname_OUTPUT, where OUTPUT is the value
# of the corresponding configuration directive. You can use a .gz or .bz2
# extension to enable compression. Default is to save all data in one
# file. It is usable during FUNCTION, PROCEDURE, TRIGGER and PACKAGE
# export types.
FILE_PER_FUNCTION 0
# By default Ora2Pg will force Perl to use utf8 I/O encoding. This is done through
# a call to the Perl pragma:
#
# use open ':utf8';
#
# You can override this encoding by using the BINMODE directive, for example you
# can set it to :locale to use your locale or iso-8859-7, it will respectively use
#
# use open ':locale';
# use open ':encoding(iso-8859-7)';
#
# If you have changed NLS_LANG to a non UTF8 encoding, you might want to set this
# directive. See http://perldoc.perl.org/5.14.2/open.html for more information.
# Most of the time, you may leave this directive commented.
#BINMODE utf8
# Set it to 0 to not include the call to \\set ON_ERROR_STOP ON in all SQL
# scripts. By default this command is always present.
STOP_ON_ERROR 1
# Enable this directive to use COPY FREEZE instead of a simple COPY to
# export data with rows already frozen. This is intended as a performance
# option for initial data loading. Rows will be frozen only if the table
# being loaded has been created or truncated in the current subtransaction.
# This will only work with export to file and when -J or ORACLE_COPIES is
# not set or defaults to 1. It can be used with direct import into PostgreSQL
# under the same condition but -j or JOBS must also be unset or default to 1.
COPY_FREEZE 0
# By default Ora2Pg uses CREATE OR REPLACE in functions and views DDL, if you
# do not want to override existing functions or views disable this configuration
# directive, DDL will not include OR REPLACE.
CREATE_OR_REPLACE 1
# This directive can be used to send an initial command to PostgreSQL, just
# after the connection. For example to set some session parameters. This
# directive can be used multiple times.
#PG_INITIAL_COMMAND
# Add an ON CONFLICT DO NOTHING to all INSERT statements generated for this
# type of data export.
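# For example (hypothetical table and columns), generated statements then look like:
# INSERT INTO mytable (id, name) VALUES (1, 'a') ON CONFLICT DO NOTHING;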
INSERT_ON_CONFLICT 0
#------------------------------------------------------------------------------
# TYPE SECTION (Control type behaviors and redefinitions)
#------------------------------------------------------------------------------
# If you're experiencing problems in data type export, the following directive
# will help you to redefine data type translation used in Ora2Pg. The syntax is
# a comma separated list of "Oracle datatype:Postgresql data type". Here are the
# data types that can be redefined and their default values. If you want to
# replace a type with a precision and scale you need to escape the comma with
# a backslash. For example, if you want to replace all NUMBER(*,0) into bigint
# instead of numeric(38), add the following:
# DATA_TYPE NUMBER(*\\,0):bigint
# Here is the default replacement for all Oracle's types. You don't have to
# recopy all type conversions but just the ones you want to rewrite.
#DATA_TYPE VARCHAR2:varchar,NVARCHAR2:varchar,NVARCHAR:varchar,NCHAR:char,DATE:timestamp(0),LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW(16):uuid,RAW(32):uuid,RAW:bytea,UROWID:oid,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:integer,INTEGER:integer,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
# If set to 1 replace portable numeric types with PostgreSQL internal types.
# Oracle data type NUMBER(p,s) is approximately converted to the real and
# float PostgreSQL data types. If you have monetary fields or don't want
# rounding issues with the extra decimals you should preserve the same
# numeric(p,s) PostgreSQL data type. Do that only if you need exactness
# because using numeric(p,s) is slower than using real or double.
PG_NUMERIC_TYPE 1
# If set to 1 replace portable numeric types with PostgreSQL internal types.
# Oracle data types NUMBER(p) or NUMBER are converted to the smallint, integer
# or bigint PostgreSQL data types depending on the precision. NUMBER
# without precision is converted following DEFAULT_NUMERIC (see below).
PG_INTEGER_TYPE 1
# NUMBER() without precision is converted by default to bigint only if
# PG_INTEGER_TYPE is true. You can overwrite this value with any PG type,
# like integer or float.
DEFAULT_NUMERIC bigint
# Set it to 0 if you don't want to export milliseconds from Oracle timestamp
# columns. Timestamps will then be formatted with to_char(..., 'YYYY-MM-DD HH24:MI:SS').
# When this directive is enabled (the default), the format is 'YYYY-MM-DD HH24:MI:SS.FF'.
ENABLE_MICROSECOND 1
# If you want to replace some columns with PostgreSQL boolean, define here a list
# of tables and columns separated by spaces as follows. You can also give a type
# and a precision to automatically convert all fields of that type as a boolean.
# For example: NUMBER:1 or CHAR:1 will replace any field of type number(1) or
# char(1) as a boolean in all exported tables.
#REPLACE_AS_BOOLEAN TB_NAME1:COL_NAME1 TB_NAME1:COL_NAME2 TB_NAME2:COL_NAME2
# Use this to add additional definitions of the possible boolean values in Oracle
# fields. You must set a space separated list of TRUE:FALSE values. By default:
#BOOLEAN_VALUES yes:no y:n 1:0 true:false enabled:disabled
# When Ora2Pg find a "zero" date: 0000-00-00 00:00:00 it is replaced by a NULL.
# This could be a problem if your column is defined with NOT NULL constraint.
# If you can not remove the constraint, use this directive to set an arbitral
# date that will be used instead. You can also use -INFINITY if you don't want
# to use a fake date.
#REPLACE_ZERO_DATE 1970-01-01 00:00:00
# Sometimes you need to force the destination type, for example a column
# exported as timestamp by Ora2Pg can be forced into type date. Value is
# a comma-separated list of TABLE:COLUMN:TYPE structures. If you need to use
# a comma or space inside the type definition you will have to backslash them.
#
# MODIFY_TYPE TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
#
# Type of table1.col3 will be replaced by a varchar and table1.col4 by
# a decimal with precision and scale.
#
# If the column's type is a user defined type Ora2Pg will autodetect the
# composite type and will export its data using ROW(). Some Oracle user
# defined types are just arrays of a native type, in this case you may want
# to transform this column into a simple array of a PostgreSQL native type.
# To do so, just redefine the destination type as wanted and Ora2Pg will
# also transform the data into an array. For example, with the following
# definition in Oracle:
#
# CREATE OR REPLACE TYPE mem_type IS VARRAY(10) of VARCHAR2(15);
# CREATE TABLE club (Name VARCHAR2(10),
# Address VARCHAR2(20),
# City VARCHAR2(20),
# Phone VARCHAR2(8),
# Members mem_type
# );
#
# custom type "mem_type" is just a string array and can be translated into
# the following in PostgreSQL:
#
# CREATE TABLE club (
# name varchar(10),
# address varchar(20),
# city varchar(20),
# phone varchar(8),
# members text[]
# ) ;
#
# To do so, just use the directive as follow:
#
# MODIFY_TYPE CLUB:MEMBERS:text[]
#
# Ora2Pg will take care of transforming all data of this column into the correct
# format. Only arrays of character and numeric types are supported.
#MODIFY_TYPE
# By default Oracle calls to the TO_NUMBER function will be translated as a cast
# into numeric. For example, TO_NUMBER('10.1234') is converted into the PostgreSQL
# call to_number('10.1234')::numeric. If you want you can cast the call to integer
# or bigint by changing the value of the configuration directive. If you need
# better control of the format, just set it as value, for example:
# TO_NUMBER_CONVERSION 99999999999999999999.9999999999
# will convert the code above as:
# TO_NUMBER('10.1234', '99999999999999999999.9999999999')
# Any value of the directive that is not numeric, integer or bigint will
# be taken as a mask format. If set to none, no conversion will be done.
TO_NUMBER_CONVERSION numeric
# By default varchar2 without size constraint is translated into text. If you
# want to keep the varchar type name, disable this directive.
VARCHAR_TO_TEXT 1
# Usually identity columns must be bigint to correspond to an auto increment
# sequence. If, for any reason, you want Ora2Pg to respect the DATA_TYPE you have
# set, disable this directive.
FORCE_IDENTITY_BIGINT 1
# Remove the timezone part from the format of the TO_CHAR() function.
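# For example (illustration only), TO_CHAR(d, 'YYYY-MM-DD HH24:MI:SS TZR') would
# become TO_CHAR(d, 'YYYY-MM-DD HH24:MI:SS') in the converted code.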
TO_CHAR_NOTIMEZONE 1
#------------------------------------------------------------------------------
# GRANT SECTION (Control privilege and owner export)
#------------------------------------------------------------------------------
# Set this to 1 to replace the default password for all extracted users
# during GRANT export.
GEN_USER_PWD 0
# By default the owner of database objects is the one you're using to connect
# to PostgreSQL. If you use another user (e.g. postgres) you can force
# Ora2Pg to set the object owner to be the one used in the Oracle database by
# setting the directive to 1, or to a completely different username by setting
# the directive value to that username.
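# As an illustration (hypothetical names), the export then contains statements
# such as: ALTER TABLE mytable OWNER TO appowner;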
FORCE_OWNER 0
# Ora2Pg uses the function's security privileges set in Oracle, and they are often
# defined as SECURITY DEFINER. If you want to override those security privileges
# for all functions and use SECURITY INVOKER instead, enable this directive.
FORCE_SECURITY_INVOKER 0
#------------------------------------------------------------------------------
# DATA SECTION (Control data export behaviors)
#------------------------------------------------------------------------------
# Extract data in bulks of DATA_LIMIT tuples at once. Default 10000. If you set
# a high value be sure to have enough memory if you have millions of rows.
DATA_LIMIT $DATA_LIMIT_DEFAULT
# When Ora2Pg detects a table with some BLOBs it will automatically reduce the
# value of this directive by dividing it by 10 until its value is below 1000.
# You can control this value by setting BLOB_LIMIT. Exporting BLOBs uses a lot of
# resources, setting it too high can produce OOM.
#BLOB_LIMIT 500
# Apply the same behavior on CLOBs as on BLOBs with the BLOB_LIMIT setting. This is
# especially useful if you have large CLOB data.
CLOB_AS_BLOB 1
# By default all data that is not of type date or time is escaped. If you
# experience any problem with that you can set it to 1 to disable escaping. This
# directive is only used during a COPY export type.
# See STANDARD_CONFORMING_STRINGS for enabling/disabling escape with INSERT
# statements.
NOESCAPE 0
# This directive may be used if you want to change the default isolation
# level of the data export transaction. Default is now to set the level
# to a serializable transaction to ensure data consistency. Here are the
# allowed values of this directive: readonly, readwrite, serializable and
# committed (read committed).
TRANSACTION serializable
# This controls whether ordinary string literals ('...') treat backslashes
# literally, as specified in the SQL standard. This was the default before Ora2Pg
# v8.5 so that all strings were escaped first; now this is on by default, causing
# Ora2Pg to use the escape string syntax (E'...') if this parameter is not
# set to 0. This is the exact behavior of the same option in PostgreSQL.
# This directive is only used during INSERT export to build INSERT statements.
# See NOESCAPE for enabling/disabling escape in COPY statements.
STANDARD_CONFORMING_STRINGS 1
# Use this directive to set the database handle's 'LongReadLen' attribute to
# a value that will be larger than the expected size of the LOBs. The default
# is 1MB which may not be enough to extract BLOB objects. If the size of the LOB
# exceeds the 'LongReadLen', DBD::Oracle will return an 'ORA-24345: A Truncation'
# error. Default: 1023*1024 bytes. Take a look at this page to learn more:
# http://search.cpan.org/~pythian/DBD-Oracle-1.22/Oracle.pm#Data_Interface_for_Persistent_LOBs
#
# Important note: If you increase the value of this directive take care that
# DATA_LIMIT will probably need to be reduced. Even if you only have a 1MB blob,
# trying to read 10000 of them (the default DATA_LIMIT) all at once will require
# 10GB of memory. You may extract data from those tables separately and set
# DATA_LIMIT to 500 or lower, otherwise you may experience some out of memory errors.
#LONGREADLEN 1047552
# If you want to bypass the 'ORA-24345: A Truncation' error, set this directive
# to 1, it will truncate the data extracted to the LongReadLen value.
#LONGTRUNCOK 0
# Disable this if you want to load full content of BLOB and CLOB and not use
# LOB locators. In this case you will have to set LONGREADLEN to the right
# value. Note that this will not improve the speed of BLOB export as most of the time is
# consumed by the bytea escaping, and in this case export is done line by line
# and not by chunks of DATA_LIMIT rows. For more information on how it works, see
# http://search.cpan.org/~pythian/DBD-Oracle-1.74/lib/DBD/Oracle.pm#Data_Interface_for_LOB_Locators
# Default is enabled, it uses LOB locators.
USE_LOB_LOCATOR 1