SET TERMOUT OFF;
COLUMN current_instance NEW_VALUE current_instance NOPRINT;
SELECT rpad(instance_name, 17) current_instance FROM v$instance;
SET TERMOUT ON;
PROMPT
PROMPT +------------------------------------------------------------------------+
PROMPT | Report : Concurrent Manager Job Status |
PROMPT | Instance : &current_instance |
PROMPT +------------------------------------------------------------------------+
SET ECHO OFF
SET FEEDBACK 6
SET HEADING ON
SET LINESIZE 180
SET PAGESIZE 50000
SET TERMOUT ON
SET TIMING OFF
SET TRIMOUT ON
SET TRIMSPOOL ON
SET VERIFY OFF
CLEAR COLUMNS
CLEAR BREAKS
CLEAR COMPUTES
COLUMN start_time FORMAT a20 HEADING "Start|Time"
COLUMN program_name FORMAT a50 HEADING "Program|Name"
COLUMN reqid FORMAT 9999999999 HEADING "Request|ID"
COLUMN tot_mins FORMAT 9999999 HEADING "Total|Run-Time|in Mins"
COLUMN hrs FORMAT 99999 HEADING "Running|Hrs"
COLUMN mins FORMAT 99999 HEADING "Running|Mins"
COLUMN secs FORMAT 99999 HEADING "Running|Secs"
COLUMN user_name FORMAT a18 HEADING "User|Name"
COLUMN oracle_sid FORMAT 99999 HEADING "Oracle|SID"
COLUMN serial# FORMAT 9999999 HEADING "Serial|#"
COLUMN phase FORMAT a5 HEADING "Phase|Code"
COLUMN status FORMAT a6 HEADING "Status|Code"
SELECT
r.request_id reqid
, TO_CHAR(r.actual_start_date, 'DD-MON-YYYY HH24:MI:SS') start_time
, u.user_name user_name
, r.phase_code phase
, r.status_code status
, FLOOR(((SYSDATE - r.actual_start_date)*24*60*60)/3600) hrs
, FLOOR((((SYSDATE - r.actual_start_date)*24*60*60) - FLOOR(((SYSDATE - r.actual_start_date)*24*60*60)/3600)*3600)/60) mins
, ROUND((((SYSDATE - r.actual_start_date)*24*60*60) - FLOOR(((SYSDATE - r.actual_start_date)*24*60*60)/3600)*3600 - (FLOOR((((SYSDATE - r.actual_start_date)*24*60*60) - FLOOR(((SYSDATE - r.actual_start_date)*24*60*60)/3600)*3600)/60)*60) )) secs
, (SYSDATE - r.actual_start_date)*24*60 tot_mins
, /* p.concurrent_program_id progid,*/
DECODE( p.user_concurrent_program_name
, 'Request Set Stage', 'RSS - '||r.description
, 'Report Set', 'RS - '||r.description
, p.user_concurrent_program_name ) program_name
, s.sid oracle_sid
, s.serial#
FROM
v$session s
, apps.fnd_user u
, apps.fnd_concurrent_processes pr
, apps.fnd_concurrent_programs_vl p
, apps.fnd_concurrent_requests r
WHERE
s.process = pr.os_process_id
AND pr.concurrent_process_id = r.controlling_manager
AND r.phase_code = 'R' -- and r.status_code = 'R'
AND r.requested_by = u.user_id
AND p.concurrent_program_id = r.concurrent_program_id
ORDER BY
1
/
erp_conc_manager_job_status.sql
erp_conc_manager_user_query.sql
SET TERMOUT OFF;
COLUMN current_instance NEW_VALUE current_instance NOPRINT;
SELECT rpad(instance_name, 17) current_instance FROM v$instance;
SET TERMOUT ON;
PROMPT
PROMPT +------------------------------------------------------------------------+
PROMPT | Report : Concurrent Manager Processes |
PROMPT | Instance : &current_instance |
PROMPT +------------------------------------------------------------------------+
SET ECHO OFF
SET FEEDBACK 6
SET HEADING ON
SET LINESIZE 180
SET PAGESIZE 50000
SET TERMOUT ON
SET TIMING OFF
SET TRIMOUT ON
SET TRIMSPOOL ON
SET VERIFY OFF
CLEAR COLUMNS
CLEAR BREAKS
CLEAR COMPUTES
COLUMN oracle_process_id FORMAT 9999999 HEADING 'PID';
COLUMN session_id FORMAT 9999999 HEADING 'Session ID';
COLUMN oracle_id FORMAT 9999999 HEADING 'Oracle ID';
COLUMN os_process_id FORMAT a10 HEADING 'OS PID';
COLUMN request_id FORMAT 9999999999 HEADING 'Request ID';
COLUMN requested_by FORMAT 9999999 HEADING 'Requested By';
COLUMN status_code FORMAT a6 HEADING 'Status';
COLUMN completion_text FORMAT a15 HEADING 'Text';
COLUMN user_id FORMAT 9999999 HEADING 'User ID';
COLUMN user_name FORMAT a10 HEADING 'User Name';
SELECT
c.os_process_id
, a.oracle_id
, a.request_id
, a.requested_by
, b.user_name
, a.phase_code
, a.completion_text
FROM
applsys.fnd_concurrent_requests a
, applsys.fnd_user b
, applsys.fnd_concurrent_processes c
WHERE
a.requested_by = b.user_id
AND c.concurrent_process_id = a.controlling_manager
AND a.phase_code in ('R', 'T')
ORDER BY
c.os_process_id
/
sess_current_user_transactions.sql
SET TERMOUT OFF;
COLUMN current_instance NEW_VALUE current_instance NOPRINT;
SELECT rpad(instance_name, 17) current_instance FROM v$instance;
SET TERMOUT ON;
PROMPT
PROMPT +------------------------------------------------------------------------+
PROMPT | Report : User Transactions |
PROMPT | Instance : &current_instance |
PROMPT +------------------------------------------------------------------------+
SET ECHO OFF
SET FEEDBACK 6
SET HEADING ON
SET LINESIZE 256
SET PAGESIZE 50000
SET TERMOUT ON
SET TIMING OFF
SET TRIMOUT ON
SET TRIMSPOOL ON
SET VERIFY OFF
CLEAR COLUMNS
CLEAR BREAKS
CLEAR COMPUTES
COLUMN sid FORMAT 999999 HEADING 'SID'
COLUMN serial_id FORMAT 99999999 HEADING 'Serial ID'
COLUMN oracle_username FORMAT a18 HEADING 'Oracle User'
COLUMN logon_time FORMAT a18 HEADING 'Login Time'
COLUMN owner FORMAT a20 HEADING 'Owner'
COLUMN object_type FORMAT a11 HEADING 'Object Type'
COLUMN object_name FORMAT a25 HEADING 'Object Name'
COLUMN locked_mode FORMAT a11 HEADING 'Locked Mode'
prompt
prompt +----------------------------------------------------+
prompt | Table Locking Information |
prompt +----------------------------------------------------+
SELECT
a.session_id sid
, c.serial# serial_id
, a.oracle_username oracle_username
, TO_CHAR(
c.logon_time,'mm/dd/yy hh24:mi:ss'
) logon_time
, b.owner owner
, b.object_type object_type
, b.object_name object_name
, DECODE(
a.locked_mode
, 0, 'None'
, 1, 'Null'
, 2, 'Row-S'
, 3, 'Row-X'
, 4, 'Share'
, 5, 'S/Row-X'
, 6, 'Exclusive'
) locked_mode
FROM
v$locked_object a
, dba_objects b
, v$session c
WHERE
a.object_id = b.object_id
AND a.session_id = c.sid
ORDER BY
b.owner
, b.object_type
, b.object_name
/
prompt
prompt +----------------------------------------------------+
prompt | User Transactions Information |
prompt +----------------------------------------------------+
COLUMN sid FORMAT 999999 HEADING 'SID'
COLUMN serial_id FORMAT 99999999 HEADING 'Serial ID'
COLUMN session_status FORMAT a9 HEADING 'Status'
COLUMN oracle_username FORMAT a18 HEADING 'Oracle User'
COLUMN os_username FORMAT a18 HEADING 'O/S User'
COLUMN os_pid FORMAT a8 HEADING 'O/S PID'
COLUMN trnx_start_time FORMAT a18 HEADING 'Trnx Start Time'
COLUMN current_time FORMAT a18 HEADING 'Current Time'
COLUMN elapsed_time FORMAT 999999999.99 HEADING 'Elapsed(mins)'
COLUMN undo_name FORMAT a10 HEADING 'Undo Name' TRUNC
COLUMN number_of_undo_records FORMAT 999,999,999,999 HEADING '# Undo Records'
COLUMN used_undo_blks FORMAT 999,999,999 HEADING 'Used Undo Blks'
COLUMN used_undo_size FORMAT 999,999,999 HEADING 'Used Undo (MB)'
COLUMN logical_io_blks FORMAT 999,999,999 HEADING 'Logical I/O (Blks)'
COLUMN logical_io_size FORMAT 999,999,999,999 HEADING 'Logical I/O (MB)'
COLUMN physical_io_blks FORMAT 999,999,999 HEADING 'Physical I/O (Blks)'
COLUMN physical_io_size FORMAT 999,999,999,999 HEADING 'Physical I/O (MB)'
COLUMN session_program FORMAT a26 HEADING 'Session Program' TRUNC
SELECT
s.sid sid
, s.status session_status
, s.username oracle_username
, p.spid os_pid
, TO_CHAR(
TO_DATE(
b.start_time
,'mm/dd/yy hh24:mi:ss'
)
, 'mm/dd/yy hh24:mi:ss'
) trnx_start_time
, ROUND(60*24*(sysdate-to_date(b.start_time,'mm/dd/yy hh24:mi:ss')),2) elapsed_time
, c.segment_name undo_name
, b.used_urec number_of_undo_records
, (b.used_ublk * d.value)/1024/1024 used_undo_size
, (b.log_io*d.value)/1024/1024 logical_io_size
, (b.phy_io*d.value)/1024/1024 physical_io_size
, s.program session_program
FROM
v$session s
, v$transaction b
, dba_rollback_segs c
, v$parameter d
, v$process p
WHERE
b.ses_addr = s.saddr
AND b.xidusn = c.segment_id
AND d.name = 'db_block_size'
AND p.ADDR = s.PADDR
ORDER BY 1
/
Article 5
XDB error during transportable tablespace export:
Export: Release 11.2.0.4.0 - Production on Thu Aug 23 14:10:01 2018
Copyright (c) 1982, 2011, Oracle and/or its affiliates. All rights reserved.
;;;
Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
Starting "SYS"."SYS_EXPORT_TRANSPORTABLE_01": /******** AS SYSDBA parfile=dp_ttsexp.par
Processing object type TRANSPORTABLE_EXPORT/PLUGTS_BLK
ORA-39127: unexpected error from call to export_string :=XDB.DBMS_CSX_ADMIN.INSTANCE_INFO_EXP('ATS_OTH_BRAND_CHGS_XML','ATS',0,1,'11.02.00.00.00',newblock)
ORA-06502: PL/SQL: numeric or value error: character string buffer too small
ORA-06512: at line 1
ORA-06512: at "SYS.DBMS_METADATA", line 9876
ORA-39127: unexpected error from call to export_string :=XDB.DBMS_CSX_ADMIN.INSTANCE_INFO_EXP('ATS_OTH_BRAND_CHGS_XML_BKP','FDBA',0,1,'11.02.00.00.00',newblock)
ORA-06502: PL/SQL: numeric or value error: character string buffer too small
ORA-06512: at line 1
ORA-06512: at "SYS.DBMS_METADATA", line 9876
ORA-39127: unexpected error from call to export_string :=XDB.DBMS_CSX_ADMIN.INSTANCE_INFO_EXP('WWV_FLOW_COLLECTION_MEMBERS$','APEX_050000',0,1,'11.02.00.00.00',newblock)
ORA-06502: PL/SQL: numeric or value error: character string buffer too small
ORA-06512: at line 1
ORA-06512: at "SYS.DBMS_METADATA", line 9876
Processing object type TRANSPORTABLE_EXPORT/PROCACT_INSTANCE
Processing object type TRANSPORTABLE_EXPORT/TABLE
Processing object type TRANSPORTABLE_EXPORT/PRE_TABLE_ACTION
TTS will not be successful until the XDB error is resolved.
So we need to uninstall XDB, and then reinstall XDB once the TTS is done.
Uninstalling XDB:
XDB Removal
The catnoqm.sql script drops XDB.
spool xdb_removal.log
set echo on;
connect / as sysdba
shutdown immediate;
startup
@?/rdbms/admin/catnoqm.sql
@?/rdbms/admin/catproc.sql
@?/rdbms/admin/utlrp.sql
set pagesize 1000
col owner format a8
col object_name format a35
select owner, object_name, object_type, status
from dba_objects
where status = 'INVALID' and owner = 'SYS';
spool off;
Then start the export again.
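The reinstall step is not shown above; a minimal sketch of the usual 11.2 XDB reinstall, assuming catqm.sql with its four arguments (XDB password, XDB default tablespace, temporary tablespace, SecureFiles flag) and SYSAUX/TEMP used here only for illustration — verify against the XDB install notes for your exact release.
-- Sketch only: reinstall XDB after the TTS is complete.
spool xdb_install.log
connect / as sysdba
-- catqm.sql <XDB password> <XDB default tablespace> <XDB temporary tablespace> <SecureFiles YES/NO>
@?/rdbms/admin/catqm.sql xdb SYSAUX TEMP YES
@?/rdbms/admin/utlrp.sql
spool off;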
Article 4
ORA-39920: Rollback segment BIG_RBS1 in tablespace not allowed in transportable set:
oracle@dbatlsfnydev01:/ATLDEV12c/bkp$ expdp parfile=dp_ttsexp.par
Export: Release 11.2.0.4.0 - Production on Fri Jul 13 11:35:36 2018
Copyright (c) 1982, 2011, Oracle and/or its affiliates. All rights reserved.
Username: /as sysdba
Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
Starting "SYS"."SYS_EXPORT_TRANSPORTABLE_01": /******** AS SYSDBA parfile=dp_ttsexp.par
ORA-39123: Data Pump transportable tablespace job aborted
ORA-39187: The transportable set is not self-contained, violation list is
ORA-39920: Rollback segment BIG_RBS1 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment BIG_RBS2 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment BIG_RBS3 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R71 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R72 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R73 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R74 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R75 in tablespace BATCH_RBS not allowed in transportable set.
ORA-39920: Rollback segment R76 in tablespace BATCH_RBS not allowed in transportable set.
Job "SYS"."SYS_EXPORT_TRANSPORTABLE_01" stopped due to fatal error at Mon Jul 16 10:43:57 2018 elapsed 0 00:03:33
The above error occurs because BATCH_RBS is a rollback segment (undo) tablespace.
We need to exclude that tablespace from the transportable set and restart the export.
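To confirm which segments force BATCH_RBS out of the transportable set, a quick check against dba_rollback_segs (the fix itself is simply removing BATCH_RBS from the transport_tablespaces line in dp_ttsexp.par):
select segment_name, tablespace_name, status
from dba_rollback_segs
where tablespace_name = 'BATCH_RBS';
-- Then remove BATCH_RBS from the transport_tablespaces list in dp_ttsexp.par and rerun expdp.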
Article 3
ORA-01110: data file 4: '/ATLDEV/oradata/atldevre/800P_tools01.dbf':
oracle@dbatlsfnydev01:/ATLDEV12c/bkp$ expdp parfile=dp_ttsexp.par
Export: Release 11.2.0.4.0 - Production on Fri Jul 13 11:35:36 2018
Copyright (c) 1982, 2011, Oracle and/or its affiliates. All rights reserved.
Username: /as sysdba
Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
Starting "SYS"."SYS_EXPORT_TRANSPORTABLE_01": /******** AS SYSDBA parfile=dp_ttsexp.par
Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
ORA-31626: job does not exist
ORA-31637: cannot create job SYS_EXPORT_SCHEMA_01 for user SYS
ORA-06512: at "SYS.DBMS_SYS_ERROR", line 95
ORA-06512: at "SYS.KUPV$FT_INT", line 798
ORA-39080: failed to create queues "KUPC$C_1_20180712152010" and "" for Data Pump job
ORA-06512: at "SYS.DBMS_SYS_ERROR", line 95
ORA-06512: at "SYS.KUPC$QUE_INT", line 1534
ORA-00604: error occurred at recursive SQL level 2
ORA-00372: file 4 cannot be modified at this time
ORA-01110: data file 4: '/ATLDEV/oradata/atldevre/800P_tools01.dbf'
We need to check for AQ objects in the tools tablespaces:
select segment_name, tablespace_name from dba_segments where segment_name ='AQ$_QUEUES';
If it is a table, move it to a different tablespace:
ALTER TABLE AQ$_QUEUES MOVE TABLESPACE SYSTEM;
If it is an index, rebuild it in a different tablespace:
ALTER INDEX AQ$_QUEUE_TABLES_PRIMARY REBUILD TABLESPACE SYSTEM;
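A slightly broader check can be useful before rerunning the export; a sketch that lists any remaining AQ segments in the affected tablespace (the '%TOOLS%' filter is an assumption based on the datafile name 800P_tools01.dbf):
select owner, segment_name, segment_type, tablespace_name
from dba_segments
where segment_name like 'AQ$%'
and tablespace_name like '%TOOLS%';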
Article 2
TRANSPORTABLE TABLESPACE STEPS WITHIN THE SAME PLATFORM:
Step 1:
Stop materialized view refresh jobs
drop materialized view ATS_GEO_COMPONENT_MV preserve table;
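To see which refresh jobs need to be stopped first, a quick check against dba_jobs (a sketch; the '%refresh%' filter is only an illustration, and a job can then be marked broken with dbms_job.broken for the TTS window):
select job, schema_user, next_date, broken, what
from dba_jobs
where lower(what) like '%refresh%';
-- Example: stop a refresh job from running during the TTS window.
-- exec dbms_job.broken(<job_number>, TRUE);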
Step 2:
Determine the endian format of both the source and destination databases with the following queries.
select * from v$transportable_platform order by platform_id;
SELECT tp.platform_id,substr(d.PLATFORM_NAME,1,30), ENDIAN_FORMAT FROM V$TRANSPORTABLE_PLATFORM tp, V$DATABASE d WHERE tp.PLATFORM_NAME = d.PLATFORM_NAME;
Step 3:
Run this script to list all of the tablespaces that are available to be transported
select tablespace_name, block_size from dba_tablespaces where tablespace_name not in ('SYSTEM','SYSAUX') and contents = 'PERMANENT';
Step 4:
Create valid directory for datapump
SELECT * FROM DBA_DIRECTORIES WHERE DIRECTORY_NAME = 'DATA';
OWNER DIRECTORY_NAME DIRECTORY_PATH
---------- ---------------- -----------------------------------
SYS DATA /ATLDEV12c/bkp
GRANT read, write on directory DATA TO sys;
Step 5:
Generate the TTS export and TTS import Data Pump parameter files using the script below.
REM
REM Create TTS Data Pump export and import PAR files
REM
set feedback off trimspool on
set serveroutput on size 1000000
REM
REM Data Pump parameter file for TTS export
REM
spool dp_ttsexp.par
declare
tsname varchar(30);
i number := 0;
begin
dbms_output.put_line('directory=DATA_PUMP_DIR');
dbms_output.put_line('dumpfile=dp_tts.dmp');
dbms_output.put_line('logfile=dp_ttsexp.log');
dbms_output.put_line('transport_full_check=no');
dbms_output.put('transport_tablespaces=');
for ts in
(select tablespace_name from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX')
and contents = 'PERMANENT'
order by tablespace_name)
loop
if (i!=0) then
dbms_output.put_line(tsname||',');
end if;
i := 1;
tsname := ts.tablespace_name;
end loop;
dbms_output.put_line(tsname);
dbms_output.put_line('');
end;
/
spool off
REM
REM Data Pump parameter file for TTS import
REM
spool dp_ttsimp.par
declare
fname varchar(513);
i number := 0;
begin
dbms_output.put_line('directory=DATA_PUMP_DIR');
dbms_output.put_line('dumpfile=dp_tts.dmp');
dbms_output.put_line('logfile=dp_ttsimp.log');
dbms_output.put('transport_datafiles=');
for df in
(select file_name from dba_tablespaces a, dba_data_files b
where a.tablespace_name = b.tablespace_name
and a.tablespace_name not in ('SYSTEM','SYSAUX')
and contents = 'PERMANENT'
order by a.tablespace_name)
loop
if (i!=0) then
dbms_output.put_line(''''||fname||''',');
end if;
i := 1;
fname := df.file_name;
end loop;
dbms_output.put_line(''''||fname||'''');
dbms_output.put_line('');
end;
/
spool off
Step 6:
Check for user-created objects in the SYSTEM and SYSAUX tablespaces.
select owner, segment_name, segment_type from dba_segments where tablespace_name in ('SYSTEM', 'SYSAUX') and owner not in (select name
from system.logstdby$skip_support
where action=0);
Step 7:
Create the 'create user' script.
spool def_Tbs.sql
select 'CREATE USER '||username||' IDENTIFIED BY test DEFAULT TABLESPACE SYSTEM;' from dba_users where username not in ('SYS', 'SYSTEM', 'DBSNMP','SYSMAN','OUTLN','MDSYS','ORDSYS','EXFSYS','DMSYS','WMSYS','CTXSYS','ANONYMOUS','XDB','ORDPLUGINS','OLAPSYS','PUBLIC');
spool off
Step 8:
Check containment.
declare
checklist varchar2(4000);
i number := 0;
begin
for ts in
(select tablespace_name
from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX') and contents = 'PERMANENT')
loop
if (i=0) then
checklist := ts.tablespace_name;
else
checklist := checklist||','||ts.tablespace_name;
end if;
i := 1;
end loop;
dbms_tts.transport_set_check(checklist,TRUE,TRUE);
end;
/
select * from transport_set_violations;
Step 9:
Export source metadata.
expdp DIRECTORY=DATA LOGFILE=dp_fullexp_meta.log DUMPFILE=dp_full.dmp FULL=y CONTENT=METADATA_ONLY
Step 10:
Check for tablespaces using a non-default block size
show parameter cache_size;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
client_result_cache_size big integer 0
db_16k_cache_size big integer 0
db_2k_cache_size big integer 0
db_32k_cache_size big integer 1024M
db_4k_cache_size big integer 0
db_8k_cache_size big integer 0
db_cache_size big integer 16G
db_keep_cache_size big integer 16G
db_recycle_cache_size big integer 0
select tablespace_name, block_size from dba_tablespaces;
TABLESPACE_NAME BLOCK_SIZE
------------------------------ ----------
SYSTEM 8192
SYSAUX 8192
UNDOTBS1 8192
TEMP 8192
USERS 8192
USER_DATA 8192
BIG_DATA 32768
Step 11:
Create tablespace scripts:
Tablespace read only script.
set heading off feedback off trimspool on linesize 500
spool tts_tsro.sql
prompt /* =================================== */
prompt /* Make all user tablespaces READ ONLY */
prompt /* =================================== */
select 'ALTER TABLESPACE ' || tablespace_name || ' READ ONLY;' from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX')
and contents = 'PERMANENT';
spool off
Step 12:
Run read only script
@tts_tsro.sql
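Before moving on, you can optionally confirm that the user tablespaces are now READ ONLY:
select tablespace_name, status
from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX')
and contents = 'PERMANENT';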
Step 13:
Tablespace read write script.
set heading off feedback off trimspool on linesize 500
spool tts_tsrw.sql
prompt /* ==================================== */
prompt /* Make all user tablespaces READ WRITE */
prompt /* ==================================== */
select 'ALTER TABLESPACE ' || tablespace_name || ' READ WRITE;' from dba_tablespaces
where tablespace_name not in ('SYSTEM','SYSAUX')
and contents = 'PERMANENT';
spool off
Step 14:
Export the tablespaces.
EXPDP parfile=dp_ttsexp.par
Step 15:
Copy the following files to a place that is accessible to the destination database.
dp_ttsimp.par import parameter file
def_Tbs.sql create user script
dp_full.dmp metadata dump file
tts_tsrw.sql script to make tablespaces read write
Step 16:
Create directory in destination database.
SELECT * FROM DBA_DIRECTORIES WHERE DIRECTORY_NAME = 'DATA';
OWNER DIRECTORY_NAME DIRECTORY_PATH
---------- ---------------- -----------------------------------
SYS DATA /ATLDEV12c/bkp
GRANT read, write on directory DATA TO sys;
Step 17:
Add the db_32k_cache_size parameter to the target database:
ALTER SYSTEM SET db_32k_cache_size='1024M' SCOPE=BOTH;
Step 18:
Create users in the destination database using the following script.
@def_Tbs.sql
Step 19:
Import the tablespaces.
IMPDP parfile=dp_ttsimp.par
Step 20:
Make tablespace read write using the following script.
@tts_tsrw.sql;
Step 21:
Import source metadata to destination database.
impdp DIRECTORY=DATA LOGFILE=dp_fullexp_meta.log DUMPFILE=dp_full.dmp FULL=y CONTENT=METADATA_ONLY
Step 22:
Compile all the invalids.
@utlrp.sql
Step 23:
Check that the object count in the destination database matches the source database.
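A simple comparison query, run on both source and destination and diffed (a sketch; adjust the owner filter to your environment):
select owner, object_type, count(*) object_count
from dba_objects
where owner not in ('SYS','SYSTEM')
group by owner, object_type
order by owner, object_type;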
Article 1
STEPS TO UPGRADE THE TIME ZONE AFTER UPGRADING THE DB FROM 11.2.0.4 TO 12.2.0.1:
conn / as sysdba
purge dba_recyclebin;
EXEC DBMS_APPLICATION_INFO.SET_CLIENT_INFO('upg_tzv') ;
alter session set "_with_subquery"=materialize;
alter session set "_simple_view_merging"=TRUE;
exec DBMS_DST.BEGIN_PREPARE(26);
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value FROM DATABASE_PROPERTIES WHERE PROPERTY_NAME LIKE 'DST_%' ORDER BY PROPERTY_NAME;
TRUNCATE TABLE SYS.DST$TRIGGER_TABLE;
TRUNCATE TABLE sys.dst$affected_tables;
TRUNCATE TABLE sys.dst$error_table;
set serveroutput on
BEGIN
DBMS_DST.FIND_AFFECTED_TABLES (affected_tables => 'sys.dst$affected_tables', log_errors => TRUE, log_errors_table => 'sys.dst$error_table');
END;
/
SELECT * FROM sys.dst$affected_tables;
SELECT * FROM sys.dst$error_table;
EXEC DBMS_DST.END_PREPARE;
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value FROM DATABASE_PROPERTIES WHERE PROPERTY_NAME LIKE 'DST_%' ORDER BY PROPERTY_NAME;
conn / as sysdba
shutdown immediate;
startup upgrade;
set serveroutput on
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value FROM DATABASE_PROPERTIES WHERE PROPERTY_NAME LIKE 'DST_%' ORDER BY PROPERTY_NAME;
purge dba_recyclebin;
TRUNCATE TABLE SYS.DST$TRIGGER_TABLE;
TRUNCATE TABLE sys.dst$affected_tables;
TRUNCATE TABLE sys.dst$error_table;
EXEC DBMS_APPLICATION_INFO.SET_CLIENT_INFO('upg_tzv')
alter session set "_with_subquery"=materialize;
alter session set "_simple_view_merging"=TRUE;
EXEC DBMS_DST.BEGIN_UPGRADE(26);
SELECT * FROM sys.dst$error_table;
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value FROM DATABASE_PROPERTIES WHERE PROPERTY_NAME LIKE 'DST_%' ORDER BY PROPERTY_NAME;
SELECT OWNER, TABLE_NAME, UPGRADE_IN_PROGRESS FROM ALL_TSTZ_TABLES where UPGRADE_IN_PROGRESS='YES';
shutdown immediate
startup
alter session set "_with_subquery"=materialize;
alter session set "_simple_view_merging"=TRUE;
" set serveroutput on
VAR numfail number
BEGIN
DBMS_DST.UPGRADE_DATABASE(:numfail, parallel => TRUE, log_errors => TRUE, log_errors_table => 'SYS.DST$ERROR_TABLE', log_triggers_table => 'SYS.DST$TRIGGER_TABLE', error_on_overlap_time => FALSE, error_on_nonexisting_time => FALSE);
DBMS_OUTPUT.PUT_LINE('Failures:'|| :numfail);
END;
/"
SELECT * FROM sys.dst$error_table;
" VAR fail number
BEGIN
DBMS_DST.END_UPGRADE(:fail);
DBMS_OUTPUT.PUT_LINE('Failures:'|| :fail);
END;
/"
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value FROM DATABASE_PROPERTIES WHERE PROPERTY_NAME LIKE 'DST_%' ORDER BY PROPERTY_NAME;
SELECT * FROM v$timezone_file;
connect / as sysdba
SELECT VERSION FROM v$timezone_file;
select TZ_VERSION from registry$database;
conn / as sysdba
update registry$database set TZ_VERSION = (select version FROM v$timezone_file);
commit;
Article 0
Query to identify SQL plan changes:
SET SERVEROUTPUT ON
DECLARE
v_count number := 0;
CURSOR SQLID IS
SELECT SQL_ID, COUNT(DISTINCT PLAN_HASH_VALUE) cnt
FROM DBA_HIST_SQLSTAT STAT, DBA_HIST_SNAPSHOT SS
WHERE STAT.SNAP_ID = SS.SNAP_ID AND
SS.DBID = STAT.DBID AND
SS.INSTANCE_NUMBER = STAT.INSTANCE_NUMBER AND
SS.BEGIN_INTERVAL_TIME >= SYSDATE-7 AND
ss.END_INTERVAL_TIME<=sysdate AND
STAT.PLAN_HASH_VALUE <> 0 AND
STAT.executions_delta > 0 AND
STAT.PARSING_SCHEMA_NAME NOT IN ('SYS','SYSTEM')
GROUP BY SQL_ID
ORDER BY 1;
BEGIN
FOR I IN SQLID
loop
IF I.cnt > 1 THEN
DBMS_OUTPUT.PUT_LINE ('Multiple plans seen for SQL ID '||I.sql_id||' over the last 7 days. Please log in to the DB and check for differences in ETIME by running change.sql.');
--ELSE
/** DBMS_OUTPUT.PUT_LINE ('This sqlid '||I.sql_id||' has more than one plan, but the plan was not changed since last 7days.');**/
END IF;
end loop;
END;
/
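change.sql itself is not included here; a minimal sketch of such a check, assuming it simply compares average elapsed time per plan from the AWR history for a given SQL ID:
select ss.snap_id,
       to_char(ss.end_interval_time, 'DD-MON-RR HH24:MI') snap_time,
       stat.plan_hash_value,
       stat.executions_delta execs,
       round(stat.elapsed_time_delta/1000000/
             decode(nvl(stat.executions_delta,0),0,1,stat.executions_delta), 3) avg_etime_secs
from dba_hist_sqlstat stat, dba_hist_snapshot ss
where stat.snap_id = ss.snap_id
and stat.dbid = ss.dbid
and stat.instance_number = ss.instance_number
and stat.sql_id = '&sql_id'
order by ss.snap_id;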
LsInventorySession failed: RawInventory gets null OracleHomeInfo
Issue:
LsInventorySession failed: RawInventory gets null OracleHomeInfo
The "opatch lsinventory" command failed with the errors "LsInventorySession failed: RawInventory gets null OracleHomeInfo" and "OPatch failed with error code 73".
Error:
[oracle@XXXX.com] $ env | grep ORA
ORACLE_BASE=/opt/oracle
ORACLE_SID=xxxx
ORACLE_TERM=xterm
ORACLE_HOME=/opt/oracle/product/12.1.0.2
ORAENV_ASK=YES
[oracle@XXXX.com] $
[oracle@XXXX.com] $ cd $ORACLE_HOME/OPatch
[oracle@XXXX.com] $ pwd
/opt/oracle/product/12.1.0.2/OPatch
[oracle@XXXX.com] $ opatch lsinventory
Oracle Interim Patch Installer version 12.2.0.1.8
Copyright (c) 2016, Oracle Corporation. All rights reserved.
Oracle Home : /opt/oracle/product/12.1.0.2
Central Inventory : /opt/oracle/oraInventory
from : /opt/oracle/product/12.1.0.2/oraInst.loc
OPatch version : 12.2.0.1.11
OUI version : 12.1.0.2.0
Log file location : /opt/oracle/product/12.1.0.2/cfgtoollogs/opatch/opatch2016-11-23_11-15-31AM_1.log
List of Homes on this system:
Home name= OraDb11g_home1, Location= "/opt/oracle/product/11.2.0.1"
Home name= OraDb11g_home2, Location= "/opt/oracle/product/11.2.0.1se"
Home name= OraDb11g_home3, Location= "/opt/oracle/product/11.2.0.3"
Home name= OraDb11g_home4, Location= "/opt/oracle/product/11.2.0.3se"
Home name= OraDb11g_home5, Location= "/opt/oracle/product/11.2.0.4"
LsInventorySession failed: RawInventory gets null OracleHomeInfo
OPatch failed with error code 73
FIX:
Re-attach the ORACLE_HOME using the commands below, run from the location shown:
[oracle@XXXX.com] $ cd $ORACLE_HOME/oui/bin
[oracle@XXXX.com] $ pwd
/opt/oracle/product/12.1.0.2/oui/bin
[oracle@XXXX.com] $ cat attachHome.sh
#!/bin/sh
OHOME=/opt/oracle/product/12.1.0.2
OHOMENAME=OraDB12Home1
CUR_DIR=`pwd`
cd $OHOME/oui/bin
./runInstaller -detachhome ORACLE_HOME=$OHOME ORACLE_HOME_NAME=$OHOMENAME $* > /dev/null 2>&1
./runInstaller -attachhome ORACLE_HOME=$OHOME ORACLE_HOME_NAME=$OHOMENAME $*
cd $CUR_DIR
[oracle@XXXX.com] $
[oracle@XXXX.com] $ ./attachHome.sh
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB. Actual 8192 MB Passed
The inventory pointer is located at /var/opt/oracle/oraInst.loc
'AttachHome' was successful.
Installation and Configuration of VNC Server in RHEL 7.5
1. Install the VNC Server.
# yum install tigervnc-server
2. Create a new configuration file for each of the display numbers you want to enable. In the following case, I am setting up the display number ":3". Notice how the display number is included in the configuration file name.
# cp /lib/systemd/system/vncserver@.service /lib/systemd/system/vncserver@:3.service
3. Edit the new configuration file, amending the user and startup arguments as necessary. An example of the changed lines is shown below. All other lines should be unmodified. The geometry is set below, but this line doesn't have to be modified if the default geometry of "1280x1024" is acceptable.
User=oracle
ExecStart=/usr/bin/vncserver %i -geometry 1440x1080
PIDFile=/home/oracle/.vnc/%H%i.pid
4. Run the following command.
# systemctl daemon-reload
5. Run the following command.
# vncserver :2 -geometry 1280x720
6. Set the VNC password for the user defined in the new configuration file.
# su - oracle
$ vncpasswd
Password:
Verify:
Would you like to enter a view-only password (y/n)? n
$ exit
logout
7. Enable the service for autostart and start the service.
# systemctl enable vncserver@:3.service
# systemctl start vncserver@:3.service
8. You should now be able to use a VNC viewer to connect to the system using the display number and password defined. Use the following commands to stop the service and disable autostart.
# systemctl stop vncserver@:3.service
# systemctl disable vncserver@:3.service
Query to find idle time of Oracle Applications users
select
disabled_flag,
to_char(first_connect,'MM/DD/YYYY HH:MI:SS') Start_Time,
to_char(sysdate,'HH:MI:SS') Current_Time,
USER_NAME,
session_id,
(SYSDATE-last_connect)*24*60 Mins_Idle
from
icx.ICX_SESSIONS a, apps.fnd_User b
where
a.user_id=b.user_id
and last_connect > sysdate-1/24;
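A variant of the same query that lists only sessions idle for more than 30 minutes (a sketch; the disabled_flag = 'N' filter assumes 'N' marks active ICX sessions):
select b.user_name,
       a.session_id,
       to_char(a.first_connect,'MM/DD/YYYY HH24:MI:SS') start_time,
       round((sysdate - a.last_connect)*24*60, 1) mins_idle
from icx.icx_sessions a, apps.fnd_user b
where a.user_id = b.user_id
and a.disabled_flag = 'N'
and (sysdate - a.last_connect)*24*60 > 30
order by mins_idle desc;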
Query To Find Out Workflow Errored-Out Process Activity Status For A Particular Item Type/Item Key
SELECT execution_time,
to_char(wias.begin_date,
'DD-MON-RR HH24:MI:SS') begin_date,
wap.display_name || '/' || wac.display_name activity,
wias.activity_status status,
wias.activity_result_code RESULT,
wias.assigned_user ass_user
FROM wf_item_activity_statuses wias,
wf_process_activities wpa,
wf_activities_vl wac,
wf_activities_vl wap,
wf_items wi
WHERE wias.item_type = wi.item_type
AND wias.item_key = wi.item_key
AND wias.process_activity = wpa.instance_id
AND wpa.activity_name = wac.name
AND wpa.activity_item_type = wac.item_type
AND wpa.process_name = wap.name
AND wpa.process_item_type = wap.item_type
AND wpa.process_version = wap.version
AND wi.parent_item_type = '&item_type'
AND wi.parent_item_key = '&item_key'
AND wi.begin_date >= wac.begin_date
AND wi.begin_date < nvl(wac.end_date,
wi.begin_date + 1)
UNION ALL
SELECT execution_time,
to_char(wias.begin_date,
'DD-MON-RR HH24:MI:SS') begin_date,
wap.display_name || '/' || wac.display_name activity,
wias.activity_status status,
wias.activity_result_code RESULT,
wias.assigned_user ass_user
FROM wf_item_activity_statuses_h wias,
wf_process_activities wpa,
wf_activities_vl wac,
wf_activities_vl wap,
wf_items wi
WHERE wias.item_type = wi.item_type
AND wias.item_key = wi.item_key
AND wias.process_activity = wpa.instance_id
AND wpa.activity_name = wac.name
AND wpa.activity_item_type = wac.item_type
AND wpa.process_name = wap.name
AND wpa.process_item_type = wap.item_type
AND wpa.process_version = wap.version
AND wi.parent_item_type = '&item_type'
AND wi.parent_item_key = '&item_key'
AND wi.begin_date >= wac.begin_date
AND wi.begin_date < nvl(wac.end_date,
wi.begin_date + 1)
ORDER BY 2,
1
/
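If the item key is not known up front, a quick lookup against wf_items can list candidates for the item type (a sketch using only columns already referenced above):
select item_type, item_key,
       to_char(begin_date,'DD-MON-RR HH24:MI:SS') begin_date
from wf_items
where item_type = '&item_type'
order by begin_date desc;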
Article 9
Query to find out the running transactions in the database
Note:
This query is used to find out the running transaction in the database.
Query:
select sesion.sid,
sesion.username,
optimizer_mode,
hash_value,
address,
cpu_time,
elapsed_time,
sql_text
from v$sqlarea sqlarea, v$session sesion
where sesion.sql_hash_value = sqlarea.hash_value
and sesion.sql_address = sqlarea.address
and sesion.username is not null
/
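The query above actually lists active sessions with their current SQL; to see sessions that have an open transaction specifically, v$transaction can be joined in, as in this sketch:
select s.sid,
       s.serial#,
       s.username,
       t.start_time,
       t.used_ublk,
       t.used_urec
from v$session s, v$transaction t
where t.ses_addr = s.saddr
order by t.start_time;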
Article 8
Error during timezone update in 11.2.0.4 to 12.2.0.1
Error:
ERROR at line 1:
ORA-04045: errors during recompilation/revalidation of
APPADMIN.PW_SYNC_AFTER_ALTER
ORA-06512: at "SYS.DBMS_DST", line 1139
ORA-06512: at "SYS.DBMS_DST", line 571
ORA-01031: insufficient privileges
Solution :
1. Back up the DDL for trigger APPADMIN.PW_SYNC_AFTER_ALTER (a sketch of steps 1 and 2 is shown after the solution).
2. Drop trigger APPADMIN.PW_SYNC_AFTER_ALTER; make sure the applications are stopped first.
3. Then run the following again.
CONN / as sysdba
alter session set "_with_subquery"=materialize;
alter session set "_simple_view_merging"=TRUE;
set serveroutput on
VAR numfail number
BEGIN
DBMS_DST.UPGRADE_DATABASE(:numfail,
parallel => TRUE,
log_errors => TRUE,
log_errors_table => 'SYS.DST$ERROR_TABLE',
log_triggers_table => 'SYS.DST$TRIGGER_TABLE',
error_on_overlap_time => FALSE,
error_on_nonexisting_time => FALSE);
DBMS_OUTPUT.PUT_LINE('Failures:'|| :numfail);
END;
/
VAR fail number
BEGIN
DBMS_DST.END_UPGRADE(:fail);
DBMS_OUTPUT.PUT_LINE('Failures:'|| :fail);
END;
/
SELECT PROPERTY_NAME, SUBSTR(property_value, 1, 30) value
FROM DATABASE_PROPERTIES
WHERE PROPERTY_NAME LIKE 'DST_%'
ORDER BY PROPERTY_NAME;
4. Recreate trigger APPADMIN.PW_SYNC_AFTER_ALTER from the backed-up DDL.
Trigger created
grant administer database trigger to appadmin;
Issue resolved.
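A sketch of steps 1 and 2 above (back up the trigger DDL with DBMS_METADATA, then drop it):
set long 100000 longchunksize 100000 pagesize 0 linesize 200
spool pw_sync_after_alter_ddl.sql
select dbms_metadata.get_ddl('TRIGGER','PW_SYNC_AFTER_ALTER','APPADMIN') from dual;
spool off
drop trigger appadmin.pw_sync_after_alter;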
Article 7
Error while running owm utility
Error:
Exception in thread "AWT-EventQueue-0" java.lang.IllegalStateException: This function should be called while holding treeLock
at java.awt.Component.checkTreeLock(Component.java:1200)
at java.awt.Container.validateTree(Container.java:1682)
at oracle.security.admin.wltmgr.owmt.C027.v(C027)
at oracle.security.admin.wltmgr.owmt.C027.<init>(C027)
at oracle.security.admin.wltmgr.owmt.C120.<init>(C120)
at oracle.security.admin.wltmgr.owma.C126.Z(C126)
at oracle.security.admin.wltmgr.owma.C034.c(C034)
at oracle.security.admin.wltmgr.owma.C034.commandIssued(C034)
at oracle.sysman.emSDK.client.guiComponent.commandAdapter.CommandAdapter.actionPerformed(Unknown Source)
.
..
.
.
java.security.ProtectionDomain$JavaSecurityAccessImpl.doIntersectionPrivilege(ProtectionDomain.java:77)
at java.security.ProtectionDomain$JavaSecurityAccessImpl.doIntersectionPrivilege(ProtectionDomain.java:87)
at java.awt.EventQueue$4.run(EventQueue.java:720)
at java.awt.EventQueue$4.run(EventQueue.java:718)
Solution:
Change the JRE path in the owm file to the older JDK version and rerun the utility.
cat owm|grep JAVA
JAVA_HOME=/usr/jdk/jdk1.6.0_171
export JAVA_HOME
elif [ -z "$JAVA_HOME" ]; then
JAVA_HOME=/usr/jdk/jdk1.6.0_171
export JAVA_HOME
echo "$TOOLHOME/jdk or \$JAVA_HOME should point to valid Java runtime"
elif [ ! -f "$JAVA_HOME/bin/java" ]; then
echo "$JAVA_HOME should point to valid Java runtime"
PATH=$JAVA_HOME/bin:$PATH
JRE_CLASSPATH=$JAVA_HOME/lib:$JAVA_HOME/lib/rt.jar:$JAVA_HOME/lib/i18n.jar
JAVA_COMPILER=NONE
export JAVA_COMPILER
JAVAMODE=
JAVAMODE=-d64
JAVAMODE=-d64
Article 6
Query to find the child cursors of a SQL ID.
Query :
col sql_text for a60 wrap
set verify off
set pagesize 999
set lines 155
col username format a13
col prog format a22
col sid format 999
col child_number format 99999 heading CHILD
col ocategory format a10
col avg_etime format 9,999,999.99
col avg_pio format 9,999,999.99
col avg_lio format 999,999,999
col etime format 9,999,999.99
select sql_id, child_number, plan_hash_value plan_hash, executions execs,
(elapsed_time/1000000)/decode(nvl(executions,0),0,1,executions) avg_etime,
buffer_gets/decode(nvl(executions,0),0,1,executions) avg_lio,
sql_text
from v$sql s
where upper(sql_text) like upper(nvl('&sql_text',sql_text))
and sql_text not like '%from v$sql where sql_text like nvl(%'
and sql_id like nvl('&sql_id',sql_id)
order by 1, 2, 3
/
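When a SQL ID shows several child cursors, v$sql_shared_cursor reports why the existing children could not be shared; a simple follow-up check:
select *
from v$sql_shared_cursor
where sql_id = '&sql_id';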
Article 5
Query to flush the child cursors of a SQL ID from the shared pool
Query:
set serveroutput on
set pagesize 9999
set linesize 155
var name varchar2(50)
accept sql_id -
prompt 'Enter value for sql_id: '
BEGIN
select address||','||hash_value into :name
from v$sqlarea
where sql_id like '&&sql_id';
dbms_shared_pool.purge(:name,'C',1);
END;
/
undef sql_id
undef name
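After the purge, the cursor should be gone from the shared pool; it will be re-parsed (and may get a fresh plan) on its next execution. A quick verification:
select sql_id, child_number, plan_hash_value, executions
from v$sql
where sql_id = '&sql_id';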
Article 4
Script to start a WebLogic Forms and Reports Services installation.
Script:
export MW_HOME=/u01/app/oracle/middleware
export DOMAIN_HOME=$MW_HOME/user_projects/domains/ClassicDomain
export FR_INST=$MW_HOME/asinst_1
echo "Starting AdminServer"
nohup $DOMAIN_HOME/bin/startWebLogic.sh > /dev/null 2>&1 &
sleep 120
echo "Starting WLS_FORMS"
nohup $DOMAIN_HOME/bin/startManagedWebLogic.sh WLS_FORMS > /dev/null 2>&1 &
echo "Starting WLS_REPORTS"
nohup $DOMAIN_HOME/bin/startManagedWebLogic.sh WLS_REPORTS > /dev/null 2>&1 &
echo "Start remaining processes using OPMN"
$FR_INST/bin/opmnctl startall
echo "Sleep for 10 minutes before calling reports startserver"
sleep 600
curl http://localhost:8888/reports/rwservlet/startserver > /dev/null 2>&1 &
echo "Done!"
How to get FND log messages extract for SR
Please provide FND logs for a reproducible test case, as per the steps below.
1) Set the Profiles
* FND: Debug Log Enabled : Yes
* FND: Debug Log Level : Statement
* FND: Log Module : %
2) Get the current log sequence in the FND table.
select max(log_sequence) from fnd_log_messages ; -- start
3) Reproduce the issue
4) Get the log sequence in the FND table.
select max(log_sequence) from fnd_log_messages ; -- end
5) Get the FND log data.
select * from fnd_log_messages where log_sequence between <start> and <end>;
6) Spool this into an xls file.
7) Lastly, remember to disable FND: Debug Log after you are done; otherwise, you could encounter tablespace issues.
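To confirm the debug profiles took effect, they can be read back with fnd_profile.value (a sketch; AFLOG_ENABLED, AFLOG_LEVEL and AFLOG_MODULE are assumed to be the internal names of the three profile options above, and depending on how the applications context is initialized in your SQL*Plus session this may only show site-level values):
select fnd_profile.value('AFLOG_ENABLED') log_enabled,
       fnd_profile.value('AFLOG_LEVEL') log_level,
       fnd_profile.value('AFLOG_MODULE') log_module
from dual;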