ode-user mailing list archives

From Stefan Jakoubi <sjako...@securityresearch.at>
Subject Re: Hibernate mappings
Date Mon, 28 Apr 2008 14:04:50 GMT

Hi Mathieu,

I think we have found the solution - in fact, two major changes are required
to use ODE with DB2:
- Update of the simple scheduler
- Update of DB script

Below, you can find a little HowTo for JBoss & ODE & DB2 - I hope everything
works fine ;-)

Cheers and thanks for your support,
Simon & Stefan


+++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++
++ HOW TO: JBOSS & ODE & DB2 ++
+++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++

*****************
* PREREQUISITES *
*****************

 - JBoss (tested on JBoss-4.0.3SP1)
 - deployed ODE (tested with version 1.1.1)
 - DB2 (tested with version 9.x)
 
 
**********************************
* MODIFYING SIMPLE SCHEDULER JAR *
**********************************

- There is a bug when using the scalar function MOD within a DB2 prepared
statement (see the worked example below)
- Modify the JdbcDelegate class (the full listing is at the end of this HowTo)
- Compile the code and rebuild the jar
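
To illustrate why the rewrite is equivalent (example numbers only, not values
taken from the scheduler): DB2 evaluates x / y here as integer division, so for
x = 17 and y = 5:

    MOD(17, 5)        = 2
    17 - 5 * (17 / 5) = 17 - 5 * 3 = 2

Both forms select the same rows; only the second one avoids the prepared
statement problem (see UPGRADE_JOB_DB2 in the modified JdbcDelegate below).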


*****************
* CONFIGURATION *
*****************

-------------------------
### DB2 configuration ###
-------------------------

(1) create database for ODE usage (e.g. testode)

(2) execute the following statements

-- Apache ODE - SimpleScheduler Database Schema
-- 
-- Apache Derby scripts by Maciej Szefler.
-- 
-- Scripts modified for use with DB2 by Stefan Jakoubi and Simon Tjoa
-- (-) All primary key ID columns modified for auto increment purposes
--     -> ID bigint GENERATED ALWAYS AS IDENTITY not null
-- (-) Altered table LARGE_DATA -> BIN_DATA blob(2000M) NOT LOGGED, as DB2
--     only supports logging up to 1GB
--     Furthermore, DB2 only allows blob sizes up to 2GB - 1 byte!
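
-- Column roles in ode_job, as used by the simple scheduler's JdbcDelegate
-- (see the listing at the end of this HowTo); added here for orientation:
--   jobid      - job identifier
--   ts         - scheduled execution time (long timestamp, compared against maxtime)
--   nodeid     - cluster node the job is assigned to (null = not yet assigned)
--   scheduled  - 0/1 flag, set to 1 once the job has been picked up for execution
--   transacted - 0/1 flag, whether the job should run inside a transaction
--   details    - serialized job detail map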

CREATE TABLE ode_job (
  jobid CHAR(64)  NOT NULL DEFAULT '',
  ts BIGINT  NOT NULL DEFAULT 0,
  nodeid char(64),
  scheduled int  NOT NULL DEFAULT 0,
  transacted int  NOT NULL DEFAULT 0,
  details blob(4096),
  PRIMARY KEY(jobid));

CREATE INDEX IDX_ODE_JOB_TS ON ode_job(ts);
CREATE INDEX IDX_ODE_JOB_NODEID ON ode_job(nodeid);


create table BPEL_ACTIVITY_RECOVERY (ID bigint GENERATED ALWAYS AS IDENTITY
not null, PIID bigint, AID bigint, CHANNEL varchar(255), REASON
varchar(255), DATE_TIME timestamp, LDATA_ID bigint, ACTIONS varchar(255),
RETRIES integer, INSERT_TIME timestamp, MLOCK integer not null, primary key
(ID));
create table BPEL_CORRELATION_PROP (ID bigint GENERATED ALWAYS AS IDENTITY
not null, NAME varchar(255), NAMESPACE varchar(255), VALUE varchar(255),
CORR_SET_ID bigint, INSERT_TIME timestamp, MLOCK integer not null, primary
key (ID));
create table BPEL_CORRELATION_SET (ID bigint GENERATED ALWAYS AS IDENTITY
not null, VALUE varchar(255), CORR_SET_NAME varchar(255), SCOPE_ID bigint,
PIID bigint, PROCESS_ID bigint, INSERT_TIME timestamp, MLOCK integer not
null, primary key (ID));
create table BPEL_CORRELATOR (ID bigint GENERATED ALWAYS AS IDENTITY not
null, CID varchar(255), PROCESS_ID bigint, INSERT_TIME timestamp, MLOCK
integer not null, primary key (ID));
create table BPEL_CORRELATOR_MESSAGE_CKEY (ID bigint GENERATED ALWAYS AS
IDENTITY not null, CKEY varchar(255), CORRELATOR_MESSAGE_ID bigint,
INSERT_TIME timestamp, MLOCK integer not null, primary key (ID));
create table BPEL_EVENT (ID bigint GENERATED ALWAYS AS IDENTITY not null,
IID bigint, PID bigint, TSTAMP timestamp, TYPE varchar(255), DETAIL
clob(32000), LDATA_ID bigint, SID bigint, INSERT_TIME timestamp, MLOCK
integer not null, primary key (ID));
create table BPEL_FAULT (ID bigint GENERATED ALWAYS AS IDENTITY not null,
FAULTNAME varchar(255), LDATA_ID bigint, EXPLANATION varchar(4000), LINE_NUM
integer, AID integer, INSERT_TIME timestamp, MLOCK integer not null, primary
key (ID));
create table BPEL_INSTANCE (ID bigint GENERATED ALWAYS AS IDENTITY not null,
INSTANTIATING_CORRELATOR bigint, FAULT bigint, JACOB_STATE bigint,
PREVIOUS_STATE smallint, PROCESS_ID bigint, STATE smallint, LAST_ACTIVE_DT
timestamp, SEQUENCE bigint, FAILURE_COUNT integer, FAILURE_DT timestamp,
INSERT_TIME timestamp, MLOCK integer not null, primary key (ID));
create table BPEL_MESSAGE (ID bigint GENERATED ALWAYS AS IDENTITY not null,
MEX bigint, TYPE varchar(255), DATA bigint, INSERT_TIME timestamp, MLOCK
integer not null, primary key (ID));
create table BPEL_MESSAGE_EXCHANGE (ID bigint GENERATED ALWAYS AS IDENTITY
not null, PORT_TYPE varchar(255), CHANNEL_NAME varchar(255), CLIENTKEY
varchar(255), LDATA_EPR_ID bigint, LDATA_CEPR_ID bigint, REQUEST bigint,
RESPONSE bigint, INSERT_DT timestamp, OPERATION varchar(255), STATE
varchar(255), PROCESS bigint, PIID bigint, DIR char(1), PLINK_MODELID
integer, PATTERN varchar(255), CORR_STATUS varchar(255), FAULT_TYPE
varchar(255), FAULT_EXPL varchar(255), CALLEE varchar(255), PARTNERLINK
bigint, PIPED_ID varchar(255), INSERT_TIME timestamp, MLOCK integer not
null, primary key (ID));
create table BPEL_MEX_PROPS (MEX bigint not null, VALUE varchar(8000), NAME
varchar(255) not null, primary key (MEX, NAME));
create table BPEL_PLINK_VAL (ID bigint GENERATED ALWAYS AS IDENTITY not
null, PARTNER_LINK varchar(100) not null, PARTNERROLE varchar(100),
MYROLE_EPR bigint, PARTNERROLE_EPR bigint, PROCESS bigint, SCOPE bigint,
SVCNAME varchar(255), MYROLE varchar(100), MODELID integer, MYSESSIONID
varchar(255), PARTNERSESSIONID varchar(255), INSERT_TIME timestamp, MLOCK
integer not null, primary key (ID));
create table BPEL_PROCESS (ID bigint GENERATED ALWAYS AS IDENTITY not null,
PROCID varchar(255) not null unique, deployer varchar(255), deploydate
timestamp, type_name varchar(255), type_ns varchar(255), version bigint,
ACTIVE_ smallint, guid varchar(255), INSERT_TIME timestamp, MLOCK integer
not null, primary key (ID));
create table BPEL_SCOPE (ID bigint GENERATED ALWAYS AS IDENTITY not null,
PIID bigint, PARENT_SCOPE_ID bigint, STATE varchar(255) not null, NAME
varchar(255) not null, MODELID integer, INSERT_TIME timestamp, MLOCK integer
not null, primary key (ID));
create table BPEL_SELECTORS (ID bigint GENERATED ALWAYS AS IDENTITY not
null, PIID bigint not null, SELGRPID varchar(255) not null, IDX integer not
null, CORRELATION_KEY varchar(255) not null, CORRELATOR bigint not null,
INSERT_TIME timestamp, MLOCK integer not null, primary key (ID), unique
(CORRELATION_KEY, CORRELATOR));
create table BPEL_UNMATCHED (ID bigint GENERATED ALWAYS AS IDENTITY not
null, MEX bigint, CORRELATION_KEY varchar(255), CORRELATOR bigint not null,
INSERT_TIME timestamp, MLOCK integer not null, primary key (ID));
create table BPEL_XML_DATA (ID bigint GENERATED ALWAYS AS IDENTITY not null,
LDATA_ID bigint, NAME varchar(255) not null, SCOPE_ID bigint, PIID bigint,
IS_SIMPLE_TYPE smallint, INSERT_TIME timestamp, MLOCK integer not null,
primary key (ID));
create table LARGE_DATA (ID bigint GENERATED ALWAYS AS IDENTITY not null,
BIN_DATA blob(2000M) NOT LOGGED, INSERT_TIME timestamp, MLOCK integer not
null, primary key (ID));
create table VAR_PROPERTY (ID bigint GENERATED ALWAYS AS IDENTITY not null,
XML_DATA_ID bigint, PROP_VALUE varchar(255), PROP_NAME varchar(255) not
null, INSERT_TIME timestamp, MLOCK integer not null, primary key (ID));
alter table BPEL_ACTIVITY_RECOVERY add constraint FK94D24A7B14020712 foreign
key (LDATA_ID) references LARGE_DATA;
alter table BPEL_ACTIVITY_RECOVERY add constraint FK94D24A7BA821BBE1 foreign
key (PIID) references BPEL_INSTANCE;
alter table BPEL_CORRELATION_PROP add constraint FK4EC9DDAA4D0B7982 foreign
key (CORR_SET_ID) references BPEL_CORRELATION_SET;
alter table BPEL_CORRELATION_SET add constraint FKB838191BA821BBE1 foreign
key (PIID) references BPEL_INSTANCE;
alter table BPEL_CORRELATION_SET add constraint FKB838191B6D49C363 foreign
key (SCOPE_ID) references BPEL_SCOPE;
alter table BPEL_CORRELATION_SET add constraint FKB838191BE15A2343 foreign
key (PROCESS_ID) references BPEL_PROCESS;
create index IDX_CORRELATOR_CID on BPEL_CORRELATOR (CID);
alter table BPEL_CORRELATOR add constraint FKF50EFA33E15A2343 foreign key
(PROCESS_ID) references BPEL_PROCESS;
create index IDX_BPEL_CORRELATOR_MESSAGE_CKEY on
BPEL_CORRELATOR_MESSAGE_CKEY (CKEY);
alter table BPEL_CORRELATOR_MESSAGE_CKEY add constraint FK8997F700EEFA7470
foreign key (CORRELATOR_MESSAGE_ID) references BPEL_UNMATCHED;
alter table BPEL_EVENT add constraint FKAA6D673014020712 foreign key
(LDATA_ID) references LARGE_DATA;
alter table BPEL_EVENT add constraint FKAA6D6730A7EED251 foreign key (IID)
references BPEL_INSTANCE;
alter table BPEL_EVENT add constraint FKAA6D6730C831CBE3 foreign key (PID)
references BPEL_PROCESS;
alter table BPEL_FAULT add constraint FKAA722EB814020712 foreign key
(LDATA_ID) references LARGE_DATA;
alter table BPEL_INSTANCE add constraint FKE1DED41FDD43DBE1 foreign key
(INSTANTIATING_CORRELATOR) references BPEL_CORRELATOR;
alter table BPEL_INSTANCE add constraint FKE1DED41F6B66C85F foreign key
(JACOB_STATE) references LARGE_DATA;
alter table BPEL_INSTANCE add constraint FKE1DED41F1F3C9CB7 foreign key
(FAULT) references BPEL_FAULT;
alter table BPEL_INSTANCE add constraint FKE1DED41FE15A2343 foreign key
(PROCESS_ID) references BPEL_PROCESS;
alter table BPEL_MESSAGE add constraint FK4FA7231DCA00A413 foreign key (MEX)
references BPEL_MESSAGE_EXCHANGE;
alter table BPEL_MESSAGE add constraint FK4FA7231DEB2C9ED8 foreign key
(DATA) references LARGE_DATA;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD0589D7A75B foreign
key (LDATA_CEPR_ID) references LARGE_DATA;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD05BBA250D foreign
key (PARTNERLINK) references BPEL_PLINK_VAL;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD05A821BBE1 foreign
key (PIID) references BPEL_INSTANCE;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD05C29670AA foreign
key (LDATA_EPR_ID) references LARGE_DATA;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD05B5BD38C7 foreign
key (PROCESS) references BPEL_PROCESS;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD05FF451031 foreign
key (RESPONSE) references BPEL_MESSAGE;
alter table BPEL_MESSAGE_EXCHANGE add constraint FKBDA6BD0554DCEE7F foreign
key (REQUEST) references BPEL_MESSAGE;
alter table BPEL_MEX_PROPS add constraint FK203CAFC7CA00A413 foreign key
(MEX) references BPEL_MESSAGE_EXCHANGE;
alter table BPEL_PLINK_VAL add constraint FK7D71E742F2BC5EB8 foreign key
(MYROLE_EPR) references LARGE_DATA;
alter table BPEL_PLINK_VAL add constraint FK7D71E74290D95ED1 foreign key
(SCOPE) references BPEL_SCOPE;
alter table BPEL_PLINK_VAL add constraint FK7D71E742B5BD38C7 foreign key
(PROCESS) references BPEL_PROCESS;
alter table BPEL_PLINK_VAL add constraint FK7D71E742D4B651D4 foreign key
(PARTNERROLE_EPR) references LARGE_DATA;
alter table BPEL_SCOPE add constraint FKAB2A32EAA821BBE1 foreign key (PIID)
references BPEL_INSTANCE;
alter table BPEL_SCOPE add constraint FKAB2A32EA42B20B58 foreign key
(PARENT_SCOPE_ID) references BPEL_SCOPE;
create index IDX_SELECTOR_CORRELATOR on BPEL_SELECTORS (CORRELATOR);
create index IDX_SELECTOR_CKEY on BPEL_SELECTORS (CORRELATION_KEY);
create index IDX_SELECTOR_SELGRPID on BPEL_SELECTORS (SELGRPID);
alter table BPEL_SELECTORS add constraint FKF1F83A0AA821BBE1 foreign key
(PIID) references BPEL_INSTANCE;
alter table BPEL_SELECTORS add constraint FKF1F83A0A875201C9 foreign key
(CORRELATOR) references BPEL_CORRELATOR;
create index IDX_UNMATCHED_CKEY on BPEL_UNMATCHED (CORRELATION_KEY);
create index IDX_UNMATCHED_CORRELATOR on BPEL_UNMATCHED (CORRELATOR);
alter table BPEL_UNMATCHED add constraint FKF0663E01CA00A413 foreign key
(MEX) references BPEL_MESSAGE_EXCHANGE;
alter table BPEL_UNMATCHED add constraint FKF0663E01875201C9 foreign key
(CORRELATOR) references BPEL_CORRELATOR;
alter table BPEL_XML_DATA add constraint FKB7D47E7C14020712 foreign key
(LDATA_ID) references LARGE_DATA;
alter table BPEL_XML_DATA add constraint FKB7D47E7CA821BBE1 foreign key
(PIID) references BPEL_INSTANCE;
alter table BPEL_XML_DATA add constraint FKB7D47E7C6D49C363 foreign key
(SCOPE_ID) references BPEL_SCOPE;
alter table VAR_PROPERTY add constraint FK9C1E2C0DA48E25F2 foreign key
(XML_DATA_ID) references BPEL_XML_DATA;
create table hibernate_unique_key ( next_hi integer );
insert into hibernate_unique_key values ( 0 );
create table STORE_DU (NAME varchar(255) not null, deployer varchar(255),
DEPLOYDT timestamp, DIR varchar(255), primary key (NAME));
create table STORE_PROCESS (PID varchar(255) not null, DU varchar(255), TYPE
varchar(255), version bigint, STATE varchar(255), primary key (PID));
create table STORE_PROCESS_PROP (propId varchar(255) not null, value
varchar(255), name varchar(255) not null, primary key (propId, name));
create table STORE_VERSIONS (ID integer not null, VERSION bigint, primary
key (ID));
alter table STORE_PROCESS add constraint FKA83900D1BFFFC58C foreign key (DU)
references STORE_DU;
alter table STORE_PROCESS_PROP add constraint FKFD6C2E119ADDA5CB foreign key
(propId) references STORE_PROCESS;


(3) configure a DB2 user

- there should already be a user configured (e.g. username)
- set the password of this user within the Windows user management (e.g. password)


------------------
### db2jcc.jar ###
------------------

(4) copy db2jcc.jar to [JBOSS_HOME]\server\[server]\lib\db2jcc.jar


----------------------------
### ode-axis2.properties ###
----------------------------

(5) create a file named ode-axis2.properties within the directory
[JBOSS_HOME]\server\[server]\deploy\ode.war\WEB-INF\conf

(6) edit ode-axis2.properties

ode-axis2.db.mode=EXTERNAL
ode-axis2.db.ext.dataSource=java:[ODE JNDI NAME, e.g. ODEDS]


------------------
### db2-ds.xml ###
------------------

(7) copy [JBOSS_HOME]\docs\examples\jca\db2-ds.xml to
[JBOSS_HOME]\server\[server]\deploy\db2-ds.xml

(8) edit db2-ds.xml

<datasources>
  <local-tx-datasource>
    <jndi-name>[ODE JNDI NAME, e.g. ODEDS]</jndi-name>
    <connection-url>jdbc:db2://localhost:50000/[DATABASENAME, e.g. testode]</connection-url>
    <driver-class>com.ibm.db2.jcc.DB2Driver</driver-class>
    <user-name>[DBUSER, e.g. username - you must not use db2admin]</user-name>
    <password>[DBUSERPASSWORD, e.g. password - it must not be blank]</password>
    <min-pool-size>10</min-pool-size>
        <!-- sql to call when connection is created
        <new-connection-sql>some arbitrary sql</new-connection-sql>
        -->

        <!-- sql to call on an existing pooled connection when it is
obtained from pool 
        <check-valid-connection-sql>some arbitrary
sql</check-valid-connection-sql>
        -->

      <!-- corresponding type-mapping in the standardjbosscmp-jdbc.xml
(optional) -->
      <metadata>
         <type-mapping>DB2</type-mapping>
      </metadata>
  </local-tx-datasource>

</datasources>
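
If you want to verify that the datasource resolves before wiring it into ODE,
a minimal in-container sketch (e.g. called from a test servlet or JSP) could
look like the following - the JNDI name "java:ODEDS" and the class name
DataSourceCheck are just the example names used in this HowTo, not anything
ODE requires:

import java.sql.Connection;
import javax.naming.InitialContext;
import javax.sql.DataSource;

public class DataSourceCheck {
    public static void check() throws Exception {
        // Look up the datasource bound by db2-ds.xml under the name that
        // ode-axis2.properties points to (ode-axis2.db.ext.dataSource).
        InitialContext ctx = new InitialContext();
        DataSource ds = (DataSource) ctx.lookup("java:ODEDS");
        Connection con = ds.getConnection();
        try {
            System.out.println("Connected to: "
                    + con.getMetaData().getDatabaseProductName());
        } finally {
            con.close();
        }
    }
}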


-----------------------
### simple scheduler ###
-----------------------

(9) copy the modified simple scheduler jar to
[JBOSS_HOME]\server\[server]\deploy\ode.war\WEB-INF\lib

- in our case: ode-scheduler-simple-1.1.1-SNAPSHOT.jar


----------------------------------------------
### Modification within class JdbcDelegate ###
----------------------------------------------

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * 
 * UPDATED by Stefan Jakoubi and Simon Tjoa 
 * PERFORMED CHANGES: 
 * This class has been modified by Stefan Jakoubi and Simon Tjoa 
 *  (Secure Business Austria) to enable the use of the scalar function MOD
 *  within prepared statements. Thus, the scalar function MOD has been
 *  replaced with the internally DB2 formula used for determining MOD
results:
 *  MOD(x, y) = x - y * (x / y)     
 */

package org.apache.ode.scheduler.simple;

import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import javax.sql.DataSource;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ode.utils.StreamUtils;

/**
 * JDBC-based implementation of the {@link DatabaseDelegate} interface. Should work with most
 * reasonably behaved databases.
 * 
 * @author Maciej Szefler ( m s z e f l e r @ g m a i l . c o m ), modified by Stefan Jakoubi and Simon Tjoa
 */
public class JdbcDelegate implements DatabaseDelegate {

    private static final Log __log = LogFactory.getLog(JdbcDelegate.class);

    private static final String DELETE_JOB = "delete from ODE_JOB where jobid = ? and nodeid = ?";

    private static final String UPDATE_REASSIGN = "update ODE_JOB set nodeid = ?, scheduled = 0 where nodeid = ?";

    private static final String UPGRADE_JOB_DEFAULT = "update ODE_JOB set nodeid = ? where nodeid is null and scheduled = 0 "
            + "and mod(ts,?) = ? and ts < ?";

    private static final String UPGRADE_JOB_SQLSERVER = "update ODE_JOB set nodeid = ? where nodeid is null and scheduled = 0 "
        + "and (ts % ?) = ? and ts < ?";

    // UPDATED by Stefan Jakoubi and Simon Tjoa
    private static final String UPGRADE_JOB_DB2 = "update ODE_JOB set nodeid = ? where nodeid is null and scheduled = 0 "
        + " and (BIGINT(ts - ? * BIGINT(ts/?)) = ?) and ts < ? ";

    private static final String SAVE_JOB = "insert into ODE_JOB "
            + " (jobid, nodeid, ts, scheduled, transacted, details) values(?, ?, ?, ?, ?, ?)";

    private static final String GET_NODEIDS = "select distinct nodeid from ODE_JOB";

    private static final String SCHEDULE_IMMEDIATE = "select jobid, ts, transacted, scheduled, details from ODE_JOB "
            + "where nodeid = ? and scheduled = 0 and ts < ? order by ts";

    private static final String UPDATE_SCHEDULED = "update ODE_JOB set scheduled = 1 where jobid in (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

    private static final int UPDATE_SCHEDULED_SLOTS = 10;

    private DataSource _ds;

    private Dialect _dialect;
    
    public JdbcDelegate(DataSource ds) {
        _ds = ds;
        _dialect = guessDialect();
    }

    public boolean deleteJob(String jobid, String nodeId) throws
DatabaseException {
        if (__log.isDebugEnabled())
            __log.debug("deleteJob " + jobid + " on node " + nodeId);

        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            ps = con.prepareStatement(DELETE_JOB);
            ps.setString(1, jobid);
            ps.setString(2, nodeId);
            return ps.executeUpdate() == 1;
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
    }

    public List<String> getNodeIds() throws DatabaseException {
        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            ps = con.prepareStatement(GET_NODEIDS,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            ResultSet rs = ps.executeQuery();
            ArrayList<String> nodes = new ArrayList<String>();
            while (rs.next()) {
                String nodeId = rs.getString(1);
                if (nodeId != null)
                    nodes.add(rs.getString(1));
            }
            if (__log.isDebugEnabled())
                __log.debug("getNodeIds: " + nodes);
            return nodes;
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
    }

    public boolean insertJob(Job job, String nodeId, boolean loaded) throws
DatabaseException {
        if (__log.isDebugEnabled())
            __log.debug("insertJob " + job.jobId + " on node " + nodeId + "
loaded=" + loaded);

        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            ps = con.prepareStatement(SAVE_JOB);
            ps.setString(1, job.jobId);
            ps.setString(2, nodeId);
            ps.setLong(3, job.schedDate);
            ps.setInt(4, asInteger(loaded));
            ps.setInt(5, asInteger(job.transacted));
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try {
                StreamUtils.write(bos, (Serializable) job.detail);
            } catch (Exception ex) {
                __log.error("Error serializing job detail: " + job.detail);
                throw new DatabaseException(ex);
            }
            ps.setBytes(6, bos.toByteArray());
            return ps.executeUpdate() == 1;
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
    }

    @SuppressWarnings("unchecked")
    public List<Job> dequeueImmediate(String nodeId, long maxtime, int
maxjobs) throws DatabaseException {
        ArrayList<Job> ret = new ArrayList<Job>(maxjobs);
        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            ps = con.prepareStatement(SCHEDULE_IMMEDIATE);
            ps.setString(1, nodeId);
            ps.setLong(2, maxtime);
            ps.setMaxRows(maxjobs);
            ResultSet rs = ps.executeQuery();
            while (rs.next()) {
                Map<String, Object> details;
                try {
                    ObjectInputStream is = new
ObjectInputStream(rs.getBinaryStream(5));
                    details = (Map<String, Object>) is.readObject();
                    is.close();
                } catch (Exception e) {
                    throw new DatabaseException("Error deserializing job
details", e);
                }
                Job job = new Job(rs.getLong(2), rs.getString(1),
asBoolean(rs.getInt(3)), details);
                ret.add(job);
            }
            rs.close();
            ps.close();
            
            // mark jobs as scheduled, UPDATE_SCHEDULED_SLOTS at a time
            int j = 0;
            int updateCount = 0;
            ps = con.prepareStatement(UPDATE_SCHEDULED);
            for (int updates = 1; updates <= (ret.size() /
UPDATE_SCHEDULED_SLOTS) + 1; updates++) {
                for (int i = 1; i <= UPDATE_SCHEDULED_SLOTS; i++) {
                    ps.setString(i, j < ret.size() ? ret.get(j).jobId : "");
                    j++;
                }
                ps.execute();
                updateCount += ps.getUpdateCount();
            }
            if (updateCount != ret.size()) {
                throw new DatabaseException(
                        "Updating scheduled jobs failed to update all jobs;
expected=" + ret.size()
                                + " actual=" + updateCount);
            }
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
        return ret;
    }

    public int updateReassign(String oldnode, String newnode) throws
DatabaseException {
        if (__log.isDebugEnabled())
            __log.debug("updateReassign from " + oldnode + " ---> " +
newnode);
        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            ps = con.prepareStatement(UPDATE_REASSIGN);
            ps.setString(1, newnode);
            ps.setString(2, oldnode);
            return ps.executeUpdate();
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
    }

    public int updateAssignToNode(String node, int i, int numNodes, long
maxtime) throws DatabaseException {
        if (__log.isDebugEnabled())
            __log.debug("updateAsssignToNode node=" + node + " " + i + "/" +
numNodes + " maxtime=" + maxtime);
        Connection con = null;
        PreparedStatement ps = null;
        try {
            con = getConnection();
            // UPDATED by Stefan Jakoubi and Simon Tjoa
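            // Note: UPGRADE_JOB_DB2 binds numNodes twice because the rewritten
            // expression (ts - numNodes * (ts / numNodes)) uses it as both the
            // multiplier and the divisor; parameters 4 and 5 (i, maxtime) then
            // correspond to parameters 3 and 4 of the other dialects.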
            if (_dialect == Dialect.DB2) {
               ps = con.prepareStatement(UPGRADE_JOB_DB2);
               ps.setString(1, node);
               ps.setInt(2, numNodes);
               ps.setInt(3, numNodes);
               ps.setInt(4, i);
               ps.setLong(5, maxtime);
            } else
            if (_dialect == Dialect.SQLSERVER) {
                // Pulled out so that a new PreparedStatement for DB2 could be introduced
                ps = con.prepareStatement(UPGRADE_JOB_SQLSERVER);
                ps.setString(1, node);
                ps.setInt(2, numNodes);
                ps.setInt(3, i);
                ps.setLong(4, maxtime);
            } else {
                // Pulled out so that a new PreparedStatement for DB2 could be introduced
                ps = con.prepareStatement(UPGRADE_JOB_DEFAULT);
                ps.setString(1, node);
                ps.setInt(2, numNodes);
                ps.setInt(3, i);
                ps.setLong(4, maxtime);
            }
                                         
             return ps.executeUpdate();
        } catch (SQLException se) {
            throw new DatabaseException(se);
        } finally {
            close(ps);
            close(con);
        }
    }

    private Connection getConnection() throws SQLException {
        return _ds.getConnection();
    }

    private int asInteger(boolean value) {
        return (value ? 1 : 0);
    }

    private boolean asBoolean(int value) {
        return (value != 0);
    }

    private void close(PreparedStatement ps) {
        if (ps != null) {
            try {
                ps.close();
            } catch (Exception e) {
                __log.warn("Exception while closing prepared statement", e);
            }
        }
    }

    private void close(Connection con) {
        if (con != null) {
            try {
                con.close();
            } catch (Exception e) {
                __log.warn("Exception while closing connection", e);
            }
        }
    }

    private Dialect guessDialect() {
        Dialect d = Dialect.UNKNOWN;
        Connection con = null;
        try {
            con = getConnection();
            DatabaseMetaData metaData = con.getMetaData();
            if (metaData != null) {
                String dbProductName = metaData.getDatabaseProductName();
                int dbMajorVer = metaData.getDatabaseMajorVersion();
                __log.debug("Using database " + dbProductName + " major
version " + dbMajorVer);
                if (dbProductName.indexOf("DB2") >= 0) {
                    d = Dialect.DB2;
                } else if (dbProductName.indexOf("Derby") >= 0) {
                    d = Dialect.DERBY;
                } else if (dbProductName.indexOf("Firebird") >= 0) {
                    d = Dialect.FIREBIRD;
                } else if (dbProductName.indexOf("HSQL") >= 0) {
                    d = Dialect.HSQL;
                } else if (dbProductName.indexOf("Microsoft SQL") >= 0) {
                    d = Dialect.SQLSERVER;
                } else if (dbProductName.indexOf("MySQL") >= 0) {
                    d = Dialect.MYSQL;
                } else if (dbProductName.indexOf("Sybase") >= 0) {
                    d = Dialect.SYBASE;
                }
            }
        } catch (SQLException e) {
            __log.warn("Unable to determine database dialect", e);
        } finally {
            close(con);
        }
        __log.info("Using database dialect: " + d);
        return d;
    }

    enum Dialect {
        DB2, DERBY, FIREBIRD, HSQL, MYSQL, ORACLE, SQLSERVER, SYBASE,
UNKNOWN 
    }
    
}
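
If you want to double-check the arithmetic without a database, a tiny
standalone sketch like the one below (the class name Db2ModRewriteCheck is
ours, it is not part of ODE) shows that the rewritten expression used in
UPGRADE_JOB_DB2 gives the same result as MOD for the non-negative values the
scheduler works with:

public class Db2ModRewriteCheck {
    public static void main(String[] args) {
        long[] timestamps = { 0L, 1L, 17L, 1209385490000L };
        int numNodes = 3;
        for (long ts : timestamps) {
            // DB2-friendly form used in UPGRADE_JOB_DB2
            long rewritten = ts - numNodes * (ts / numNodes);
            // what mod(ts, ?) in UPGRADE_JOB_DEFAULT computes
            long expected = ts % numNodes;
            System.out.println("ts=" + ts + " rewritten=" + rewritten
                    + " mod=" + expected + " match=" + (rewritten == expected));
        }
    }
}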






Stefan Jakoubi wrote:
> 
> Hi Mathieu,
> 
> thanks for your fast reply!
> 
>> We use XDoclet annotations in the source code to generate the mapping. So
>> you'll have to change those if you want to alter it. Alternatively you
>> can
>> run on the generated mappings but that's probably a pain (updates). But
>> why
>> do you want to change the mapping?
> 
> Furthermore, thank you for outlining the "where to find" - this helps us a
> lot!!!
> 
> We unfortunately have to alter the tables to conform to our partner's
> requirements:
> (1) project specific tablename prefix
> (2) "ID" column not allowed -> "tablename_ID"
> (3) Reserved words (DB2) such as "TYPE" are not allowed as column name
> 
> 
> Question: shall I post/upload the changes we have made within the simple
> scheduler?
> 
> Thanks,
> Stefan
> 
> 
> 
> 
> 
> 
> Matthieu Riou-5 wrote:
>> 
>> On Fri, Apr 25, 2008 at 4:26 AM, Stefan Jakoubi <
>> sjakoubi@securityresearch.at> wrote:
>> 
>>> Dear ODE community,
>>>
>>>
>>>
>>> in this mail I (a) follow up a topic raised from a colleague
>>> (http://ode.markmail.org/message/gyoxq5cz3uruizoc?q=tjoa) of mine and
>>> (b) raise myself a question concerning configuring ODE for DB2.
>>>
>>>
>>>
>>> @ (a):
>>>
>>> When migrating ODE to DB2 there is a problem within the simple
>>> scheduler. In particular, the scalar function MOD(x, y) within a
>>> prepared statement throws an awful exception.
>>>
>>> The solution is to replace the MOD(x, y) within the prepared statement
>>> with: x - y * (x / y). This is according to how DB2 internally
>>> determines the result for the scalar function MOD.
>>>
>>>
>> Thanks for letting us know.
>> 
>> 
>>>
>>>
>>> @ (b):
>>>
>>> Can anybody please provide a complete table schema ODE requires?
>>>
>> 
>> You'll get several ones for different databases in
>> dao-hibernate-db/target
>> after a build.
>> 
>> 
>>>
>>> Does anybody know where to configure all required Hibernate mappings so
>>> that ODE table names may be altered?
>>>
>> 
>> We use XDoclet annotations in the source code to generate the mapping. So
>> you'll have to change those if you want to alter it. Alternatively you
>> can
>> run on the generated mappings but that's probably a pain (updates). But
>> why
>> do you want to change the mapping?
>> 
>> 
>>>
>>> When switching persistence to Hibernate, is it true that openjpa is not
>>> used any more (and thus, no jpa mappings have to be modified in case
>>> that I alter table names?)
>>>
>>>
>> Right, it's either Hibernate or OpenJPA, they don't coexist.
>> 
>> 
>>>
>>>
>>> Finally I want to tell that as soon as we get a solution for
>>> JBOSS/ODE/DB2 we will contribute at Jira!
>>>
>>>
>> Thanks!
>> 
>> Matthieu
>> 
>> 
>>>
>>>
>>> Thanks in advance & cheers,
>>>
>>> Stefan
>>>
>>>
>>>
>>>
>>>
>>>
>> 
>> 
> 
> 

-- 
View this message in context: http://www.nabble.com/Hibernate-mappings-tp16899339p16939634.html
Sent from the Apache Ode User mailing list archive at Nabble.com.

