To get started with i.o.cluster, you need to download the source code of the current release (for example 2021.1) from github.com, build it using Maven, and install the jar into your local Maven repository (minimum requirements for the build: JDK 1.8, Maven 3). Then add the following dependency to the pom.xml of your project:
Second, create your interference.properties file to run the test case and put it in the config/ directory. This is a simple configuration with the most common parameter values and single-node startup mode:
###########################################
# init storage parameters
# (cannot be changed after
# the 1st launch of node)
###########################################
# identifier of current node in the cluster - integer value from 1 to 64
local.node.id=1
# number of internal storage threads and number of files for each storage type
files.amount=4
# the size of physical storage frame (in bytes) for all data, temp, undo files
frame.size=8192
# the size of physical storage frame (in bytes) for index files
frame.size.ix=8192
# system code page for serialize/deserialize String objects
codepage=UTF8
###########################################
# runtime parameters (may be changed later)
###########################################
# path where datafiles stored
db.path=c:/test0
# path where checkpoint log files stored
journal.path=c:/test0
# management console (not used now)
mmport=8086
# current node's server port for transport interactions
rmport=8059
# date format used in management console
dateformat=dd.MM.yyyy
# disk write mode: write through = sync/write back = async
diskio.mode=sync
# time between writes of changed frames from the queue to disk in milliseconds
sync.period=1000
# lock data changes for the duration of a scheduled sync of frames to disk
sync.lock.enable=true
# size of the blocking queue used by the SQL retrieve
# mechanism to prevent heap overload
retrieve.queue.size=100000
# the number of threads for parallel processing of the SQL query
retrieve.threads.amount=8
# list of nodeIds, hosts and ports of cluster nodes, separated by commas.
# the list must contain entries of the following format:
# nodeId:host:port,nodeId:host:port, etc.
# the list must contain all cluster nodes excluding the current one
# if the value is not set, the node will function in single mode (as a local database)
cluster.nodes=
# a list of fully qualified names of entity classes, separated by commas,
# for which when the service starts, verification will be performed and,
# if necessary, automatic registration
auto.register.classes=su.interference.test.entity.Dept,su.interference.test.entity.Emp,su.interference.test.entity.StreamTable
# transport parameters - do not change these values
transport.sync.timeout=60000
transport.read.buffer=33554432
transport.write.buffer=33554432
# cleanup parameters
# enable heap cleanup if possible
cleanup.enable=true
# closed transaction cleanup timeout
cleanup.tx.timeout=5000
# cleanup of unused frames in heap timeout
cleanup.frames.timeout=3000
# max amount of data frames in cleanup excluded table
cleanup.data.threshold=1000
# max amount of index frames in cleanup excluded index
cleanup.ix.threshold=2000
# thresholds in percent in heap for cleanup launch
cleanup.heap.data.threshold=60
cleanup.heap.ix.threshold=70
cleanup.heap.temp.threshold=50
cleanup.heap.undo.threshold=50
Then, create two entity classes - Dept and Emp - on which further operations will be performed:
import javax.persistence.*;
import javax.persistence.Table;
// Department entity persisted by the interference engine.
// Field names double as column names: the unique DeptPk index is
// declared over deptId via the @Table annotation below.
@Entity
@Table(name="Dept", indexes={@Index(name="DeptPk", columnList="deptId", unique=true)})
public class Dept {
// primary key; covered by the unique DeptPk index
@Column
@Id
private int deptId;
// human-readable department name
@Column
private String deptName;
// free-form description
@Column
private String descript;
// no-arg constructor; NOTE(review): presumably required by the framework
// for entity instantiation - confirm against the interference manual
public Dept() {
}
public int getDeptId() {
return deptId;
}
public void setDeptId(int deptId) {
this.deptId = deptId;
}
public String getDeptName() {
return deptName;
}
public void setDeptName(String deptName) {
this.deptName = deptName;
}
public String getDescript() {
return descript;
}
public void setDescript(String descript) {
this.descript = descript;
}
}
import javax.persistence.*;
import javax.persistence.Table;
import java.util.Date;
// Employee entity persisted by the interference engine.
// EmpPk is a unique index on the primary key empId; EmpDeptKey is a
// non-unique index on deptId used for joins against Dept (see @Table below).
@Entity
@Table(name="Emp", indexes={@Index(name="EmpPk", columnList="empId", unique=true),@Index(name="EmpDeptKey", columnList="deptId", unique=false)})
public class Emp {
// primary key; covered by the unique EmpPk index
@Column
@Id
private int empId;
// employee display name
@Column
private String empName;
// foreign reference to Dept.deptId; covered by the EmpDeptKey index
@Column
private int deptId;
// free-form description
@Column
private String descript;
// record creation timestamp
@Column
private Date createDate;
// no-arg constructor; NOTE(review): presumably required by the framework
// for entity instantiation - confirm against the interference manual
public Emp () {
}
public int getEmpId() {
return empId;
}
public void setEmpId(int empId) {
this.empId = empId;
}
public String getEmpName() {
return empName;
}
public void setEmpName(String empName) {
this.empName = empName;
}
public int getDeptId() {
return deptId;
}
public void setDeptId(int deptId) {
this.deptId = deptId;
}
public String getDescript() {
return descript;
}
public void setDescript(String descript) {
this.descript = descript;
}
public Date getCreateDate() {
return createDate;
}
public void setCreateDate(Date createDate) {
this.createDate = createDate;
}
}
Then, create the java class with the following methods (make sure that Instance and Session are su.interference.core.Instance and su.interference.persistent.Session):
// Starts the interference service inside the host java application.
// Registers a JVM shutdown hook that stops the instance on exit, then
// boots the instance under the ROOT user session.
public void startup() {
try {
instance = Instance.getInstance();
final Session bootSession = Session.getSession();
// stop the storage engine cleanly when the JVM goes down
final Runnable stopTask = () -> {
try {
instance.shutdownInstance();
} catch (Exception e) {
e.printStackTrace();
}
System.out.println("stopped");
};
Runtime.getRuntime().addShutdownHook(new Thread(stopTask));
bootSession.setUserId(Session.ROOT_USER_ID);
instance.startupInstance(bootSession);
} catch (Exception e) {
e.printStackTrace();
}
}
// Loads 10000 generated test records into both the Dept and Emp tables
// and commits them in a single transaction.
public void loadData() {
try {
final Session session = Session.getSession();
for (int id = 1; id <= 10000; id++) {
// build one Dept row
final Dept dept = (Dept) session.newEntity(Dept.class, new Object[]{});
dept.setDeptId(id);
dept.setDeptName("Department "+id);
dept.setDescript("abcdefghijklmn "+id);
// build the matching Emp row referencing the same deptId
final Emp emp = (Emp) session.newEntity(Emp.class, new Object[]{});
emp.setEmpId(id);
emp.setDeptId(id);
emp.setEmpName("John Doe "+id);
emp.setDescript("abcdefghijklmn "+id);
session.persist(dept);
session.persist(emp);
}
session.commit();
System.out.println("10000 records inserted");
} catch (Exception e) {
e.printStackTrace();
}
}
// Executes an SQL join of Dept and Emp on deptId and prints every row.
// NOTE: your.domain must match the java package that actually contains
// the Dept and Emp classes.
public void executeQuery() throws Exception {
final Session session = Session.getSession();
final ResultSet rs = session.execute("select d.deptName, e.empName, e.descript " +
"from your.domain.entity.Dept d, your.domain.entity.Emp e " +
"where d.deptId = e.deptId");
// poll() returns null once the result queue is exhausted
for (Object o = rs.poll(session); o != null; o = rs.poll(session)) {
final GenericResult row = (GenericResult) o;
// result columns are addressed as table alias + field name (d + deptName, etc.)
System.out.println(row.getValueByName("ddeptName") + ":" +
row.getValueByName("eempName") + ":" +
row.getValueByName("edescript"));
}
}
// The find() method uses indexes for fast data access.
// Looks up every Dept by primary key and prints its name.
public void findDepts() throws Exception {
final Session session = Session.getSession();
for (int id = 1; id <= 10000; id++) {
final Dept dept = (Dept) session.find(Dept.class, id);
// skip ids that are not present instead of failing with an NPE
if (dept != null) {
// fix: Dept declares a no-arg getDeptName() (see the entity class
// above); the original getDeptName(session) call would not compile
System.out.println(dept.getDeptName());
}
}
}
// Rewrites deptName for every Dept row, committing once at the end.
public void updateDepts() throws Exception {
final Session session = Session.getSession();
for (int deptId = 1; deptId <= 10000; deptId++) {
final Dept dept = (Dept) session.find(Dept.class, deptId);
dept.setDeptName("Outdoor staff");
session.persist(dept);
}
session.commit();
}
// Direct access to storage using the table poll() method.
// Iterates all Dept records straight off the table queue and prints them.
public void pollDepts() throws Exception {
final Session session = Session.getSession();
session.startStatement();
final Table t = Instance.getInstance().getTableByName(Dept.class.getName());
// fix: the original polled from an undefined variable `s`; use the
// session created above
CDR o = (CDR) t.poll(session);
while (o != null) {
System.out.println(o.getDeptId() + " : " + o.getDeptName());
// fix: re-poll inside the loop - the original never advanced and
// spun forever on the first record
o = (CDR) t.poll(session);
}
// see manual for more understanding with table direct access
session.closeQueue();
}
Use the following startup parameters to run this project (configure app-log-config.xml as you wish):