The first version opened a Connection to the production database, read the rows into a ResultSet, and opened a second Connection to the development database. Each row pulled from the ResultSet was then inserted into the development database through that second connection, and that was the whole copy. When the program ran it was far too slow: only about a thousand rows per second. The production database holds hundreds of millions of rows, so at that rate the sync would never finish in any reasonable time, and switching to PreparedStatement batching did not improve the speed by much. So I wondered whether multiple threads would help; several workers usually finish a job faster than one.
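For reference, the single-threaded version looked roughly like the sketch below. This is a minimal reconstruction, not the original code; the table name src_table, the fixed five columns, and the two connections passed in as parameters are assumptions for illustration.

import java.sql.*;

// Minimal sketch of the original single-threaded copy; table and column layout are assumed.
public class NaiveCopy {
    // Copies every row of srcTable from prodConn to devConn, one INSERT per row.
    static void copyAll(Connection prodConn, Connection devConn, String srcTable) throws SQLException {
        try (Statement queryStmt = prodConn.createStatement();
             ResultSet rs = queryStmt.executeQuery("SELECT * FROM " + srcTable);
             PreparedStatement insert = devConn.prepareStatement(
                     "INSERT INTO " + srcTable + " VALUES (?,?,?,?,?)")) { // assumes 5 columns
            while (rs.next()) {
                for (int col = 1; col <= 5; col++) {
                    insert.setObject(col, rs.getObject(col));
                }
                insert.executeUpdate(); // one network round trip per row -- the bottleneck
            }
        }
    }
}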
Suppose the production table has 10,000 rows and I start 5 threads: each thread takes 2,000 rows and inserts them into the development database concurrently. Oracle handles high concurrency well, so the speed should improve many times over. I recoded along these lines, set the batch size to commit every 10,000 rows, and used a java.util.concurrent.atomic.AtomicLong for the counter that tracks how many rows have been inserted. Once the program ran, the transfer was very fast, CPU utilization stayed around 70%~90%, and it now copies about 500,000 rows per second; within a few minutes the hundreds of millions of rows were all copied to the target database without losing a single one.
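The work split itself is just id-range arithmetic. The sketch below shows one way to compute the per-thread BETWEEN ranges that the comments in the full listing further down illustrate; it assumes the table has a contiguous numeric id column starting at 1, which is also what the implementation below relies on, and the row count is only an example chosen to reproduce the sample SQL.

// Sketch: how the per-thread id ranges are computed (contiguous ids 1..totalNum assumed).
public class RangeSplit {
    public static void main(String[] args) {
        long totalNum = 3_286_990L;   // example count; reproduces the sample SQL in the comments below
        int syncThreadNum = 5;
        // Round up so the last thread also covers any remainder.
        long perThread = (long) Math.ceil((double) totalNum / syncThreadNum);
        for (int i = 0; i < syncThreadNum; i++) {
            long from = i * perThread + 1;
            long to = i * perThread + perThread;
            System.out.println("Select * From dms_core_ds Where id between " + from + " And " + to);
        }
    }
}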
For the query I used the following statement:
String queryStr = "SELECT * FROM xx";
ResultSet coreRs = coreStmt.executeQuery(queryStr); // coreStmt is a java.sql.Statement created from the production connection
An intern asked: if the xx table has tens of millions of rows and you select them all into a ResultSet, won't that blow up the heap? JDBC was designed with this in mind. The driver does not pull the whole result set into memory; it fetches only a portion of the rows into the ResultSet and, once those rows are "used up", it automatically fetches the next batch from the database. You can call setFetchSize(int rows) to give the ResultSet a hint about how many rows to fetch per round trip, though I don't recommend tuning it by hand, because the JDBC driver adjusts the fetch size to the actual situation. Performance also depends directly on the network bandwidth.
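If you do want to set the hint yourself, it can be given on either the Statement or the ResultSet. A minimal sketch, where the value 5000 is purely illustrative:

import java.sql.*;

// Sketch: passing a fetch-size hint to the JDBC driver (5000 is an arbitrary example value).
public class FetchSizeDemo {
    static void scan(Connection coreConnection) throws SQLException {
        try (Statement coreStmt = coreConnection.createStatement()) {
            coreStmt.setFetchSize(5000);              // hint for result sets created by this statement
            try (ResultSet coreRs = coreStmt.executeQuery("SELECT * FROM xx")) {
                coreRs.setFetchSize(5000);            // the hint can also be (re)set on the ResultSet
                while (coreRs.next()) {
                    // rows are streamed from the server in chunks of roughly the hinted size
                }
            }
        }
    }
}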
Related code:
package com.dlbank.domain;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.log4j.Logger;

/**
 * <p>title: data synchronization class</p>
 * <p>Description: copies data from the production core database to the development database</p>
 * @author Tank Zhang
 */
public class CoreDataSyncImpl implements CoreDataSync {

    private List<String> coreTBNames; // names of the core tables to synchronize
    private ConnectionFactory connectionFactory;
    private Logger log = Logger.getLogger(getClass());

    private AtomicLong currentSynCount = new AtomicLong(0L); // rows synchronized so far

    private int syncThreadNum; // number of synchronization threads

    @Override
    public void syncData(int businessType) throws Exception {

        for (String tmpTBName : coreTBNames) {
            log.info("Start synchronizing table " + tmpTBName + " from the core database");
            currentSynCount.set(0L); // reset the counter for each table
            // Get a connection to the core database
            Connection coreConnection = connectionFactory.getDMSConnection(4);
            Statement coreStmt = coreConnection.createStatement();
            // Count the rows so the work can be divided among the threads
            ResultSet coreRs = coreStmt.executeQuery("SELECT count(*) FROM " + tmpTBName);
            coreRs.next();
            // Total number of rows to process
            long totalNum = coreRs.getLong(1);
            // Rows per thread (round up so the last range covers the remainder)
            long ownerRecordNum = (long) Math.ceil((double) totalNum / syncThreadNum);
            log.info("Total rows to synchronize: " + totalNum);
            log.info("Number of sync threads: " + syncThreadNum);
            log.info("Rows handled per thread: " + ownerRecordNum);
            // Start the worker threads that copy data to the target database
            for (int i = 0; i < syncThreadNum; i++) {
                StringBuilder sqlBuilder = new StringBuilder();
                // Examples of the assembled SQL:
                // Select * From dms_core_ds Where id between 1 And 657398
                // Select * From dms_core_ds Where id between 657399 And 1314796
                // Select * From dms_core_ds Where id between 1314797 And 1972194
                // Select * From dms_core_ds Where id between 1972195 And 2629592
                // Select * From dms_core_ds Where id between 2629593 And 3286990
                // ..
                sqlBuilder.append("Select * From ").append(tmpTBName)
                        .append(" Where id between ").append(i * ownerRecordNum + 1)
                        .append(" And ")
                        .append(i * ownerRecordNum + ownerRecordNum);
                Thread workThread = new Thread(
                        new WorkerHandler(sqlBuilder.toString(), businessType, tmpTBName));
                workThread.setName("SyncThread-" + i);
                workThread.start();
            }
            // Busy-wait until the workers have processed all rows of this table
            while (currentSynCount.get() < totalNum);
            // Sleep briefly so the database gets a chance to commit the remaining batch
            // (only needed for JUnit tests: the JVM shuts down when the test finishes,
            // so the worker threads may not get to run their final commit)
            //Thread.sleep(1000 * 3);
            log.info("Table " + tmpTBName + " synchronized, " + currentSynCount.get() + " rows copied");
        } // end for loop
    }

    public void setCoreTBNames(List<String> coreTBNames) {
        this.coreTBNames = coreTBNames;
    }

    public void setConnectionFactory(ConnectionFactory connectionFactory) {
        this.connectionFactory = connectionFactory;
    }

    public void setSyncThreadNum(int syncThreadNum) {
        this.syncThreadNum = syncThreadNum;
    }

    // Worker thread that copies one id range
    final class WorkerHandler implements Runnable {
        String queryStr;
        int businessType;
        String targetTBName;

        public WorkerHandler(String queryStr, int businessType, String targetTBName) {
            this.queryStr = queryStr;
            this.businessType = businessType;
            this.targetTBName = targetTBName;
        }

        @Override
        public void run() {
            try {
                // Start copying
                launchSyncData();
            } catch (Exception e) {
                log.error(e);
                e.printStackTrace();
            }
        }

        // Copy the rows selected by queryStr into the target table
        void launchSyncData() throws Exception {
            // Get a connection to the core database
            Connection coreConnection = connectionFactory.getDMSConnection(4);
            Statement coreStmt = coreConnection.createStatement();
            // Get a connection to the target database
            Connection targetConn = connectionFactory.getDMSConnection(businessType);
            targetConn.setAutoCommit(false); // commit manually
            PreparedStatement targetPstmt = targetConn.prepareStatement(
                    "INSERT INTO " + targetTBName + " VALUES (?,?,?,?,?)");
            ResultSet coreRs = coreStmt.executeQuery(queryStr);
            log.info(Thread.currentThread().getName() + "'s Query SQL::" + queryStr);
            int batchCounter = 0; // rows accumulated in the current batch
            while (coreRs.next()) {
                targetPstmt.setString(1, coreRs.getString(2));
                targetPstmt.setString(2, coreRs.getString(3));
                targetPstmt.setString(3, coreRs.getString(4));
                targetPstmt.setString(4, coreRs.getString(5));
                targetPstmt.setString(5, coreRs.getString(6));
                targetPstmt.addBatch();
                batchCounter++;
                currentSynCount.incrementAndGet(); // count the row
                if (batchCounter % 10000 == 0) { // flush and commit every 10,000 rows
                    targetPstmt.executeBatch();
                    targetPstmt.clearBatch();
                    targetConn.commit();
                }
            }
            // Flush and commit the remaining batch
            targetPstmt.executeBatch();
            targetPstmt.clearBatch();
            targetConn.commit();
            // Release the connections
            connectionFactory.release(targetConn, targetPstmt, coreRs);
        }
    }
}
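The class exposes setters rather than a constructor, so it is clearly meant to be wired up by a container, and the actual configuration is not shown here. As a purely hypothetical sketch of driving it directly from Java (MyConnectionFactory, the table name, and the businessType value 1 are placeholders, and the class is assumed to be on the classpath via com.dlbank.domain):

// Hypothetical wiring; MyConnectionFactory and the businessType value 1 are placeholders.
public class SyncRunner {
    public static void main(String[] args) throws Exception {
        CoreDataSyncImpl syncer = new CoreDataSyncImpl();
        syncer.setConnectionFactory(new MyConnectionFactory());        // some ConnectionFactory implementation
        syncer.setCoreTBNames(java.util.Arrays.asList("dms_core_ds")); // tables to copy
        syncer.setSyncThreadNum(5);                                    // five worker threads per table
        syncer.syncData(1);                                            // businessType selects the target library
    }
}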