List<ConsultationTrtcAudit> tracAuditList = new ArrayList<>();
int start = 0;
int end = 0;
int count = consultationNos.size();
// query in batches of BATCH_QUERY instead of sending every consultation number at once
while (end < count) {
    end = Math.min(end + BATCH_QUERY, count);
    List<String> consultationNoBatch = consultationNos.subList(start, end);
    ConsultationMarkingRecordDto dto = new ConsultationMarkingRecordDto();
    dto.setConsultationNos(consultationNoBatch); // pass the current batch to the query (setter name assumed from the DTO)
    tracAuditList.addAll(consultatioTrtcAuditMapper.listByConsultationNos(dto));
    start += BATCH_QUERY;
}
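The start/end bookkeeping above can also be expressed with a list-partitioning helper; a minimal sketch, assuming Guava is available and the same setConsultationNos setter exists on the DTO:

import com.google.common.collect.Lists;

List<ConsultationTrtcAudit> tracAuditList = new ArrayList<>();
for (List<String> batch : Lists.partition(consultationNos, BATCH_QUERY)) {
    ConsultationMarkingRecordDto dto = new ConsultationMarkingRecordDto();
    dto.setConsultationNos(batch);  // assumed setter, as above
    tracAuditList.addAll(consultatioTrtcAuditMapper.listByConsultationNos(dto));
}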
To address this problem, the following optimizations can be tried:
Example code: paginated querying, fetching the data in fixed-size pages instead of loading everything in a single call:

int pageSize = 1000;   // fetch 1000 rows per query
int offset = 0;        // current offset into the result set
List<Data> dataList = new ArrayList<>();
while (true) {
    // paged query against the database (e.g. LIMIT pageSize OFFSET offset)
    List<Data> pageList = dao.getDataList(offset, pageSize);
    if (pageList.isEmpty()) {
        break;
    }
    dataList.addAll(pageList);
    offset += pageSize;
}
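The dao.getDataList(offset, pageSize) method itself is not shown; assuming a MyBatis mapper like the one in the question, it could be a plain LIMIT/OFFSET query. A hypothetical sketch (interface, table, and column names are assumptions):

import java.util.List;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;

public interface DataMapper {
    // hypothetical paged query; table and column names are placeholders
    @Select("SELECT id, value FROM data ORDER BY id LIMIT #{pageSize} OFFSET #{offset}")
    List<Data> getDataList(@Param("offset") int offset, @Param("pageSize") int pageSize);
}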
Example code: split the data across a fixed thread pool and process the chunks in parallel:

int threadCount = 10;  // process with 10 worker threads
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
List<Data> dataList = dao.getDataList();  // load all of the data
List<Future<List<Result>>> futureList = new ArrayList<>();
int pageSize = dataList.size() / threadCount + 1;  // number of rows handled by each thread
for (int i = 0; i < threadCount; i++) {
    int fromIndex = i * pageSize;
    if (fromIndex >= dataList.size()) {
        break;  // fewer rows than threads: nothing left for the remaining threads
    }
    int toIndex = Math.min((i + 1) * pageSize, dataList.size());
    List<Data> subList = dataList.subList(fromIndex, toIndex);
    // hand the chunk to a worker in the pool for parallel processing
    Future<List<Result>> future = executor.submit(new DataProcessor(subList));
    futureList.add(future);
}
List<Result> resultList = new ArrayList<>();
for (Future<List<Result>> future : futureList) {
    // get() blocks until the worker finishes; it may throw InterruptedException/ExecutionException
    resultList.addAll(future.get());
}
executor.shutdown();  // shut down the thread pool
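DataProcessor is referenced above but never defined; for executor.submit to return a Future<List<Result>> it needs to be a Callable<List<Result>>. A minimal sketch under that assumption (the per-row logic is a placeholder):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

public class DataProcessor implements Callable<List<Result>> {
    private final List<Data> subList;

    public DataProcessor(List<Data> subList) {
        this.subList = subList;
    }

    @Override
    public List<Result> call() {
        List<Result> results = new ArrayList<>();
        for (Data data : subList) {
            results.add(new Result(data.getId(), processData(data)));
        }
        return results;
    }

    // placeholder for the real per-row processing
    private int processData(Data data) {
        return data.getValue();
    }
}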
Example code: process each row once, accumulating the total while collecting the per-row results:

List<Data> dataList = dao.getDataList();
int sum = 0;
List<Result> resultList = new ArrayList<>();
for (Data data : dataList) {
    int value = processData(data);   // per-row processing
    sum += data.getValue();          // running total of the raw values
    resultList.add(new Result(data.getId(), value));
}
Result summary = new Result(0, sum); // overall total as a summary record
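The snippets above assume simple Data and Result holder types carrying an id and a value; they are not shown in the original, so the shapes below are an assumption:

public class Data {
    private final long id;
    private final int value;

    public Data(long id, int value) {
        this.id = id;
        this.value = value;
    }

    public long getId() { return id; }
    public int getValue() { return value; }
}

public class Result {
    private final long id;
    private final int value;

    public Result(long id, int value) {
        this.id = id;
        this.value = value;
    }
}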