I have set up a Kafka producer that writes a List of records to a Kafka topic. It works fine for 1 million rows/records, but the production file I have contains 1.1 million records. What is the best way to deal with such huge data in my KafkaProducer?

Below is the code I used to process the 1 million records; it takes about 4 minutes to write them all to the Kafka topic.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class KafkaSourceTask extends SourceTask {

    private String filename;

    private String topic;

    private RandomAccessFile raf;

    private long lastRecordedOffset = 0L;

    private BufferedReader bufferedReader = null;

    // Schema for one CSV record: emp_id, name, last_name, department
    Schema schema = SchemaBuilder.struct().field("emp_id", 
            Schema.STRING_SCHEMA).field("name", Schema.STRING_SCHEMA)
            .field("last_name", Schema.STRING_SCHEMA).field("department", 
            Schema.STRING_SCHEMA).build();

// Read the source file path and target topic from the connector configuration
@Override
public void start(Map<String, String> props) {
    filename = props.get("file");
    topic = props.get("topic");

}

// Reads the whole file and returns every line as a SourceRecord in a single batch
@Override
public List<SourceRecord> poll() throws InterruptedException {
    long startTime = System.nanoTime(); // timing instrumentation
    try {
        // bufferedReader is only used for the ready() check below;
        // the actual reading is done through the RandomAccessFile
        bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(new File(filename)),
                StandardCharsets.UTF_8));
        raf = new RandomAccessFile(filename, "r");
        long filePointer = raf.getFilePointer();
        System.out.println(filePointer + " - " + lastRecordedOffset);
        if (bufferedReader.ready() && (filePointer > lastRecordedOffset || filePointer == 0)) {
            raf.seek(lastRecordedOffset);

            ArrayList<SourceRecord> records = new ArrayList<>();
            String line;
            // read every remaining line and convert it into a SourceRecord
            while ((line = raf.readLine()) != null) {
                records.add(new SourceRecord(null, null, topic, schema, buildRecordValue(line)));
            }
            lastRecordedOffset = raf.getFilePointer();
            raf.close();
            bufferedReader.close();

            long endTime = System.nanoTime(); // timing instrumentation (currently unused)
            return records;
        }
    }
    catch (IOException e) {

        e.printStackTrace();
    }

    return null;
}

@Override
public synchronized void stop() {
    try {
        if (raf != null) {
            raf.close();
        }
    }
    catch (IOException e) {
        e.printStackTrace();
    }
}

// Split a comma-separated line into the four schema fields
private Struct buildRecordValue(String line) {
    String[] values = line.split(",");
    Struct value = new Struct(schema).put("emp_id", values[0]).put("name", values[1]).put("last_name", values[2])
            .put("department", values[3]);
    return value;
}

@Override
public String version() {
    // TODO Auto-generated method stub
    return null;
}
}
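
One direction I have been considering (just an untested sketch on my side, with a hypothetical MAX_BATCH_SIZE constant) is to stop returning the entire file from a single poll() call and instead keep the RandomAccessFile open between polls, handing the records back in bounded batches, roughly like this:

// Sketch only, not tested: return at most MAX_BATCH_SIZE records per poll()
// so the framework can start sending data while the rest of the file is read.
private static final int MAX_BATCH_SIZE = 10_000; // hypothetical batch size

@Override
public List<SourceRecord> poll() throws InterruptedException {
    try {
        if (raf == null) {
            raf = new RandomAccessFile(filename, "r");
            raf.seek(lastRecordedOffset);
        }

        List<SourceRecord> records = new ArrayList<>(MAX_BATCH_SIZE);
        String line;
        while (records.size() < MAX_BATCH_SIZE && (line = raf.readLine()) != null) {
            records.add(new SourceRecord(null, null, topic, schema, buildRecordValue(line)));
        }
        lastRecordedOffset = raf.getFilePointer();

        if (records.isEmpty()) {
            return null; // nothing new to read right now
        }
        return records;
    }
    catch (IOException e) {
        e.printStackTrace();
        return null;
    }
}

My thinking is that smaller batches would also keep memory usage flat instead of holding 1.1 million SourceRecord objects at once, but I am not sure whether this is the right approach.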

Any help or suggestions on this would be greatly appreciated. Thanks in advance.