I have a Lambda function that imports a specific CSV file from S3 into MySQL. However, the CSV file is around 1 GB. When I run this code, it never finishes processing and the function times out.
//s3 to rds
const fs = require("fs");
const AWS = require('aws-sdk');
const mysql = require('mysql');
const config = require('./config.json');

const s3 = new AWS.S3({
    accessKeyId: 'XXXXXXXXXXXXXXX',
    secretAccessKey: 'XXXXXXXXXXXXXXXXXXXXXXXXXXxx'
});

// Lambda can only write under /tmp (512 MB by default, so a 1 GB file may not even fit)
const filePath = '/tmp/<file_name>';

const pool = mysql.createPool({
    host: config.dbhost,
    user: config.dbuser,
    password: config.dbpassword,
    database: config.dbname
});

pool.getConnection((err, connection) => {
    if (err) throw err;
    console.log("Connected!");

    const s3Params = {
        Bucket: '<your_bucket_name>',
        Key: '<your_key>'
    };

    s3.getObject(s3Params, function (err, result) {
        if (err) throw new Error(err);

        // write the whole body to disk and wait for the write to complete
        // before querying (a bare write-stream .write() doesn't wait)
        fs.writeFile(filePath, result.Body, (err) => {
            if (err) throw err;
            console.log('file stored successfully');

            connection.query('TRUNCATE TABLE <table_name>', (err) => {
                if (err) throw new Error(err);
                console.log('table truncated');

                // note: the MySQL server must have local_infile enabled for this to work
                const query = `LOAD DATA LOCAL INFILE '${filePath}' INTO TABLE <table_name> FIELDS TERMINATED BY ',' ENCLOSED BY '"' IGNORE 1 LINES`;
                connection.query(query, function (err, result) {
                    if (err) throw err;
                    console.log("Result: " + result);
                    connection.release();
                    fs.unlinkSync(filePath);
                    console.log('file deleted');
                });
            });
        });
    });
});
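I suspect s3.getObject is buffering the entire 1 GB body in memory before anything reaches disk. Would a streaming download like the sketch below help (the bucket, key, and /tmp path are the same placeholders as above), or is the bottleneck elsewhere? I'm not sure it fixes the timeout by itself:

const AWS = require('aws-sdk');
const fs = require('fs');

const s3 = new AWS.S3();
const s3Params = { Bucket: '<your_bucket_name>', Key: '<your_key>' };

// stream the object straight to /tmp instead of holding it all in memory
const download = s3.getObject(s3Params).createReadStream();
const file = fs.createWriteStream('/tmp/<file_name>');

download.on('error', (err) => console.error('S3 download failed', err));
file.on('finish', () => {
    console.log('download complete');
    // the TRUNCATE + LOAD DATA steps would run here, once the file is fully on disk
});

download.pipe(file);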
How can I make this work?