NodeJS 正在读取指定区块大小的文件的节点

0g0grzrc  于 2022-11-22  发布在  Node.js
关注(0)|答案(4)|浏览(167)

目标:将大文件上传到AWS Glacier,而无需将整个文件保存在内存中。
我现在正在使用fs.readFileSync()上传到glacier,一切正常。但是,我需要处理大于4GB的文件,我想并行上传多个块。这意味着要进行多部分上传。我可以选择块的大小,但是glacier需要每个块的大小相同(除了最后一个)
这个线程建议我可以在读取流上设置块大小,但实际上并不保证我能得到它。
有没有关于如何在不将整个文件读入内存并手动拆分的情况下获得一致的部分的信息?
假设我能做到这一点,我只是打算使用集群,其中有几个进程以上传到AWS的速度完成流。如果这似乎是并行工作的错误方式,我希望有一些建议。

zqry0prt

zqry0prt1#

如果别无他法，您可以手动组合使用 fs.open()、fs.read() 和 fs.close()。示例：

// Read a file in fixed-size chunks using the low-level fd API.
// A single CHUNK_SIZE buffer is reused for every read, so memory use
// stays constant regardless of the file size.
const CHUNK_SIZE = 10 * 1024 * 1024; // 10MB per chunk
const buffer = Buffer.alloc(CHUNK_SIZE);
const filePath = '/tmp/foo';

fs.open(filePath, 'r', function(err, fd) {
  if (err) throw err;

  function readNextChunk() {
    // position `null` => continue from the fd's current file offset
    fs.read(fd, buffer, 0, CHUNK_SIZE, null, function(err, nread) {
      if (err) {
        // BUGFIX: close the descriptor before propagating the error,
        // otherwise the fd leaks when a read fails mid-file.
        fs.close(fd, function() {});
        throw err;
      }

      if (nread === 0) {
        // done reading file, do any necessary finalization steps

        fs.close(fd, function(err) {
          if (err) throw err;
        });
        return;
      }

      let data;
      if (nread < CHUNK_SIZE) {
        // last (short) chunk: expose only the bytes actually read.
        // subarray() replaces the deprecated Buffer#slice().
        data = buffer.subarray(0, nread);
      } else {
        data = buffer;
      }

      // do something with `data`, then call `readNextChunk();`
    });
  }
  readNextChunk();
});
qrjkbowd

qrjkbowd2#

您可以考虑使用下面的代码片段,其中我们以1024字节的块读取文件

// Stream a file in 1KB chunks by capping the read stream's highWaterMark.
const fs = require('fs');

let data = '';

// highWaterMark limits how many bytes each 'data' event may deliver.
const readStream = fs.createReadStream('/tmp/foo.txt', { highWaterMark: 1 * 1024, encoding: 'utf8' });

readStream.on('error', function(err) {
    // BUGFIX: without an 'error' listener, a missing or unreadable file
    // crashes the process with an unhandled 'error' event.
    console.error('read failed:', err);
}).on('data', function(chunk) {
    data += chunk;
    console.log('chunk Data : ')
    console.log(chunk);// your processing chunk logic will go here

}).on('end', function() {
    console.log('###################');
    console.log(data); 
// here you see all data processed at end of file
    });

请注意:highWaterMark是用于块大小的参数希望这能有所帮助!
Web 参考：https://stackabuse.com/read-files-with-node-js/ ，以及 Stack Overflow 问题 “Changing readstream chunksize”。

ffscu2ro

ffscu2ro3#

基于mscdex's answer,这里有一个模块,它使用sync替代方法,并使用StringDecoder正确解析UTF-8
readableStream的问题在于,为了使用它,你必须将整个项目转换为使用异步发射器和回调。如果你在编写一些简单的代码,比如nodejs中的一个小CLI,这就没有意义了。

//usage
// usage: drain the file chunk by chunk; readChunk() flips isOpen
// to false (and returns '') once everything has been read
const reader = new UTF8FileReader();
reader.open('./myfile.txt', 1024);
while (reader.isOpen) {
    const text = reader.readChunk();
    console.log(text);
}

//--------------------
// UTF8FileReader.ts
//--------------------
import * as fs from 'fs';
import { StringDecoder, NodeStringDecoder } from "string_decoder";

/**
 * Synchronous chunked UTF-8 file reader.
 * Reads a file chunkSize bytes at a time, decoding each chunk to a string.
 * A StringDecoder buffers multibyte sequences that straddle chunk
 * boundaries, so decoding is correct even for tiny chunk sizes.
 */
export class UTF8FileReader {

    filename: string;
    isOpen: boolean = false;
    private chunkSize: number;
    private fd: number; // file handle from fs.openSync
    private readFilePos: number;
    private readBuffer: Buffer;

    // typed as StringDecoder: the NodeStringDecoder alias is deprecated
    // and has been removed from current @types/node
    private utf8decoder: StringDecoder

    /**
     * open the file | throw
     * @param filename path of the file to read
     * @param chunkSize bytes read per readChunk() call (default 16KB)
     */
    open(filename, chunkSize: number = 16 * 1024) {

        this.chunkSize = chunkSize;

        try {
            this.fd = fs.openSync(filename, 'r');
        }
        catch (e) {
            throw new Error("opening " + filename + ", error:" + e.toString());
        }

        this.filename = filename;
        this.isOpen = true;

        this.readBuffer = Buffer.alloc(this.chunkSize);
        this.readFilePos = 0;

        // a StringDecoder is a buffered object that ensures complete UTF-8
        // multibyte decoding from a byte buffer
        this.utf8decoder = new StringDecoder('utf8')

    }

    /**
     * read another chunk from the file
     * return the decoded UTF8 into a string
     * (or throw); returns '' and closes the file at EOF
     * */
    readChunk(): string {

        let decodedString = '' // return '' by default

        if (!this.isOpen) {
            return decodedString;
        }

        let readByteCount: number;
        try {
            readByteCount = fs.readSync(this.fd, this.readBuffer, 0, this.chunkSize, this.readFilePos);
        }
        catch (e) {
            throw new Error("reading " + this.filename + ", error:" + e.toString());
        }

        if (readByteCount) {
            // some data read, advance readFilePos
            this.readFilePos += readByteCount;
            // expose only the bytes actually read (matters on the last,
            // short chunk); subarray() replaces the deprecated Buffer#slice()
            const onlyReadBytesBuf = this.readBuffer.subarray(0, readByteCount);
            // decode as UTF-8; write() keeps any trailing partial multibyte
            // sequence buffered until the next chunk arrives
            decodedString = this.utf8decoder.write(onlyReadBytesBuf);
        }
        else {
            // read returned 0 => all bytes read
            this.close();
        }
        return decodedString
    }

    /** close the file and flush the decoder; safe to call more than once */
    close() {
        if (!this.isOpen) {
            return;
        }
        fs.closeSync(this.fd);
        this.isOpen = false;
        this.utf8decoder.end();
    }

}

如果您还没有使用 TypeScript，可以使用以下由 TypeScript 转换而来的 .js 代码：

// UTF8FileReader.js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.UTF8FileReader = void 0;
//--------------------
// UTF8FileReader
//--------------------
const fs = require("fs");
const string_decoder_1 = require("string_decoder");
/**
 * Synchronous chunked UTF-8 file reader (transpiled-to-JS version).
 * Reads a file chunkSize bytes at a time and decodes each chunk to a
 * string. A StringDecoder buffers multibyte sequences that straddle
 * chunk boundaries, so decoding is correct even for tiny chunk sizes.
 */
class UTF8FileReader {
    constructor() {
        this.isOpen = false;
    }
    /**
     * open the file | throw
     * @param filename path of the file to read
     * @param chunkSize bytes read per readChunk() call (default 16KB)
     */
    open(filename, chunkSize = 16 * 1024) {
        this.chunkSize = chunkSize;
        try {
            this.fd = fs.openSync(filename, 'r');
        }
        catch (e) {
            throw new Error("opening " + filename + ", error:" + e.toString());
        }
        this.filename = filename;
        this.isOpen = true;
        this.readBuffer = Buffer.alloc(this.chunkSize);
        this.readFilePos = 0;
        // a StringDecoder is a buffered object that ensures complete UTF-8
        // multibyte decoding from a byte buffer
        this.utf8decoder = new string_decoder_1.StringDecoder('utf8');
    }
    /**
     * read another chunk from the file
     * return the decoded UTF8 into a string
     * (or throw); returns '' and closes the file at EOF
     * */
    readChunk() {
        let decodedString = ''; // return '' by default
        if (!this.isOpen) {
            return decodedString;
        }
        let readByteCount;
        try {
            readByteCount = fs.readSync(this.fd, this.readBuffer, 0, this.chunkSize, this.readFilePos);
        }
        catch (e) {
            throw new Error("reading " + this.filename + ", error:" + e.toString());
        }
        if (readByteCount) {
            // some data read, advance readFilePos
            this.readFilePos += readByteCount;
            // expose only the bytes actually read (matters on the last,
            // short chunk); subarray() replaces the deprecated Buffer#slice()
            const onlyReadBytesBuf = this.readBuffer.subarray(0, readByteCount);
            // decode as UTF-8; write() keeps any trailing partial multibyte
            // sequence buffered until the next chunk arrives
            decodedString = this.utf8decoder.write(onlyReadBytesBuf);
        }
        else {
            // read returned 0 => all bytes read
            this.close();
        }
        return decodedString;
    }
    /** close the file and flush the decoder; safe to call more than once */
    close() {
        if (!this.isOpen) {
            return;
        }
        fs.closeSync(this.fd);
        this.isOpen = false;
        this.utf8decoder.end();
    }
}
exports.UTF8FileReader = UTF8FileReader;
3wabscal

3wabscal4#

我会建议像 this 这样的做法，因为 buffer.slice 已被弃用，而且对我来说它在读取大型 PDF 文件时有问题。

import {promisify} from 'node:util';
import fs from 'node:fs';
import {Buffer} from 'node:buffer';
import pify from 'pify';

// Promise-returning wrappers around the callback-style fs APIs.
// fs.read invokes its callback with (err, bytesRead, buffer), so pify's
// multiArgs option is needed to keep both result values; fs.open and
// fs.close have a single result and work with util.promisify directly.
const fsReadP = pify(fs.read, {multiArgs: true});
const fsOpenP = promisify(fs.open);
const fsCloseP = promisify(fs.close);

/**
 * Read `length` bytes from `filePath` starting at byte `startPosition`.
 * Resolves with a Buffer trimmed to the number of bytes actually read
 * (shorter than `length` when the requested span extends past EOF).
 * The file descriptor is always closed, even if the read rejects.
 */
export async function readChunk(filePath, {length, startPosition}) {
    const fd = await fsOpenP(filePath, 'r');

    try {
        const [bytesRead, data] = await fsReadP(fd, {
            buffer: Buffer.alloc(length),
            length,
            position: startPosition,
        });

        // short read (EOF): expose only the bytes that were filled in
        return bytesRead < length ? data.subarray(0, bytesRead) : data;
    } finally {
        await fsCloseP(fd);
    }
}

/**
 * Synchronous variant: read `length` bytes from `filePath` starting at
 * byte `startPosition`. Returns a Buffer trimmed to the number of bytes
 * actually read. The file descriptor is always closed, even on error.
 */
export function readChunkSync(filePath, {length, startPosition}) {
    const fd = fs.openSync(filePath, 'r');

    try {
        const target = Buffer.alloc(length);
        const bytesRead = fs.readSync(fd, target, {
            length,
            position: startPosition,
        });

        // full read: hand back the buffer as-is
        if (bytesRead === length) {
            return target;
        }
        // short read (requested span extends past EOF): trim to fit
        return target.subarray(0, bytesRead);
    } finally {
        fs.closeSync(fd);
    }
}

相关问题