Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions benchmark/fs/bench-mkdirpSync.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
'use strict';

// Exercises fs.mkdirSync({ recursive: true }) — MKDirpSync in
// src/node_file.cc — which drains a continuation_data path queue. Deeper
// trees mean more inner-loop iterations, so the cached continuation_data
// pointer has a larger effect.

const common = require('../common');
const fs = require('fs');
const path = require('path');
const tmpdir = require('../../test/common/tmpdir');
tmpdir.refresh();

const bench = common.createBenchmark(main, {
  n: [1e3],
  depth: [4, 8, 16],
});

// Monotonic counter so every generated directory segment is unique.
let dirCounter = 0;

function main({ n, depth }) {
  bench.start();
  for (let iter = 0; iter < n; iter++) {
    const segments = [];
    for (let d = 0; d < depth; d++)
      segments.push(String(++dirCounter));
    fs.mkdirSync(path.join(tmpdir.path, ...segments), { recursive: true });
  }
  bench.end(n);
}
43 changes: 43 additions & 0 deletions benchmark/streams/writable-writev-string.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
'use strict';

const common = require('../common.js');
const { Writable } = require('stream');

// Measures StreamBase::Writev with corked writes. The native side caches
// per-chunk data between its sizing pass and its write pass, skipping
// repeated V8 array reads, ToString conversions, and ParseEncoding calls.
const bench = common.createBenchmark(main, {
  n: [1e4],
  chunks: [4, 16, 64],
  encoding: ['utf8', 'latin1'],
  type: ['string', 'buffer', 'mixed'],
});

function main({ n, chunks, encoding, type }) {
  const payload = 'Hello, benchmark! '.repeat(4);
  const payloadBuf = Buffer.from(payload, encoding);

  const writable = new Writable({
    writev(list, cb) { cb(); },
    write(chunk, enc, cb) { cb(); },
  });

  bench.start();
  for (let i = 0; i < n; i++) {
    writable.cork();
    for (let j = 0; j < chunks; j++) {
      // 'mixed' alternates buffer and string chunks so the
      // non-all_buffers path in the native code is exercised.
      const useBuffer =
        type === 'buffer' || (type === 'mixed' && j % 2 === 0);
      if (useBuffer)
        writable.write(payloadBuf);
      else
        writable.write(payload, encoding);
    }
    writable.uncork();
  }
  bench.end(n);
}
18 changes: 9 additions & 9 deletions src/node_file.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1866,16 +1866,17 @@ int MKDirpSync(uv_loop_t* loop,
req_wrap->continuation_data()->PushPath(std::move(path));
}

while (req_wrap->continuation_data()->paths().size() > 0) {
std::string next_path = req_wrap->continuation_data()->PopPath();
FSContinuationData* cont_data = req_wrap->continuation_data();
while (cont_data->paths().size() > 0) {
std::string next_path = cont_data->PopPath();
int err = uv_fs_mkdir(loop, req, next_path.c_str(), mode, nullptr);
while (true) {
switch (err) {
// Note: uv_fs_req_cleanup in terminal paths will be called by
// ~FSReqWrapSync():
case 0:
req_wrap->continuation_data()->MaybeSetFirstPath(next_path);
if (req_wrap->continuation_data()->paths().empty()) {
cont_data->MaybeSetFirstPath(next_path);
if (cont_data->paths().empty()) {
return 0;
}
break;
Expand All @@ -1889,9 +1890,9 @@ int MKDirpSync(uv_loop_t* loop,
std::string dirname =
next_path.substr(0, next_path.find_last_of(kPathSeparator));
if (dirname != next_path) {
req_wrap->continuation_data()->PushPath(std::move(next_path));
req_wrap->continuation_data()->PushPath(std::move(dirname));
} else if (req_wrap->continuation_data()->paths().empty()) {
cont_data->PushPath(std::move(next_path));
cont_data->PushPath(std::move(dirname));
} else if (cont_data->paths().empty()) {
err = UV_EEXIST;
continue;
}
Expand All @@ -1903,8 +1904,7 @@ int MKDirpSync(uv_loop_t* loop,
err = uv_fs_stat(loop, req, next_path.c_str(), nullptr);
if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) {
uv_fs_req_cleanup(req);
if (orig_err == UV_EEXIST &&
req_wrap->continuation_data()->paths().size() > 0) {
if (orig_err == UV_EEXIST && cont_data->paths().size() > 0) {
return UV_ENOTDIR;
}
return UV_EEXIST;
Expand Down
80 changes: 43 additions & 37 deletions src/stream_base.cc
Original file line number Diff line number Diff line change
Expand Up @@ -201,12 +201,24 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
size_t offset;

if (!all_buffers) {
// Cache per-chunk data from the first pass so the second pass avoids
// redundant V8 array accesses, ToString conversions, and ParseEncoding
// calls. Local<> handles remain valid for the duration of this scope.
struct CachedChunk {
Local<Value> value;
Local<String> string; // empty for Buffer chunks
enum encoding enc;
};
MaybeStackBuffer<CachedChunk, 16> chunk_cache(count);

// Determine storage size first
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i * 2).ToLocal(&chunk))
return -1;

chunk_cache[i].value = chunk;

if (Buffer::HasInstance(chunk))
continue;
// Buffer chunk, no additional storage required
Expand All @@ -219,6 +231,8 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
return -1;
enum encoding encoding = ParseEncoding(isolate, next_chunk);
chunk_cache[i].string = string;
chunk_cache[i].enc = encoding;
size_t chunk_size;
if ((encoding == UTF8 &&
string->Length() > 65535 &&
Expand All @@ -230,35 +244,23 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
storage_size += chunk_size;
}

if (storage_size > INT_MAX)
return UV_ENOBUFS;
} else {
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i).ToLocal(&chunk))
return -1;
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
}
}
if (storage_size > INT_MAX) return UV_ENOBUFS;

std::unique_ptr<BackingStore> bs;
if (storage_size > 0) {
bs = ArrayBuffer::NewBackingStore(
isolate, storage_size, BackingStoreInitializationMode::kUninitialized);
}
std::unique_ptr<BackingStore> bs;
if (storage_size > 0) {
bs = ArrayBuffer::NewBackingStore(
isolate,
storage_size,
BackingStoreInitializationMode::kUninitialized);
}

offset = 0;
if (!all_buffers) {
offset = 0;
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i * 2).ToLocal(&chunk))
return -1;

// Write buffer
if (Buffer::HasInstance(chunk)) {
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
// string.IsEmpty() signals a Buffer chunk; enc is uninitialised in
// that case so we must not read it.
if (chunk_cache[i].string.IsEmpty()) {
bufs[i].base = Buffer::Data(chunk_cache[i].value);
bufs[i].len = Buffer::Length(chunk_cache[i].value);
continue;
}

Expand All @@ -268,28 +270,32 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
static_cast<char*>(bs ? bs->Data() : nullptr) + offset;
size_t str_size = (bs ? bs->ByteLength() : 0) - offset;

Local<String> string;
if (!chunk->ToString(context).ToLocal(&string))
return -1;
Local<Value> next_chunk;
if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
return -1;
enum encoding encoding = ParseEncoding(isolate, next_chunk);
str_size = StringBytes::Write(isolate,
str_storage,
str_size,
string,
encoding);
chunk_cache[i].string,
chunk_cache[i].enc);
bufs[i].base = str_storage;
bufs[i].len = str_size;
offset += str_size;
}

StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
SetWriteResult(res);
if (res.wrap != nullptr && storage_size > 0)
res.wrap->SetBackingStore(std::move(bs));
return res.err;
} else {
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i).ToLocal(&chunk)) return -1;
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
}
}

StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
SetWriteResult(res);
if (res.wrap != nullptr && storage_size > 0)
res.wrap->SetBackingStore(std::move(bs));
return res.err;
}

Expand Down
105 changes: 105 additions & 0 deletions test/parallel/test-fs-mkdir-recursive-deep.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
'use strict';

// Correctness tests for fs.mkdirSync with recursive: true on deeply nested
// paths. The native MKDirpSync implementation keeps the continuation_data
// pointer in a local to avoid repeated virtual dispatch; these cases confirm
// that observable behaviour is unchanged at several path depths.

const common = require('../common');
const assert = require('assert');
const fs = require('fs');
const path = require('path');

const tmpdir = require('../common/tmpdir');
tmpdir.refresh();

// Each call yields a fresh, unique directory segment name.
let counter = 0;
function nextdir() {
  counter += 1;
  return `${counter}`;
}

// Fresh 8-deep tree: the return value must be the first segment that was
// actually created, and every intermediate directory must exist afterwards.
{
  const depth = 8;
  const segments = [];
  for (let i = 0; i < depth; i++) segments.push(nextdir());
  const target = tmpdir.resolve(...segments);

  const created = fs.mkdirSync(target, { recursive: true });

  assert.strictEqual(
    created, path.toNamespacedPath(tmpdir.resolve(segments[0])));
  assert.ok(fs.existsSync(target));
  assert.ok(fs.statSync(target).isDirectory());

  // Every prefix of the path must have been materialised as a directory.
  for (let i = 1; i < depth; i++) {
    const prefix = tmpdir.resolve(...segments.slice(0, i + 1));
    assert.ok(fs.statSync(prefix).isDirectory(),
              `intermediate path missing: ${prefix}`);
  }
}

// Depth 16 — forces additional passes through the continuation_data queue.
{
  const segments = [];
  for (let i = 0; i < 16; i++) segments.push(nextdir());
  const target = tmpdir.resolve(...segments);

  fs.mkdirSync(target, { recursive: true });

  assert.ok(fs.existsSync(target));
  assert.ok(fs.statSync(target).isDirectory());
}

// Re-creating an existing deep path is a no-op: no throw, and the second
// call returns undefined because nothing new was created.
{
  const segments = [];
  for (let i = 0; i < 8; i++) segments.push(nextdir());
  const target = tmpdir.resolve(...segments);

  fs.mkdirSync(target, { recursive: true });
  const secondResult = fs.mkdirSync(target, { recursive: true });

  assert.strictEqual(secondResult, undefined);
  assert.ok(fs.existsSync(target));
}

// When a prefix of the path already exists, the return value is the first
// segment that mkdirSync itself had to create.
{
  const segments = [];
  for (let i = 0; i < 6; i++) segments.push(nextdir());

  // Pre-create the first three levels.
  fs.mkdirSync(tmpdir.resolve(...segments.slice(0, 3)), { recursive: true });

  const target = tmpdir.resolve(...segments);
  const firstNew = tmpdir.resolve(...segments.slice(0, 4));

  const created = fs.mkdirSync(target, { recursive: true });

  assert.strictEqual(created, path.toNamespacedPath(firstNew));
  assert.ok(fs.existsSync(target));
  assert.ok(fs.statSync(target).isDirectory());
}

// Path with ".." components: mkdirSync must still create the correct directory.
{
const a = nextdir();
const b = nextdir();
const c = nextdir();
const pathname = `${tmpdir.path}/${a}/../${b}/${c}`;
fs.mkdirSync(pathname, { recursive: true });
assert.ok(fs.existsSync(pathname));
assert.ok(fs.statSync(pathname).isDirectory());
}

// Async variant: fs.mkdir with a callback must report the same first-created
// path for a deep tree.
{
  const segments = [];
  for (let i = 0; i < 8; i++) segments.push(nextdir());
  const target = tmpdir.resolve(...segments);
  const expected = path.toNamespacedPath(tmpdir.resolve(segments[0]));

  fs.mkdir(target, { recursive: true }, common.mustSucceed((created) => {
    assert.strictEqual(created, expected);
    assert.ok(fs.existsSync(target));
    assert.ok(fs.statSync(target).isDirectory());
  }));
}
Loading
Loading