summary refs log tree commit diff
diff options
context:
space:
mode:
authorYorhel <git@yorhel.nl>2021-05-29 19:22:00 +0200
committerYorhel <git@yorhel.nl>2021-05-29 19:22:00 +0200
commit59ef5fd27b4b1613fa10877f8827096c746535bb (patch)
treeae053ec5366dc3ce6b9a0cdf02a6dcb883f622b9
parent23903088833447bbe558ad1c93abe47505635383 (diff)
Improved error reporting + minor cleanup
-rw-r--r--src/main.zig15
-rw-r--r--src/model.zig92
-rw-r--r--src/scan.zig134
-rw-r--r--src/ui.zig20
4 files changed, 149 insertions, 112 deletions
diff --git a/src/main.zig b/src/main.zig
index 195e3d9..649dc8e 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -184,8 +184,7 @@ fn readExcludeFile(path: []const u8) !void {
}
}
-// TODO: Better error reporting
-pub fn main() !void {
+pub fn main() void {
// Grab thousands_sep from the current C locale.
_ = c.setlocale(c.LC_ALL, "");
if (c.localeconv()) |locale| {
@@ -228,7 +227,7 @@ pub fn main() !void {
else if(opt.is("--exclude")) config.exclude_patterns.append(args.arg()) catch unreachable
else if(opt.is("-X") or opt.is("--exclude-from")) {
const arg = args.arg();
- readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {}.\n", .{ arg, e });
+ readExcludeFile(arg) catch |e| ui.die("Error reading excludes from {s}: {s}.\n", .{ arg, ui.errorString(e) });
} else if(opt.is("--exclude-caches")) config.exclude_caches = true
else if(opt.is("--exclude-kernfs")) config.exclude_kernfs = true
else if(opt.is("--confirm-quit")) config.confirm_quit = true
@@ -255,17 +254,19 @@ pub fn main() !void {
ui.die("Standard input is not a TTY. Did you mean to import a file using '-f -'?\n", .{});
config.nc_tty = !in_tty or (if (export_file) |f| std.mem.eql(u8, f, "-") else false);
- event_delay_timer = try std.time.Timer.start();
+ event_delay_timer = std.time.Timer.start() catch unreachable;
defer ui.deinit();
state = .scan;
var out_file = if (export_file) |f| (
if (std.mem.eql(u8, f, "-")) std.io.getStdOut()
- else try std.fs.cwd().createFileZ(f, .{})
+ else std.fs.cwd().createFileZ(f, .{})
+ catch |e| ui.die("Error opening export file: {s}.\n", .{ui.errorString(e)})
) else null;
- try if (import_file) |f| scan.importRoot(f, out_file)
- else scan.scanRoot(scan_dir orelse ".", out_file);
+ if (import_file) |f| scan.importRoot(f, out_file)
+ else scan.scanRoot(scan_dir orelse ".", out_file)
+ catch |e| ui.die("Error opening directory: {s}.\n", .{ui.errorString(e)});
if (out_file != null) return;
config.scan_ui = .full; // in case we're refreshing from the UI, always in full mode.
diff --git a/src/model.zig b/src/model.zig
index 842e220..deef87d 100644
--- a/src/model.zig
+++ b/src/model.zig
@@ -126,8 +126,8 @@ pub const Entry = packed struct {
add_total = new_hl;
} else if (self.link()) |l| {
- const n = HardlinkNode{ .ino = l.ino, .dir = p, .num_files = 1 };
- var d = devices.items[dev].hardlinks.getOrPut(n) catch unreachable;
+ const n = devices.HardlinkNode{ .ino = l.ino, .dir = p, .num_files = 1 };
+ var d = devices.list.items[dev].hardlinks.getOrPut(n) catch unreachable;
new_hl = !d.found_existing;
if (d.found_existing) d.entry.key.num_files += 1;
// First time we encounter this file in this dir, count it.
@@ -167,7 +167,7 @@ pub const Dir = packed struct {
shared_size: u64,
items: u32,
- // Indexes into the global 'devices' array
+ // Indexes into the global 'devices.list' array
dev: DevId,
err: bool,
@@ -252,58 +252,64 @@ comptime {
// with the same dev,ino. ncdu provides this list in the info window. Doesn't
// seem too commonly used, can still be provided by a slow full scan of the
// tree.
+//
+// Problem: A file's st_nlink count may have changed during a scan and hence be
+// inconsistent with other entries for the same file. Not ~too~ common so a
+// few glitches are fine, but I haven't worked out the impact of this yet.
-// 20 bytes per hardlink/Dir entry, everything in a single allocation.
-// (Should really be aligned to 8 bytes and hence take up 24 bytes, but let's see how this works out)
-//
-// getEntry() allows modification of the key without re-insertion (this is unsafe in the general case, but works fine for modifying num_files)
-//
-// Potential problem: HashMap uses a 32bit item counter, which may be exceeded in extreme scenarios.
-// (ncdu itself doesn't support more than 31bit-counted files, but this table is hardlink_count*parent_dirs and may grow a bit)
+pub const devices = struct {
+ var list: std.ArrayList(Device) = std.ArrayList(Device).init(main.allocator);
+ var lookup: std.AutoHashMap(u64, DevId) = std.AutoHashMap(u64, DevId).init(main.allocator);
-const HardlinkNode = packed struct {
- ino: u64,
- dir: *Dir,
- num_files: u32,
+ // 20 bytes per hardlink/Dir entry, everything in a single allocation.
+ // (Should really be aligned to 8 bytes and hence take up 24 bytes, but let's see how this works out)
+ //
+ // getEntry() allows modification of the key without re-insertion (this is unsafe in the general case, but works fine for modifying num_files)
+ //
+ // Potential problem: HashMap uses a 32bit item counter, which may be exceeded in extreme scenarios.
+ // (ncdu itself doesn't support more than 31bit-counted files, but this table is hardlink_count*parent_dirs and may grow a bit)
- const Self = @This();
+ const HardlinkNode = packed struct {
+ ino: u64,
+ dir: *Dir,
+ num_files: u32,
- // hash() assumes a struct layout, hence the 'packed struct'
- fn hash(self: Self) u64 { return std.hash.Wyhash.hash(0, @ptrCast([*]const u8, &self)[0..@byteOffsetOf(Self, "dir")+@sizeOf(*Dir)]); }
- fn eql(a: Self, b: Self) bool { return a.ino == b.ino and a.dir == b.dir; }
-};
+ const Self = @This();
-// Device entry, this is used for two reasons:
-// 1. st_dev ids are 64-bit, but in a typical filesystem there's only a few
-// unique ids, hence we can save RAM by only storing smaller DevId's in Dir
-// entries and using that as an index to a lookup table.
-// 2. Keeping track of hardlink counts for each dir and inode, as described above.
-//
-// (Device entries are never deallocated)
-const Device = struct {
- dev: u64,
- hardlinks: Hardlinks = Hardlinks.init(main.allocator),
+ // hash() assumes a struct layout, hence the 'packed struct'
+ fn hash(self: Self) u64 { return std.hash.Wyhash.hash(0, @ptrCast([*]const u8, &self)[0..@byteOffsetOf(Self, "dir")+@sizeOf(*Dir)]); }
+ fn eql(a: Self, b: Self) bool { return a.ino == b.ino and a.dir == b.dir; }
+ };
const Hardlinks = std.HashMap(HardlinkNode, void, HardlinkNode.hash, HardlinkNode.eql, 80);
-};
-var devices: std.ArrayList(Device) = std.ArrayList(Device).init(main.allocator);
-var dev_lookup: std.AutoHashMap(u64, DevId) = std.AutoHashMap(u64, DevId).init(main.allocator);
+ // Device entry, this is used for two reasons:
+ // 1. st_dev ids are 64-bit, but in a typical filesystem there's only a few
+ // unique ids, hence we can save RAM by only storing smaller DevId's in Dir
+ // entries and using that as an index to a lookup table.
+ // 2. Keeping track of hardlink counts for each dir and inode, as described above.
+ //
+ // (Device entries are never deallocated)
+ const Device = struct {
+ dev: u64,
+ hardlinks: Hardlinks = Hardlinks.init(main.allocator),
+ };
-pub fn getDevId(dev: u64) DevId {
- var d = dev_lookup.getOrPut(dev) catch unreachable;
- if (!d.found_existing) {
- errdefer dev_lookup.removeAssertDiscard(dev);
- d.entry.value = @intCast(DevId, devices.items.len);
- devices.append(.{ .dev = dev }) catch unreachable;
+ pub fn getId(dev: u64) DevId {
+ var d = lookup.getOrPut(dev) catch unreachable;
+ if (!d.found_existing) {
+ errdefer lookup.removeAssertDiscard(dev);
+ d.entry.value = @intCast(DevId, list.items.len);
+ list.append(.{ .dev = dev }) catch unreachable;
+ }
+ return d.entry.value;
}
- return d.entry.value;
-}
-pub fn getDev(id: DevId) u64 {
- return devices.items[id].dev;
-}
+ pub fn getDev(id: DevId) u64 {
+ return list.items[id].dev;
+ }
+};
pub var root: *Dir = undefined;
diff --git a/src/scan.zig b/src/scan.zig
index 6323ce5..a4341b3 100644
--- a/src/scan.zig
+++ b/src/scan.zig
@@ -131,14 +131,18 @@ const Context = struct {
const Writer = std.io.BufferedWriter(4096, std.fs.File.Writer);
const Self = @This();
- fn initFile(out: std.fs.File) !Self {
+ fn writeErr(e: anyerror) noreturn {
+ ui.die("Error writing to file: {s}.\n", .{ ui.errorString(e) });
+ }
+
+ fn initFile(out: std.fs.File) Self {
var buf = main.allocator.create(Writer) catch unreachable;
errdefer main.allocator.destroy(buf);
buf.* = std.io.bufferedWriter(out.writer());
var wr = buf.writer();
- try wr.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":");
- try wr.print("{d}", .{std.time.timestamp()});
- try wr.writeByte('}');
+ wr.writeAll("[1,2,{\"progname\":\"ncdu\",\"progver\":\"" ++ main.program_version ++ "\",\"timestamp\":") catch |e| writeErr(e);
+ wr.print("{d}", .{std.time.timestamp()}) catch |e| writeErr(e);
+ wr.writeByte('}') catch |e| writeErr(e);
return Self{ .wr = buf };
}
@@ -146,10 +150,10 @@ const Context = struct {
return Self{ .parents = model.Parents{} };
}
- fn final(self: *Self) !void {
+ fn final(self: *Self) void {
if (self.wr) |wr| {
- try wr.writer().writeByte(']');
- try wr.flush();
+ wr.writer().writeByte(']') catch |e| writeErr(e);
+ wr.flush() catch |e| writeErr(e);
}
}
@@ -171,7 +175,7 @@ const Context = struct {
if (self.stat.dir) {
if (self.parents) |*p| if (p.top() != model.root) p.pop();
- if (self.wr) |w| w.writer().writeByte(']') catch ui.die("Error writing to file.", .{});
+ if (self.wr) |w| w.writer().writeByte(']') catch |e| writeErr(e);
} else
self.stat.dir = true; // repeated popPath()s mean we're closing parent dirs.
}
@@ -188,9 +192,24 @@ const Context = struct {
const Special = enum { err, other_fs, kernfs, excluded };
+ fn writeSpecial(self: *Self, w: anytype, t: Special) !void {
+ try w.writeAll(",\n");
+ if (self.stat.dir) try w.writeByte('[');
+ try w.writeAll("{\"name\":");
+ try writeJsonString(w, self.name);
+ switch (t) {
+ .err => try w.writeAll(",\"read_error\":true"),
+ .other_fs => try w.writeAll(",\"excluded\":\"othfs\""),
+ .kernfs => try w.writeAll(",\"excluded\":\"kernfs\""),
+ .excluded => try w.writeAll(",\"excluded\":\"pattern\""),
+ }
+ try w.writeByte('}');
+ if (self.stat.dir) try w.writeByte(']');
+ }
+
// Insert the current path as a special entry (i.e. a file/dir that is not counted)
// Ignores self.stat except for the 'dir' option.
- fn addSpecial(self: *Self, t: Special) !void {
+ fn addSpecial(self: *Self, t: Special) void {
std.debug.assert(self.items_seen > 0); // root item can't be a special
if (t == .err) {
@@ -209,26 +228,30 @@ const Context = struct {
.excluded => f.excluded = true,
}
- } else if (self.wr) |wr| {
- var w = wr.writer();
- try w.writeAll(",\n");
- if (self.stat.dir) try w.writeByte('[');
- try w.writeAll("{\"name\":");
- try writeJsonString(w, self.name);
- switch (t) {
- .err => try w.writeAll(",\"read_error\":true"),
- .other_fs => try w.writeAll(",\"excluded\":\"othfs\""),
- .kernfs => try w.writeAll(",\"excluded\":\"kernfs\""),
- .excluded => try w.writeAll(",\"excluded\":\"pattern\""),
- }
- try w.writeByte('}');
- if (self.stat.dir) try w.writeByte(']');
- }
+ } else if (self.wr) |wr|
+ self.writeSpecial(wr.writer(), t) catch |e| writeErr(e);
+
self.items_seen += 1;
}
+ fn writeStat(self: *Self, w: anytype, dir_dev: u64) !void {
+ try w.writeAll(",\n");
+ if (self.stat.dir) try w.writeByte('[');
+ try w.writeAll("{\"name\":");
+ try writeJsonString(w, self.name);
+ if (self.stat.size > 0) try w.print(",\"asize\":{d}", .{ self.stat.size });
+ if (self.stat.blocks > 0) try w.print(",\"dsize\":{d}", .{ blocksToSize(self.stat.blocks) });
+ if (self.stat.dir and self.stat.dev != dir_dev) try w.print(",\"dev\":{d}", .{ self.stat.dev });
+ if (self.stat.hlinkc) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
+ if (!self.stat.dir and !self.stat.reg) try w.writeAll(",\"notreg\":true");
+ if (main.config.extended)
+ try w.print(",\"uid\":{d},\"gid\":{d},\"mode\":{d},\"mtime\":{d}",
+ .{ self.stat.ext.uid, self.stat.ext.gid, self.stat.ext.mode, self.stat.ext.mtime });
+ try w.writeByte('}');
+ }
+
// Insert current path as a counted file/dir/hardlink, with information from self.stat
- fn addStat(self: *Self, dir_dev: u64) !void {
+ fn addStat(self: *Self, dir_dev: u64) void {
if (self.parents) |*p| {
const etype = if (self.stat.dir) model.EType.dir
else if (self.stat.hlinkc) model.EType.link
@@ -236,7 +259,7 @@ const Context = struct {
var e = model.Entry.create(etype, main.config.extended, self.name);
e.blocks = self.stat.blocks;
e.size = self.stat.size;
- if (e.dir()) |d| d.dev = model.getDevId(self.stat.dev);
+ if (e.dir()) |d| d.dev = model.devices.getId(self.stat.dev);
if (e.file()) |f| f.notreg = !self.stat.dir and !self.stat.reg;
// TODO: Handle the scenario where we don't know the hard link count
// (i.e. on imports from old ncdu versions that don't have the "nlink" field)
@@ -253,22 +276,8 @@ const Context = struct {
if (e.dir()) |d| p.push(d); // Enter the directory
}
- } else if (self.wr) |wr| {
- var w = wr.writer();
- try w.writeAll(",\n");
- if (self.stat.dir) try w.writeByte('[');
- try w.writeAll("{\"name\":");
- try writeJsonString(w, self.name);
- if (self.stat.size > 0) try w.print(",\"asize\":{d}", .{ self.stat.size });
- if (self.stat.blocks > 0) try w.print(",\"dsize\":{d}", .{ blocksToSize(self.stat.blocks) });
- if (self.stat.dir and self.stat.dev != dir_dev) try w.print(",\"dev\":{d}", .{ self.stat.dev });
- if (self.stat.hlinkc) try w.print(",\"ino\":{d},\"hlnkc\":true,\"nlink\":{d}", .{ self.stat.ino, self.stat.nlink });
- if (!self.stat.dir and !self.stat.reg) try w.writeAll(",\"notreg\":true");
- if (main.config.extended)
- try w.print(",\"uid\":{d},\"gid\":{d},\"mode\":{d},\"mtime\":{d}",
- .{ self.stat.ext.uid, self.stat.ext.gid, self.stat.ext.mode, self.stat.ext.mtime });
- try w.writeByte('}');
- }
+ } else if (self.wr) |wr|
+ self.writeStat(wr.writer(), dir_dev) catch |e| writeErr(e);
self.items_seen += 1;
}
@@ -286,7 +295,7 @@ const Context = struct {
var active_context: ?*Context = null;
// Read and index entries of the given dir.
-fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Error!void {
+fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) void {
// XXX: The iterator allocates 8k+ bytes on the stack, may want to do heap allocation here?
var it = dir.iterate();
while(true) {
@@ -313,17 +322,17 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Erro
break :blk false;
};
if (excluded) {
- try ctx.addSpecial(.excluded);
+ ctx.addSpecial(.excluded);
continue;
}
ctx.stat = Stat.read(dir, ctx.name, false) catch {
- try ctx.addSpecial(.err);
+ ctx.addSpecial(.err);
continue;
};
if (main.config.same_fs and ctx.stat.dev != dir_dev) {
- try ctx.addSpecial(.other_fs);
+ ctx.addSpecial(.other_fs);
continue;
}
@@ -341,13 +350,13 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Erro
var edir =
if (ctx.stat.dir) dir.openDirZ(ctx.name, .{ .access_sub_paths = true, .iterate = true, .no_follow = true }) catch {
- try ctx.addSpecial(.err);
+ ctx.addSpecial(.err);
continue;
} else null;
defer if (edir != null) edir.?.close();
if (std.builtin.os.tag == .linux and main.config.exclude_kernfs and ctx.stat.dir and isKernfs(edir.?, ctx.stat.dev)) {
- try ctx.addSpecial(.kernfs);
+ ctx.addSpecial(.kernfs);
continue;
}
@@ -357,20 +366,20 @@ fn scanDir(ctx: *Context, dir: std.fs.Dir, dir_dev: u64) std.fs.File.Writer.Erro
var buf: [sig.len]u8 = undefined;
if (f.reader().readAll(&buf)) |len| {
if (len == sig.len and std.mem.eql(u8, &buf, sig)) {
- try ctx.addSpecial(.excluded);
+ ctx.addSpecial(.excluded);
continue;
}
} else |_| {}
} else |_| {}
}
- try ctx.addStat(dir_dev);
- if (ctx.stat.dir) try scanDir(ctx, edir.?, ctx.stat.dev);
+ ctx.addStat(dir_dev);
+ if (ctx.stat.dir) scanDir(ctx, edir.?, ctx.stat.dev);
}
}
pub fn scanRoot(path: []const u8, out: ?std.fs.File) !void {
- var ctx = if (out) |f| try Context.initFile(f) else Context.initMem();
+ var ctx = if (out) |f| Context.initFile(f) else Context.initMem();
active_context = &ctx;
defer active_context = null;
defer ctx.deinit();
@@ -380,14 +389,14 @@ pub fn scanRoot(path: []const u8, out: ?std.fs.File) !void {
ctx.pushPath(full_path orelse path);
ctx.stat = try Stat.read(std.fs.cwd(), ctx.pathZ(), true);
- if (!ctx.stat.dir) return error.NotADirectory;
- try ctx.addStat(0);
+ if (!ctx.stat.dir) return error.NotDir;
+ ctx.addStat(0);
var dir = try std.fs.cwd().openDirZ(ctx.pathZ(), .{ .access_sub_paths = true, .iterate = true });
defer dir.close();
- try scanDir(&ctx, dir, ctx.stat.dev);
+ scanDir(&ctx, dir, ctx.stat.dev);
ctx.popPath();
- try ctx.final();
+ ctx.final();
}
// Using a custom recursive descent JSON parser here. std.json is great, but
@@ -704,8 +713,8 @@ const Import = struct {
}
if (name) |n| self.ctx.pushPath(n)
else self.die("missing \"name\" field");
- if (special) |s| self.ctx.addSpecial(s) catch unreachable
- else self.ctx.addStat(dir_dev) catch unreachable;
+ if (special) |s| self.ctx.addSpecial(s)
+ else self.ctx.addStat(dir_dev);
}
fn item(self: *Self, dev: u64) void {
@@ -771,20 +780,21 @@ const Import = struct {
}
};
-pub fn importRoot(path: [:0]const u8, out: ?std.fs.File) !void {
+pub fn importRoot(path: [:0]const u8, out: ?std.fs.File) void {
var fd = if (std.mem.eql(u8, "-", path)) std.io.getStdIn()
- else try std.fs.cwd().openFileZ(path, .{});
+ else std.fs.cwd().openFileZ(path, .{})
+ catch |e| ui.die("Error reading file: {s}.\n", .{ui.errorString(e)});
defer fd.close();
var imp = Import{
- .ctx = if (out) |f| try Context.initFile(f) else Context.initMem(),
+ .ctx = if (out) |f| Context.initFile(f) else Context.initMem(),
.rd = std.io.bufferedReader(fd.reader()),
};
active_context = &imp.ctx;
defer active_context = null;
defer imp.ctx.deinit();
imp.root();
- try imp.ctx.final();
+ imp.ctx.final();
}
var animation_pos: u32 = 0;
diff --git a/src/ui.zig b/src/ui.zig
index f5b7a07..3f3129f 100644
--- a/src/ui.zig
+++ b/src/ui.zig
@@ -47,6 +47,26 @@ pub fn oom() void {
init();
}
+// Lazy strerror() for Zig file I/O, not complete.
+// (Would be nicer if Zig just exposed errno so I could call strerror() directly)
+pub fn errorString(e: anyerror) []const u8 {
+ return switch (e) {
+ error.DiskQuota => "Disk quota exceeded",
+ error.FileTooBig => "File too big",
+ error.InputOutput => "I/O error",
+ error.NoSpaceLeft => "No space left on device",
+ error.AccessDenied => "Access denied",
+ error.SymlinkLoop => "Symlink loop",
+ error.ProcessFdQuotaExceeded => "Process file descriptor limit exceeded",
+ error.SystemFdQuotaExceeded => "System file descriptor limit exceeded",
+ error.NameTooLong => "Filename too long",
+ error.FileNotFound => "No such file or directory",
+ error.IsDir => "Is a directory",
+ error.NotDir => "Not a directory",
+ else => "Unknown error", // rather useless :(
+ };
+}
+
var to_utf8_buf = std.ArrayList(u8).init(main.allocator);
fn toUtf8BadChar(ch: u8) bool {