author      Jaegeuk Kim <jaegeuk@google.com>        2020-12-28 16:02:17 -0800
committer   Jaegeuk Kim <jaegeuk@google.com>        2020-12-28 16:03:03 -0800
commit      ba23d909fa093189ad6ace38364bb58b30da3977 (patch)
tree        9b6db16dab4bb72043429d161a588b26b1c63efa
parent      905df203c9aa1a7fb39e2ec4c84998ff7d9f9d5b (diff)
parent      73c08716410c73194cf1b70d1228839c542ce088 (diff)
download    f2fs-tools-ba23d909fa093189ad6ace38364bb58b30da3977.tar.gz
Merge remote-tracking branch 'aosp/upstream-master' into aosp
* aosp/upstream-master:
  libzoned: use blk_zone_v2 and blk_zone_report_v2 by default
  f2fs-tools: fix wrong blk_zone_rep_v2 definition
  mkfs.f2fs: allocate zones together to avoid random access
  mkfs.f2fs: adjust zone alignment when using multi-partitions
  fsck.f2fs: fix alignment on multi-partition support
  f2fs-tools: Miscellaneous cleanup to README.
  mkfs.f2fs.8: Better document the -g argument.
  mkfs.f2fs.8: fix formatting for -l parameter in man page
  f2fs-tools: Make sload.f2fs reproduce hard links
  f2fs-tools:sload.f2fs compression support
  f2fs_io: add compress/decompress commands
  f2fs-tools: Added #ifdef WITH_func
  f2fs-tools: fix a few spelling errors in f2fs-tools
  f2fs-tools: skipped to end on error syntax error
  mkfs.f2fs: show a message when compression is enabled
  f2fs_io: add get/set compression option
  Fix ASSERT() macro with '%' in the expression
  f2fs-toos: fsck.f2fs Fix bad return value
  fsck.f2fs: do xnid sanity check only during fsck
  f2fs_io: add erase option
  mkfs.f2fs.8: document the verity feature
  fsck: clear unexpected casefold flags
  mkfs.f2fs: add -h and --help
  f2fs_io: change fibmap to fiemap

Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
Change-Id: I34dccfeceffa2cf1a0d109b490552966efc59c02
-rw-r--r--  METADATA                      4
-rw-r--r--  README                       10
-rw-r--r--  configure.ac                 13
-rw-r--r--  fsck/Makefile.am              9
-rw-r--r--  fsck/compress.c             178
-rw-r--r--  fsck/compress.h              22
-rw-r--r--  fsck/dir.c                   93
-rw-r--r--  fsck/f2fs.h                  10
-rw-r--r--  fsck/fsck.c                  16
-rw-r--r--  fsck/fsck.h                  11
-rw-r--r--  fsck/main.c                 183
-rw-r--r--  fsck/mount.c                  7
-rw-r--r--  fsck/segment.c              218
-rw-r--r--  fsck/sload.c                 12
-rw-r--r--  fsck/xattr.c                  2
-rw-r--r--  include/android_config.h     2
-rw-r--r--  include/f2fs_fs.h           131
-rw-r--r--  lib/libf2fs_io.c              3
-rw-r--r--  man/defrag.f2fs.8             2
-rw-r--r--  man/mkfs.f2fs.8              19
-rw-r--r--  man/sload.f2fs.8             94
-rw-r--r--  mkfs/f2fs_format.c           19
-rw-r--r--  mkfs/f2fs_format_main.c      15
-rw-r--r--  tools/f2fs_io/Makefile.am     2
-rw-r--r--  tools/f2fs_io/f2fs_io.c     209
-rw-r--r--  tools/f2fs_io/f2fs_io.h      21
-rw-r--r--  tools/f2fs_io_parse.c         2
-rw-r--r--  tools/f2fscrypt.8             2
28 files changed, 1193 insertions, 116 deletions
diff --git a/METADATA b/METADATA
index d7fd26e..9c14beb 100644
--- a/METADATA
+++ b/METADATA
@@ -12,7 +12,7 @@ third_party {
license_type: RESTRICTED
last_upgrade_date {
year: 2020
- month: 08
- day: 24
+ month: 12
+ day: 28
}
}
diff --git a/README b/README
index 4ea3356..afe334f 100644
--- a/README
+++ b/README
@@ -1,8 +1,8 @@
-F2FS format utilility
+F2FS format utility
---------------------
-To use f2fs filesystem, you should format the storage partition
-with this utilility. Otherwise, you cannot mount f2fs.
+To use the f2fs filesystem, you should format the storage partition
+with this utility. Otherwise, you cannot mount f2fs.
Before compilation
------------------
@@ -17,7 +17,7 @@ You should install the following packages.
Initial compilation
-------------------
-Before compilation initially, autoconf/automake tools should be run.
+Before initial compilation, autoconf/automake tools should be run.
# ./autogen.sh
@@ -47,4 +47,4 @@ How to run by default
$ mkfs.f2fs -l [LABEL] $DEV
-For more mkfs options, see man page.
+For more mkfs options, see the man page.
diff --git a/configure.ac b/configure.ac
index 1e5619d..32e97a2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,6 +52,18 @@ AC_PATH_PROG([LDCONFIG], [ldconfig],
[$PATH:/sbin])
# Checks for libraries.
+AC_CHECK_LIB([lzo2], [main],
+ [AC_SUBST([liblzo2_LIBS], ["-llzo2"])
+ AC_DEFINE([HAVE_LIBLZO2], [1],
+ [Define if you have liblzo2])
+ ], [], [])
+
+AC_CHECK_LIB([lz4], [main],
+ [AC_SUBST([liblz4_LIBS], ["-llz4"])
+ AC_DEFINE([HAVE_LIBLZ4], [1],
+ [Define if you have liblz4])
+ ], [], [])
+
PKG_CHECK_MODULES([libuuid], [uuid])
AS_IF([test "x$with_selinux" != "xno"],
@@ -93,6 +105,7 @@ AC_CHECK_HEADERS(m4_flatten([
linux/posix_acl.h
linux/types.h
linux/xattr.h
+ linux/fiemap.h
mach/mach_time.h
mntent.h
scsi/sg.h
diff --git a/fsck/Makefile.am b/fsck/Makefile.am
index 1fc7310..e7d599c 100644
--- a/fsck/Makefile.am
+++ b/fsck/Makefile.am
@@ -3,12 +3,15 @@
AM_CPPFLAGS = ${libuuid_CFLAGS} -I$(top_srcdir)/include
AM_CFLAGS = -Wall
sbin_PROGRAMS = fsck.f2fs
-noinst_HEADERS = common.h dict.h dqblk_v2.h f2fs.h fsck.h node.h quotaio.h quotaio_tree.h quotaio_v2.h xattr.h
+noinst_HEADERS = common.h dict.h dqblk_v2.h f2fs.h fsck.h node.h quotaio.h \
+ quotaio_tree.h quotaio_v2.h xattr.h compress.h
include_HEADERS = $(top_srcdir)/include/quota.h
fsck_f2fs_SOURCES = main.c fsck.c dump.c mount.c defrag.c resize.c \
- node.c segment.c dir.c sload.c xattr.c \
+ node.c segment.c dir.c sload.c xattr.c compress.c \
dict.c mkquota.c quotaio.c quotaio_tree.c quotaio_v2.c
-fsck_f2fs_LDADD = ${libselinux_LIBS} ${libuuid_LIBS} $(top_builddir)/lib/libf2fs.la
+fsck_f2fs_LDADD = ${libselinux_LIBS} ${libuuid_LIBS} \
+ ${liblzo2_LIBS} ${liblz4_LIBS} \
+ $(top_builddir)/lib/libf2fs.la
install-data-hook:
ln -sf fsck.f2fs $(DESTDIR)/$(sbindir)/dump.f2fs
diff --git a/fsck/compress.c b/fsck/compress.c
new file mode 100644
index 0000000..620768d
--- /dev/null
+++ b/fsck/compress.c
@@ -0,0 +1,178 @@
+/**
+ * compress.c
+ *
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add sload compression support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* for config.h for general environment (non-Android) */
+#include "f2fs.h"
+
+#include "compress.h"
+#ifdef HAVE_LIBLZO2
+#include <lzo/lzo1x.h> /* for lzo1x_1_15_compress() */
+#endif
+#ifdef HAVE_LIBLZ4
+#include <lz4.h> /* for LZ4_compress_fast_extState() */
+#endif
+
+/*
+ * macro/constants borrowed from kernel header (GPL-2.0):
+ * include/linux/lzo.h, and include/linux/lz4.h
+ */
+#ifdef HAVE_LIBLZO2
+#define lzo1x_worst_compress(x) ((x) + (x) / 16 + 64 + 3 + 2)
+#define LZO_WORK_SIZE ALIGN_UP(LZO1X_1_15_MEM_COMPRESS, 8)
+#endif
+#ifdef HAVE_LIBLZ4
+#define LZ4_MEMORY_USAGE 14
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#ifndef LZ4_STREAMSIZE
+#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long))
+#endif
+#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE
+#define LZ4_ACCELERATION_DEFAULT 1
+#define LZ4_WORK_SIZE ALIGN_UP(LZ4_MEM_COMPRESS, 8)
+#endif
+
+#if defined(HAVE_LIBLZO2) || defined(HAVE_LIBLZ4)
+static void reset_cc(struct compress_ctx *cc)
+{
+ memset(cc->rbuf, 0, cc->cluster_size * F2FS_BLKSIZE);
+ memset(cc->cbuf->cdata, 0, cc->cluster_size * F2FS_BLKSIZE
+ - F2FS_BLKSIZE);
+}
+#endif
+
+#ifdef HAVE_LIBLZO2
+static void lzo_compress_init(struct compress_ctx *cc)
+{
+ size_t size = cc->cluster_size * F2FS_BLKSIZE;
+ size_t alloc = size + lzo1x_worst_compress(size)
+ + COMPRESS_HEADER_SIZE + LZO_WORK_SIZE;
+ cc->private = malloc(alloc);
+ ASSERT(cc->private);
+ cc->rbuf = (char *) cc->private + LZO_WORK_SIZE;
+ cc->cbuf = (struct compress_data *)((char *) cc->rbuf + size);
+}
+
+static int lzo_compress(struct compress_ctx *cc)
+{
+ int ret = lzo1x_1_15_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ (lzo_uintp)(&cc->clen), cc->private);
+ cc->cbuf->clen = cpu_to_le32(cc->clen);
+ return ret;
+}
+#endif
+
+#ifdef HAVE_LIBLZ4
+static void lz4_compress_init(struct compress_ctx *cc)
+{
+ size_t size = cc->cluster_size * F2FS_BLKSIZE;
+ size_t alloc = size + LZ4_COMPRESSBOUND(size)
+ + COMPRESS_HEADER_SIZE + LZ4_WORK_SIZE;
+ cc->private = malloc(alloc);
+ ASSERT(cc->private);
+ cc->rbuf = (char *) cc->private + LZ4_WORK_SIZE;
+ cc->cbuf = (struct compress_data *)((char *) cc->rbuf + size);
+}
+
+static int lz4_compress(struct compress_ctx *cc)
+{
+ cc->clen = LZ4_compress_fast_extState(cc->private, cc->rbuf,
+ (char *)cc->cbuf->cdata, cc->rlen,
+ cc->rlen - F2FS_BLKSIZE * c.compress.min_blocks,
+ LZ4_ACCELERATION_DEFAULT);
+
+ if (!cc->clen)
+ return 1;
+
+ cc->cbuf->clen = cpu_to_le32(cc->clen);
+ return 0;
+}
+#endif
+
+const char *supported_comp_names[] = {
+ "lzo",
+ "lz4",
+ "",
+};
+
+compress_ops supported_comp_ops[] = {
+#ifdef HAVE_LIBLZO2
+ {lzo_compress_init, lzo_compress, reset_cc},
+#else
+ {NULL, NULL, NULL},
+#endif
+#ifdef HAVE_LIBLZ4
+ {lz4_compress_init, lz4_compress, reset_cc},
+#else
+ {NULL, NULL, NULL},
+#endif
+};
+
+/* linked list */
+typedef struct _ext_t {
+ const char *ext;
+ struct _ext_t *next;
+} ext_t;
+
+static ext_t *extension_list;
+
+static bool ext_found(const char *ext)
+{
+ ext_t *p = extension_list;
+
+ while (p != NULL && strcmp(ext, p->ext))
+ p = p->next;
+ return (p != NULL);
+}
+
+static const char *get_ext(const char *path)
+{
+ char *p = strrchr(path, '.');
+
+ return p == NULL ? path + strlen(path) : p + 1;
+}
+
+static bool ext_do_filter(const char *path)
+{
+ return (ext_found(get_ext(path)) == true) ^
+ (c.compress.filter == COMPR_FILTER_ALLOW);
+}
+
+static void ext_filter_add(const char *ext)
+{
+ ext_t *node;
+
+ ASSERT(ext != NULL);
+ if (ext_found(ext))
+ return; /* ext was already registered */
+ node = malloc(sizeof(ext_t));
+ ASSERT(node != NULL);
+ node->ext = ext;
+ node->next = extension_list;
+ extension_list = node;
+}
+
+static void ext_filter_destroy(void)
+{
+ ext_t *p;
+
+ while (extension_list != NULL) {
+ p = extension_list;
+ extension_list = p->next;
+ free(p);
+ }
+}
+
+filter_ops ext_filter = {
+ .add = ext_filter_add,
+ .destroy = ext_filter_destroy,
+ .filter = ext_do_filter,
+};
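
The extension filter added above boils down to a single XOR in ext_do_filter(): "extension is in the list" XOR "policy is COMPR_FILTER_ALLOW". Per the option parsing later in this merge, -x installs the ALLOW policy (compress everything except the listed extensions) and -i installs the DENY policy (compress only the listed extensions). A minimal, self-contained sketch of that truth table; it is not part of the patch, and should_compress() is an illustrative stand-in for the real filter hook:

    /* Sketch only: mirrors ext_do_filter() from fsck/compress.c. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool should_compress(bool ext_listed, bool policy_is_allow)
    {
        /* listed XOR (policy == COMPR_FILTER_ALLOW) */
        return ext_listed ^ policy_is_allow;
    }

    int main(void)
    {
        printf("-i list, listed ext -> %d (compressed)\n", should_compress(true,  false));
        printf("-i list, other ext  -> %d (skipped)\n",    should_compress(false, false));
        printf("-x list, listed ext -> %d (skipped)\n",    should_compress(true,  true));
        printf("-x list, other ext  -> %d (compressed)\n", should_compress(false, true));
        return 0;
    }
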
diff --git a/fsck/compress.h b/fsck/compress.h
new file mode 100644
index 0000000..917de2d
--- /dev/null
+++ b/fsck/compress.h
@@ -0,0 +1,22 @@
+/**
+ * compress.h
+ *
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add sload compression support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMPRESS_H
+#define COMPRESS_H
+
+#include "f2fs_fs.h"
+
+extern const char *supported_comp_names[];
+extern compress_ops supported_comp_ops[];
+extern filter_ops ext_filter;
+
+#endif /* COMPRESS_H */
diff --git a/fsck/dir.c b/fsck/dir.c
index dc03c98..aeb876d 100644
--- a/fsck/dir.c
+++ b/fsck/dir.c
@@ -15,6 +15,7 @@
*/
#include "fsck.h"
#include "node.h"
+#include <search.h>
static int room_for_filename(const u8 *bitmap, int slots, int max_slots)
{
@@ -634,10 +635,43 @@ int convert_inline_dentry(struct f2fs_sb_info *sbi, struct f2fs_node *node,
return 0;
}
+static int cmp_from_devino(const void *a, const void *b) {
+ u64 devino_a = ((struct hardlink_cache_entry*) a)->from_devino;
+ u64 devino_b = ((struct hardlink_cache_entry*) b)->from_devino;
+
+ return (devino_a > devino_b) - (devino_a < devino_b);
+}
+
+struct hardlink_cache_entry *f2fs_search_hardlink(struct f2fs_sb_info *sbi,
+ struct dentry *de)
+{
+ struct hardlink_cache_entry *find_hardlink = NULL;
+ struct hardlink_cache_entry *found_hardlink = NULL;
+ void *search_result;
+
+ /* This might be a hardlink, try to find it in the cache */
+ find_hardlink = calloc(1, sizeof(struct hardlink_cache_entry));
+ find_hardlink->from_devino = de->from_devino;
+
+ search_result = tsearch(find_hardlink, &(sbi->hardlink_cache),
+ cmp_from_devino);
+ ASSERT(search_result != 0);
+
+ found_hardlink = *(struct hardlink_cache_entry**) search_result;
+ ASSERT(find_hardlink->from_devino == found_hardlink->from_devino);
+
+ /* If it was already in the cache, free the entry we just created */
+ if (found_hardlink != find_hardlink)
+ free(find_hardlink);
+
+ return found_hardlink;
+}
+
int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de)
{
struct f2fs_node *parent, *child;
- struct node_info ni;
+ struct hardlink_cache_entry *found_hardlink = NULL;
+ struct node_info ni, hardlink_ni;
struct f2fs_summary sum;
block_t blkaddr = NULL_ADDR;
int ret;
@@ -649,6 +683,9 @@ int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de)
return -1;
}
+ if (de->from_devino)
+ found_hardlink = f2fs_search_hardlink(sbi, de);
+
parent = calloc(BLOCK_SZ, 1);
ASSERT(parent);
@@ -674,7 +711,26 @@ int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de)
child = calloc(BLOCK_SZ, 1);
ASSERT(child);
- f2fs_alloc_nid(sbi, &de->ino);
+ if (found_hardlink && found_hardlink->to_ino) {
+ /*
+ * If we found this devino in the cache, we're creating a
+ * hard link.
+ */
+ get_node_info(sbi, found_hardlink->to_ino, &hardlink_ni);
+ if (hardlink_ni.blk_addr == NULL_ADDR) {
+ MSG(1, "No original inode for hard link to_ino=%x\n",
+ found_hardlink->to_ino);
+ return -1;
+ }
+
+ /* Use previously-recorded inode */
+ de->ino = found_hardlink->to_ino;
+ blkaddr = hardlink_ni.blk_addr;
+ MSG(1, "Info: Creating \"%s\" as hard link to inode %d\n",
+ de->path, de->ino);
+ } else {
+ f2fs_alloc_nid(sbi, &de->ino);
+ }
init_inode_block(sbi, child, de);
@@ -689,6 +745,30 @@ int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de)
goto free_child_dir;
}
+ if (found_hardlink) {
+ if (!found_hardlink->to_ino) {
+ MSG(2, "Adding inode %d from %s to hardlink cache\n",
+ de->ino, de->path);
+ found_hardlink->to_ino = de->ino;
+ } else {
+ /* Replace child with original block */
+ free(child);
+
+ child = calloc(BLOCK_SZ, 1);
+ ASSERT(child);
+
+ ret = dev_read_block(child, blkaddr);
+ ASSERT(ret >= 0);
+
+ /* Increment links and skip to writing block */
+ child->i.i_links = cpu_to_le32(
+ le32_to_cpu(child->i.i_links) + 1);
+ MSG(2, "Number of links on inode %d is now %d\n",
+ de->ino, le32_to_cpu(child->i.i_links));
+ goto write_child_dir;
+ }
+ }
+
/* write child */
set_summary(&sum, de->ino, 0, ni.version);
ret = reserve_new_block(sbi, &blkaddr, &sum, CURSEG_HOT_NODE, 1);
@@ -697,16 +777,21 @@ int f2fs_create(struct f2fs_sb_info *sbi, struct dentry *de)
/* update nat info */
update_nat_blkaddr(sbi, de->ino, de->ino, blkaddr);
+write_child_dir:
ret = dev_write_block(child, blkaddr);
ASSERT(ret >= 0);
update_free_segments(sbi);
MSG(1, "Info: Create %s -> %s\n"
" -- ino=%x, type=%x, mode=%x, uid=%x, "
- "gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n",
+ "gid=%x, cap=%"PRIx64", size=%lu, link=%u "
+ "blocks=%"PRIx64" pino=%x\n",
de->full_path, de->path,
de->ino, de->file_type, de->mode,
- de->uid, de->gid, de->capabilities, de->size, de->pino);
+ de->uid, de->gid, de->capabilities, de->size,
+ le32_to_cpu(child->i.i_links),
+ le64_to_cpu(child->i.i_blocks),
+ de->pino);
free_child_dir:
free(child);
free_parent_dir:
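
f2fs_search_hardlink() above keys a tsearch() binary tree on the packed device/inode number that sload.c stores as (st_dev << 32) | st_ino; the first file with a given key records the inode it was created as, and every later file with the same key becomes a hard link to it. A standalone sketch of that lookup-or-insert pattern, with illustrative names rather than the patch's own helpers:

    /* Sketch only: the tsearch() pattern behind f2fs_search_hardlink(). */
    #include <search.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        uint64_t from_devino;   /* (st_dev << 32) | st_ino */
        uint32_t to_ino;        /* inode recorded on first sight */
    };

    static int cmp(const void *a, const void *b)
    {
        uint64_t ka = ((const struct entry *)a)->from_devino;
        uint64_t kb = ((const struct entry *)b)->from_devino;
        return (ka > kb) - (ka < kb);
    }

    int main(void)
    {
        void *root = NULL;
        uint64_t keys[] = { 0x100000005ULL, 0x100000005ULL, 0x100000009ULL };

        for (unsigned i = 0; i < 3; i++) {
            struct entry *probe = calloc(1, sizeof(*probe));
            probe->from_devino = keys[i];
            struct entry *found = *(struct entry **)tsearch(probe, &root, cmp);
            if (found != probe) {           /* already cached: a hard link */
                free(probe);
                printf("link to ino %u\n", found->to_ino);
            } else {                        /* first sight: record an inode */
                found->to_ino = 1000 + i;   /* placeholder inode number */
                printf("new inode %u\n", found->to_ino);
            }
        }
        return 0;
    }
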
diff --git a/fsck/f2fs.h b/fsck/f2fs.h
index 76e8272..9c6b0e4 100644
--- a/fsck/f2fs.h
+++ b/fsck/f2fs.h
@@ -221,6 +221,7 @@ struct dentry {
uint64_t capabilities;
nid_t ino;
nid_t pino;
+ u64 from_devino;
};
/* different from dnode_of_data in kernel */
@@ -234,6 +235,12 @@ struct dnode_of_data {
int idirty, ndirty;
};
+struct hardlink_cache_entry {
+ u64 from_devino;
+ nid_t to_ino;
+ int nbuild;
+};
+
struct f2fs_sb_info {
struct f2fs_fsck *fsck;
@@ -276,6 +283,9 @@ struct f2fs_sb_info {
/* true if late_build_segment_manger() is called */
bool seg_manager_done;
+
+ /* keep track of hardlinks so we can recreate them */
+ void *hardlink_cache;
};
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
diff --git a/fsck/fsck.c b/fsck/fsck.c
index f97e9fb..e526720 100644
--- a/fsck/fsck.c
+++ b/fsck/fsck.c
@@ -790,6 +790,17 @@ void fsck_chk_inode_blk(struct f2fs_sb_info *sbi, u32 nid,
}
ofs = get_extra_isize(node_blk);
+ if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
+ (ftype != F2FS_FT_DIR ||
+ !(c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)))) {
+ ASSERT_MSG("[0x%x] unexpected casefold flag", nid);
+ if (c.fix_on) {
+ FIX_MSG("ino[0x%x] clear casefold flag", nid);
+ node_blk->i.i_flags &= ~cpu_to_le32(F2FS_CASEFOLD_FL);
+ need_fix = 1;
+ }
+ }
+
if ((node_blk->i.i_inline & F2FS_INLINE_DATA)) {
unsigned int inline_size = MAX_INLINE_DATA(node_blk);
if (cur_qtype != -1)
@@ -3135,10 +3146,11 @@ int fsck_verify(struct f2fs_sb_info *sbi)
#ifndef WITH_ANDROID
if (nr_unref_nid && !c.ro) {
char ans[255] = {0};
+ int res;
printf("\nDo you want to restore lost files into ./lost_found/? [Y/N] ");
- ret = scanf("%s", ans);
- ASSERT(ret >= 0);
+ res = scanf("%s", ans);
+ ASSERT(res >= 0);
if (!strcasecmp(ans, "y")) {
for (i = 0; i < fsck->nr_nat_entries; i++) {
if (f2fs_test_bit(i, fsck->nat_area_bitmap))
diff --git a/fsck/fsck.h b/fsck/fsck.h
index c5e85fe..b9dcd5c 100644
--- a/fsck/fsck.h
+++ b/fsck/fsck.h
@@ -282,7 +282,16 @@ block_t new_node_block(struct f2fs_sb_info *,
struct quota_file;
u64 f2fs_quota_size(struct quota_file *);
u64 f2fs_read(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
+enum wr_addr_type {
+ WR_NORMAL = 1,
+ WR_COMPRESS_DATA = 2,
+ WR_NULL_ADDR = NULL_ADDR, /* 0 */
+ WR_NEW_ADDR = NEW_ADDR, /* -1U */
+ WR_COMPRESS_ADDR = COMPRESS_ADDR, /* -2U */
+};
u64 f2fs_write(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
+u64 f2fs_write_compress_data(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
+u64 f2fs_write_addrtag(struct f2fs_sb_info *, nid_t, pgoff_t, unsigned int);
void f2fs_filesize_update(struct f2fs_sb_info *, nid_t, u64);
int get_dnode_of_data(struct f2fs_sb_info *, struct dnode_of_data *,
@@ -296,6 +305,8 @@ int f2fs_find_path(struct f2fs_sb_info *, char *, nid_t *);
nid_t f2fs_lookup(struct f2fs_sb_info *, struct f2fs_node *, u8 *, int);
int f2fs_add_link(struct f2fs_sb_info *, struct f2fs_node *,
const unsigned char *, int, nid_t, int, block_t, int);
+struct hardlink_cache_entry *f2fs_search_hardlink(struct f2fs_sb_info *sbi,
+ struct dentry *de);
/* xattr.c */
void *read_all_xattrs(struct f2fs_sb_info *, struct f2fs_node *);
diff --git a/fsck/main.c b/fsck/main.c
index 32559f1..a538c72 100644
--- a/fsck/main.c
+++ b/fsck/main.c
@@ -13,6 +13,9 @@
* Copyright (c) 2019 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add cache layer
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,6 +28,7 @@
#include <getopt.h>
#include <stdbool.h>
#include "quotaio.h"
+#include "compress.h"
struct f2fs_fsck gfsck;
@@ -134,6 +138,17 @@ void sload_usage()
MSG(0, " -S sparse_mode\n");
MSG(0, " -t mount point [prefix of target fs path, default:/]\n");
MSG(0, " -T timestamp\n");
+ MSG(0, " -c enable compression (default allow policy)\n");
+ MSG(0, " ------------ Compression sub-options -----------------\n");
+ MSG(0, " -L <log-of-blocks-per-cluster>, default 2\n");
+ MSG(0, " -a <algorithm> compression algorithm, default LZ4\n");
+ MSG(0, " -x <ext> compress files except for these extensions.\n");
+ MSG(0, " -i <ext> compress files with these extensions only.\n");
+ MSG(0, " * -i or -x: use it many times for multiple extensions.\n");
+ MSG(0, " * -i and -x cannot be used together..\n");
+ MSG(0, " -m <num> min compressed blocks per cluster\n");
+ MSG(0, " -r readonly (IMMUTABLE) for compressed files\n");
+ MSG(0, " ------------------------------------------------------\n");
MSG(0, " -d debug level [default:0]\n");
MSG(0, " -V print the version number and exit\n");
exit(1);
@@ -345,6 +360,7 @@ void f2fs_parse_options(int argc, char *argv[])
break;
}
} else if (!strcmp("dump.f2fs", prog)) {
+#ifdef WITH_DUMP
const char *option_string = "d:i:n:s:Sa:b:V";
static struct dump_option dump_opt = {
.nid = 0, /* default root ino */
@@ -426,7 +442,9 @@ void f2fs_parse_options(int argc, char *argv[])
}
c.private = &dump_opt;
+#endif
} else if (!strcmp("defrag.f2fs", prog)) {
+#ifdef WITH_DEFRAG
const char *option_string = "d:s:Sl:t:iV";
c.func = DEFRAG;
@@ -484,7 +502,9 @@ void f2fs_parse_options(int argc, char *argv[])
if (err != NOERROR)
break;
}
+#endif
} else if (!strcmp("resize.f2fs", prog)) {
+#ifdef WITH_RESIZE
const char *option_string = "d:st:iV";
c.func = RESIZE;
@@ -526,8 +546,10 @@ void f2fs_parse_options(int argc, char *argv[])
if (err != NOERROR)
break;
}
+#endif
} else if (!strcmp("sload.f2fs", prog)) {
- const char *option_string = "C:d:f:p:s:St:T:V";
+#ifdef WITH_SLOAD
+ const char *option_string = "cL:a:i:x:m:rC:d:f:p:s:St:T:V";
#ifdef HAVE_LIBSELINUX
int max_nr_opt = (int)sizeof(c.seopt_file) /
sizeof(c.seopt_file[0]);
@@ -536,8 +558,83 @@ void f2fs_parse_options(int argc, char *argv[])
char *p;
c.func = SLOAD;
+ c.compress.cc.log_cluster_size = 2;
+ c.compress.alg = COMPR_LZ4;
+ c.compress.min_blocks = 1;
+ c.compress.filter_ops = &ext_filter;
while ((option = getopt(argc, argv, option_string)) != EOF) {
+ unsigned int i;
+ int val;
+
switch (option) {
+ case 'c': /* compression support */
+ c.compress.enabled = true;
+ break;
+ case 'L': /* compression: log of blocks-per-cluster */
+ c.compress.required = true;
+ val = atoi(optarg);
+ if (val < MIN_COMPRESS_LOG_SIZE ||
+ val > MAX_COMPRESS_LOG_SIZE) {
+ MSG(0, "\tError: log of blocks per"
+ " cluster must be in the range"
+ " of %d .. %d.\n",
+ MIN_COMPRESS_LOG_SIZE,
+ MAX_COMPRESS_LOG_SIZE);
+ error_out(prog);
+ }
+ c.compress.cc.log_cluster_size = val;
+ break;
+ case 'a': /* compression: choose algorithm */
+ c.compress.required = true;
+ c.compress.alg = MAX_COMPRESS_ALGS;
+ for (i = 0; i < MAX_COMPRESS_ALGS; i++) {
+ if (!strcmp(supported_comp_names[i],
+ optarg)) {
+ c.compress.alg = i;
+ break;
+ }
+ }
+ if (c.compress.alg == MAX_COMPRESS_ALGS) {
+ MSG(0, "\tError: Unknown compression"
+ " algorithm %s\n", optarg);
+ error_out(prog);
+ }
+ break;
+ case 'i': /* compress only these extensions */
+ c.compress.required = true;
+ if (c.compress.filter == COMPR_FILTER_ALLOW) {
+ MSG(0, "\tError: could not mix option"
+ " -i and -x\n");
+ error_out(prog);
+ }
+ c.compress.filter = COMPR_FILTER_DENY;
+ c.compress.filter_ops->add(optarg);
+ break;
+ case 'x': /* compress except for these extensions */
+ c.compress.required = true;
+ if (c.compress.filter == COMPR_FILTER_DENY) {
+ MSG(0, "\tError: could not mix option"
+ " -i and -x\n");
+ error_out(prog);
+ }
+ c.compress.filter = COMPR_FILTER_ALLOW;
+ c.compress.filter_ops->add(optarg);
+ break;
+ case 'm': /* minimum compressed blocks per cluster */
+ c.compress.required = true;
+ val = atoi(optarg);
+ if (val <= 0) {
+ MSG(0, "\tError: minimum compressed"
+ " blocks per cluster must be"
+ " positive.\n");
+ error_out(prog);
+ }
+ c.compress.min_blocks = val;
+ break;
+ case 'r': /* compress file to set IMMUTABLE */
+ c.compress.required = true;
+ c.compress.readonly = true;
+ break;
case 'C':
c.fs_config_file = absolute_path(optarg);
break;
@@ -595,22 +692,46 @@ void f2fs_parse_options(int argc, char *argv[])
if (err != NOERROR)
break;
}
+ if (c.compress.required && !c.compress.enabled) {
+ MSG(0, "\tError: compression sub-options are used"
+ " without the compression enable (-c) option\n"
+ );
+ error_out(prog);
+ }
+ if (err == NOERROR && c.compress.enabled) {
+ c.compress.cc.cluster_size = 1
+ << c.compress.cc.log_cluster_size;
+ if (c.compress.filter == COMPR_FILTER_UNASSIGNED)
+ c.compress.filter = COMPR_FILTER_ALLOW;
+ if (c.compress.min_blocks >=
+ c.compress.cc.cluster_size) {
+ MSG(0, "\tError: minimum reduced blocks by"
+ " compression per cluster must be at"
+ " most one less than blocks per"
+ " cluster, i.e. %d\n",
+ c.compress.cc.cluster_size - 1);
+ error_out(prog);
+ }
+ }
+#endif /* WITH_SLOAD */
}
- add_default_options();
+ if (err == NOERROR) {
+ add_default_options();
- if (optind >= argc) {
- MSG(0, "\tError: Device not specified\n");
- error_out(prog);
- }
+ if (optind >= argc) {
+ MSG(0, "\tError: Device not specified\n");
+ error_out(prog);
+ }
- c.devices[0].path = strdup(argv[optind]);
- if (argc > (optind + 1)) {
- c.dbg_lv = 0;
- err = EUNKNOWN_ARG;
+ c.devices[0].path = strdup(argv[optind]);
+ if (argc > (optind + 1)) {
+ c.dbg_lv = 0;
+ err = EUNKNOWN_ARG;
+ }
+ if (err == NOERROR)
+ return;
}
- if (err == NOERROR)
- return;
/* print out error */
switch (err) {
@@ -705,6 +826,7 @@ static int do_fsck(struct f2fs_sb_info *sbi)
return FSCK_ERRORS_LEFT_UNCORRECTED;
}
+#ifdef WITH_DUMP
static void do_dump(struct f2fs_sb_info *sbi)
{
struct dump_option *opt = (struct dump_option *)c.private;
@@ -731,7 +853,9 @@ static void do_dump(struct f2fs_sb_info *sbi)
print_cp_state(flag);
}
+#endif
+#ifdef WITH_DEFRAG
static int do_defrag(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
@@ -780,7 +904,9 @@ out_range:
c.defrag_target);
return -1;
}
+#endif
+#ifdef WITH_RESIZE
static int do_resize(struct f2fs_sb_info *sbi)
{
if (!c.target_sectors)
@@ -794,6 +920,32 @@ static int do_resize(struct f2fs_sb_info *sbi)
return f2fs_resize(sbi);
}
+#endif
+
+#ifdef WITH_SLOAD
+static int init_compr(struct f2fs_sb_info *sbi)
+{
+ if (!c.compress.enabled)
+ return 0;
+
+ if (!(sbi->raw_super->feature
+ & cpu_to_le32(F2FS_FEATURE_COMPRESSION))) {
+ MSG(0, "Error: Compression (-c) was requested "
+ "but the file system is not created "
+ "with such feature.\n");
+ return -1;
+ }
+ if (!supported_comp_ops[c.compress.alg].init) {
+ MSG(0, "Error: The selected compression algorithm is not"
+ " supported\n");
+ return -1;
+ }
+ c.compress.ops = supported_comp_ops + c.compress.alg;
+ c.compress.ops->init(&c.compress.cc);
+ c.compress.ops->reset(&c.compress.cc);
+ c.compress.cc.rlen = c.compress.cc.cluster_size * F2FS_BLKSIZE;
+ return 0;
+}
static int do_sload(struct f2fs_sb_info *sbi)
{
@@ -804,8 +956,12 @@ static int do_sload(struct f2fs_sb_info *sbi)
if (!c.mount_point)
c.mount_point = "/";
+ if (init_compr(sbi))
+ return -1;
+
return f2fs_sload(sbi);
}
+#endif
#if defined(__APPLE__)
static u64 get_boottime_ns()
@@ -953,6 +1109,9 @@ retry:
return ret2;
}
+ if (c.func == SLOAD)
+ c.compress.filter_ops->destroy();
+
printf("\nDone: %lf secs\n", (get_boottime_ns() - start) / 1000000000.0);
return ret;
diff --git a/fsck/mount.c b/fsck/mount.c
index 8ebc5b0..6b2f17e 100644
--- a/fsck/mount.c
+++ b/fsck/mount.c
@@ -793,7 +793,7 @@ static int verify_sb_chksum(struct f2fs_super_block *sb)
int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
{
unsigned int blocksize;
- unsigned int segment_count, segs_per_sec, secs_per_zone;
+ unsigned int segment_count, segs_per_sec, secs_per_zone, segs_per_zone;
unsigned int total_sections, blocks_per_seg;
if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) &&
@@ -845,6 +845,7 @@ int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
segs_per_sec = get_sb(segs_per_sec);
secs_per_zone = get_sb(secs_per_zone);
total_sections = get_sb(section_count);
+ segs_per_zone = segs_per_sec * secs_per_zone;
/* blocks_per_seg should be 512, given the above check */
blocks_per_seg = 1 << get_sb(log_blocks_per_seg);
@@ -883,7 +884,7 @@ int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
dev_segs += le32_to_cpu(sb->devs[i].total_segments);
i++;
}
- if (segment_count != dev_segs) {
+ if (segment_count != dev_segs / segs_per_zone * segs_per_zone) {
MSG(0, "Segment count (%u) mismatch with total segments from devices (%u)",
segment_count, dev_segs);
return 1;
@@ -2482,7 +2483,7 @@ void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
se->valid_blocks = valid_blocks;
type = se->type;
if (type >= NO_CHECK_TYPE) {
- ASSERT_MSG("Invalide type and valid blocks=%x,%x",
+ ASSERT_MSG("Invalid type and valid blocks=%x,%x",
segno, valid_blocks);
type = 0;
}
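
The relaxed check above compares the superblock segment count against the per-device total rounded down to a whole zone, so a tail on the last device that cannot form a complete zone no longer trips the sanity check. A tiny sketch of that integer rounding, with made-up geometry values:

    /* Sketch only: rounding dev_segs down to a zone multiple. */
    #include <stdio.h>

    int main(void)
    {
        unsigned segs_per_sec = 1, secs_per_zone = 16;  /* hypothetical */
        unsigned segs_per_zone = segs_per_sec * secs_per_zone;
        unsigned dev_segs = 1030;                       /* sum over devices */

        /* 1030 / 16 * 16 == 1024 comparable segments */
        printf("%u\n", dev_segs / segs_per_zone * segs_per_zone);
        return 0;
    }
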
diff --git a/fsck/segment.c b/fsck/segment.c
index 0487f41..365c7f8 100644
--- a/fsck/segment.c
+++ b/fsck/segment.c
@@ -8,6 +8,9 @@
* Hou Pengyang <houpengyang@huawei.com>
* Liu Shuoran <liushuoran@huawei.com>
* Jaegeuk Kim <jaegeuk@kernel.org>
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -111,6 +114,8 @@ int new_data_block(struct f2fs_sb_info *sbi, void *block,
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+
+ dn->data_blkaddr = blkaddr;
ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
if (ret) {
c.alloc_failed = 1;
@@ -228,8 +233,14 @@ u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
return read_count;
}
-u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
- u64 count, pgoff_t offset)
+/*
+ * Do not call this function directly. Instead, call one of the following:
+ * u64 f2fs_write();
+ * u64 f2fs_write_compress_data();
+ * u64 f2fs_write_addrtag();
+ */
+static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
+ u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
struct dnode_of_data dn;
struct node_info ni;
@@ -243,6 +254,19 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
void* index_node = NULL;
int idirty = 0;
int err;
+ bool has_data = (addr_type == WR_NORMAL
+ || addr_type == WR_COMPRESS_DATA);
+
+ if (count == 0)
+ return 0;
+
+ /*
+ * Enforce calling from f2fs_write(), f2fs_write_compress_data(),
+ * and f2fs_write_addrtag(). Beside, check if is properly called.
+ */
+ ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
+ if (addr_type != WR_NORMAL)
+ ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */
/* Memory allocation for block buffer and inode. */
blk_buffer = calloc(BLOCK_SZ, 2);
@@ -265,15 +289,26 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
if (err)
break;
idirty |= dn.idirty;
- if (index_node)
- free(index_node);
+ free(index_node);
index_node = (dn.node_blk == dn.inode_blk) ?
- NULL : dn.node_blk;
+ NULL : dn.node_blk;
remained_blkentries = ADDRS_PER_PAGE(sbi,
- dn.node_blk, dn.inode_blk);
+ dn.node_blk, dn.inode_blk) -
+ dn.ofs_in_node;
}
ASSERT(remained_blkentries > 0);
+ if (!has_data) {
+ dn.data_blkaddr = addr_type;
+ set_data_blkaddr(&dn);
+ idirty |= dn.idirty;
+ if (dn.ndirty)
+ ASSERT(dev_write_block(dn.node_blk,
+ dn.node_blkaddr) >= 0);
+ written_count = 0;
+ break;
+ }
+
blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
err = new_data_block(sbi, blk_buffer,
@@ -281,6 +316,7 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
if (err)
break;
blkaddr = dn.data_blkaddr;
+ idirty |= dn.idirty;
}
off_in_blk = offset % BLOCK_SZ;
@@ -305,9 +341,10 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
dn.ofs_in_node++;
if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty))
- ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr) >= 0);
+ ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr)
+ >= 0);
}
- if (offset > le64_to_cpu(inode->i.i_size)) {
+ if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
inode->i.i_size = cpu_to_le64(offset);
idirty = 1;
}
@@ -315,13 +352,33 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
ASSERT(inode == dn.inode_blk);
ASSERT(write_inode(inode, ni.blk_addr) >= 0);
}
- if (index_node)
- free(index_node);
+
+ free(index_node);
free(blk_buffer);
return written_count;
}
+u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
+ u64 count, pgoff_t offset)
+{
+ return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
+}
+
+u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
+ u64 count, pgoff_t offset)
+{
+ return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
+}
+
+u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
+ unsigned int addrtag)
+{
+ ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
+ || addrtag == NULL_ADDR);
+ return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
+}
+
/* This function updates only inode->i.i_size */
void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
{
@@ -342,15 +399,74 @@ void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
free(inode);
}
+#define MAX_BULKR_RETRY 5
+int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
+{
+ int n = 0;
+ int retry = MAX_BULKR_RETRY;
+ int cur;
+
+ if (!rsize)
+ return 0;
+
+ if (eof != NULL)
+ *eof = false;
+ while (rsize && (cur = read(fd, rbuf, rsize)) != 0) {
+ if (cur == -1) {
+ if (errno == EINTR && retry--)
+ continue;
+ return -1;
+ }
+ retry = MAX_BULKR_RETRY;
+
+ rsize -= cur;
+ n += cur;
+ }
+ if (eof != NULL)
+ *eof = (cur == 0);
+ return n;
+}
+
+u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
+ unsigned int compressed)
+{
+ unsigned int i;
+ u64 wlen;
+
+ if (c.compress.readonly)
+ return 0;
+
+ for (i = 0; i < compressed - 1; i++) {
+ wlen = f2fs_write_addrtag(sbi, ino,
+ offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
+ if (wlen)
+ return wlen;
+ }
+ return 0;
+}
+
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
int fd, n;
pgoff_t off = 0;
u8 buffer[BLOCK_SZ];
+ struct node_info ni;
+ struct f2fs_node *node_blk;
if (de->ino == 0)
return -1;
+ if (de->from_devino) {
+ struct hardlink_cache_entry *found_hardlink;
+
+ found_hardlink = f2fs_search_hardlink(sbi, de);
+ if (found_hardlink && found_hardlink->to_ino &&
+ found_hardlink->nbuild)
+ return 0;
+
+ found_hardlink->nbuild++;
+ }
+
fd = open(de->full_path, O_RDONLY);
if (fd < 0) {
MSG(0, "Skip: Fail to open %s\n", de->full_path);
@@ -359,8 +475,6 @@ int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
/* inline_data support */
if (de->size <= DEF_MAX_INLINE_DATA) {
- struct node_info ni;
- struct f2fs_node *node_blk;
int ret;
get_node_info(sbi, de->ino, &ni);
@@ -385,6 +499,86 @@ int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
node_blk->i.i_size = cpu_to_le64(de->size);
ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
free(node_blk);
+#ifdef WITH_SLOAD
+ } else if (c.func == SLOAD && c.compress.enabled &&
+ c.compress.filter_ops->filter(de->full_path)) {
+ bool eof = false;
+ u8 *rbuf = c.compress.cc.rbuf;
+ unsigned int cblocks = 0;
+
+ node_blk = calloc(BLOCK_SZ, 1);
+ ASSERT(node_blk);
+
+ /* read inode */
+ get_node_info(sbi, de->ino, &ni);
+ ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
+ /* update inode meta */
+ node_blk->i.i_compress_algrithm = c.compress.alg;
+ node_blk->i.i_log_cluster_size =
+ c.compress.cc.log_cluster_size;
+ node_blk->i.i_flags = cpu_to_le32(
+ F2FS_COMPR_FL |
+ (c.compress.readonly ? FS_IMMUTABLE_FL : 0));
+ ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
+
+ while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
+ &eof)) > 0) {
+ int ret = c.compress.ops->compress(&c.compress.cc);
+ u64 wlen;
+ u32 csize = ALIGN_UP(c.compress.cc.clen +
+ COMPRESS_HEADER_SIZE, BLOCK_SZ);
+ unsigned int cur_cblk;
+
+ if (ret || n < c.compress.cc.rlen ||
+ n < (int)(csize + BLOCK_SZ *
+ c.compress.min_blocks)) {
+ wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
+ ASSERT((int)wlen == n);
+ } else {
+ wlen = f2fs_write_addrtag(sbi, de->ino, off,
+ WR_COMPRESS_ADDR);
+ ASSERT(!wlen);
+ wlen = f2fs_write_compress_data(sbi, de->ino,
+ (u8 *)c.compress.cc.cbuf,
+ csize, off + BLOCK_SZ);
+ ASSERT(wlen == csize);
+ c.compress.ops->reset(&c.compress.cc);
+ cur_cblk = (c.compress.cc.rlen - csize) /
+ BLOCK_SZ;
+ cblocks += cur_cblk;
+ wlen = f2fs_fix_mutable(sbi, de->ino,
+ off + BLOCK_SZ + csize,
+ cur_cblk);
+ ASSERT(!wlen);
+ }
+ off += n;
+ }
+ if (n == -1) {
+ fprintf(stderr, "Load file '%s' failed: ",
+ de->full_path);
+ perror(NULL);
+ }
+ /* read inode */
+ get_node_info(sbi, de->ino, &ni);
+ ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
+ /* update inode meta */
+ node_blk->i.i_size = cpu_to_le64(off);
+ if (!c.compress.readonly) {
+ node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
+ node_blk->i.i_blocks += cpu_to_le64(cblocks);
+ }
+ ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
+ free(node_blk);
+
+ if (!c.compress.readonly) {
+ sbi->total_valid_block_count += cblocks;
+ if (sbi->total_valid_block_count >=
+ sbi->user_block_count) {
+ ERR_MSG("Not enough space\n");
+ ASSERT(0);
+ }
+ }
+#endif
} else {
while ((n = read(fd, buffer, BLOCK_SZ)) > 0) {
f2fs_write(sbi, de->ino, buffer, n, off);
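
Taken together, the segment.c additions lay a compressed cluster out as one COMPRESS_ADDR tag slot, then the compressed payload written by f2fs_write_compress_data(), then NEW_ADDR tags from f2fs_fix_mutable() for the slots saved by compression (the -r/readonly mode skips those fillers so the saved blocks can be released). A sketch of the per-cluster accounting for one hypothetical cluster; the constants mirror the patch (F2FS_BLKSIZE = 4096, COMPRESS_HEADER_SIZE = 24) and clen is an assumed compressor result:

    /* Sketch only: block accounting for one sload-compressed cluster. */
    #include <stdio.h>

    #define BLKSZ          4096u
    #define HDR            24u   /* sizeof(struct compress_data) */
    #define ALIGN_UP(v, s) ((v) + ((v) % (s) ? (s) - (v) % (s) : 0))

    int main(void)
    {
        unsigned cluster_blocks = 4;  /* -L 2 => 1 << 2 blocks per cluster */
        unsigned min_blocks = 1;      /* -m 1 (default) */
        unsigned clen = 5000;         /* assumed compressed length in bytes */

        unsigned csize = ALIGN_UP(clen + HDR, BLKSZ);    /* 8192 => 2 blocks */
        unsigned saved = cluster_blocks - csize / BLKSZ; /* 2 blocks saved */

        if (saved >= min_blocks)
            printf("layout: [COMPRESS_ADDR][%u payload][%u NEW_ADDR]\n",
                   csize / BLKSZ, saved - 1);
        else
            printf("cluster stored uncompressed\n");
        return 0;
    }

With these numbers the cluster occupies its full four address slots (1 tag + 2 payload + 1 NEW_ADDR), which is exactly what f2fs_fix_mutable()'s "compressed - 1" loop produces.
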
diff --git a/fsck/sload.c b/fsck/sload.c
index 14012fb..4dea78b 100644
--- a/fsck/sload.c
+++ b/fsck/sload.c
@@ -148,6 +148,15 @@ static void set_inode_metadata(struct dentry *de)
}
if (S_ISREG(stat.st_mode)) {
+ if (stat.st_nlink > 1) {
+ /*
+ * This file might have multiple links to it, so remember
+ * device and inode.
+ */
+ de->from_devino = stat.st_dev;
+ de->from_devino <<= 32;
+ de->from_devino |= stat.st_ino;
+ }
de->file_type = F2FS_FT_REG_FILE;
} else if (S_ISDIR(stat.st_mode)) {
de->file_type = F2FS_FT_DIR;
@@ -333,6 +342,9 @@ int f2fs_sload(struct f2fs_sb_info *sbi)
/* flush NAT/SIT journal entries */
flush_journal_entries(sbi);
+ /* initialize empty hardlink cache */
+ sbi->hardlink_cache = 0;
+
ret = build_directory(sbi, c.from_dir, "/",
c.target_out_dir, F2FS_ROOT_INO(sbi));
if (ret) {
diff --git a/fsck/xattr.c b/fsck/xattr.c
index e9dcb52..f0c5343 100644
--- a/fsck/xattr.c
+++ b/fsck/xattr.c
@@ -24,7 +24,7 @@ void *read_all_xattrs(struct f2fs_sb_info *sbi, struct f2fs_node *inode)
u64 inline_size = inline_xattr_size(&inode->i);
nid_t xnid = le32_to_cpu(inode->i.i_xattr_nid);
- if (xnid) {
+ if (c.func == FSCK && xnid) {
struct f2fs_node *node_blk = NULL;
struct node_info ni;
int ret;
diff --git a/include/android_config.h b/include/android_config.h
index 0613400..0a03d35 100644
--- a/include/android_config.h
+++ b/include/android_config.h
@@ -7,6 +7,8 @@
#define HAVE_POSIX_ACL_H 1
#define HAVE_LINUX_TYPES_H 1
#define HAVE_LINUX_XATTR_H 1
+#define HAVE_LINUX_FS_H 1
+#define HAVE_LINUX_FIEMAP_H 1
#define HAVE_MNTENT_H 1
#define HAVE_STDLIB_H 1
#define HAVE_STRING_H 1
diff --git a/include/f2fs_fs.h b/include/f2fs_fs.h
index b5bda13..a51a359 100644
--- a/include/f2fs_fs.h
+++ b/include/f2fs_fs.h
@@ -5,6 +5,9 @@
* http://www.samsung.com/
* Copyright (c) 2019 Google Inc.
* http://www.google.com/
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add sload compression support
*
* Dual licensed under the GPL or LGPL version 2 licenses.
*
@@ -68,6 +71,10 @@ typedef uint16_t u_int16_t;
typedef uint8_t u_int8_t;
#endif
+/* codes from kernel's f2fs.h, GPL-v2.0 */
+#define MIN_COMPRESS_LOG_SIZE 2
+#define MAX_COMPRESS_LOG_SIZE 8
+
typedef u_int64_t u64;
typedef u_int32_t u32;
typedef u_int16_t u16;
@@ -93,6 +100,31 @@ typedef u32 __be32;
typedef u64 __be64;
#endif
+/*
+ * code borrowed from kernel f2fs dirver: f2fs.h, GPL-2.0
+ * : definitions of COMPRESS_DATA_RESERVED_SIZE,
+ * struct compress_data, COMPRESS_HEADER_SIZE,
+ * and struct compress_ctx
+ */
+#define COMPRESS_DATA_RESERVED_SIZE 4
+struct compress_data {
+ __le32 clen; /* compressed data size */
+ __le32 chksum; /* checksum of compressed data */
+ __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
+ u8 cdata[]; /* compressed data */
+};
+#define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
+/* compress context */
+struct compress_ctx {
+ unsigned int cluster_size; /* page count in cluster */
+ unsigned int log_cluster_size; /* log of cluster size */
+ void *rbuf; /* compression input buffer */
+ struct compress_data *cbuf; /* comprsssion output header + data */
+ size_t rlen; /* valid data length in rbuf */
+ size_t clen; /* valid data length in cbuf */
+ void *private; /* work buf for compress algorithm */
+};
+
#if HAVE_BYTESWAP_H
#include <byteswap.h>
#else
@@ -194,8 +226,8 @@ static inline uint64_t bswap_64(uint64_t val)
#define ASSERT(exp) \
do { \
if (!(exp)) { \
- printf("[ASSERT] (%s:%4d) " #exp"\n", \
- __func__, __LINE__); \
+ printf("[ASSERT] (%s:%4d) %s\n", \
+ __func__, __LINE__, #exp); \
exit(-1); \
} \
} while (0)
@@ -345,6 +377,47 @@ typedef struct {
bool dbg_en;
} dev_cache_config_t;
+/* f2fs_configration for compression used for sload.f2fs */
+typedef struct {
+ void (*init)(struct compress_ctx *cc);
+ int (*compress)(struct compress_ctx *cc);
+ void (*reset)(struct compress_ctx *cc);
+} compress_ops;
+
+/* Should be aligned to supported_comp_names and support_comp_ops */
+enum compress_algorithms {
+ COMPR_LZO,
+ COMPR_LZ4,
+ MAX_COMPRESS_ALGS,
+};
+
+enum filter_policy {
+ COMPR_FILTER_UNASSIGNED = 0,
+ COMPR_FILTER_ALLOW,
+ COMPR_FILTER_DENY,
+};
+
+typedef struct {
+ void (*add)(const char *);
+ void (*destroy)(void);
+ bool (*filter)(const char *);
+} filter_ops;
+
+typedef struct {
+ bool enabled; /* disabled by default */
+ bool required; /* require to enable */
+ bool readonly; /* readonly to release blocks */
+ struct compress_ctx cc; /* work context */
+ enum compress_algorithms alg; /* algorithm to compress */
+ compress_ops *ops; /* ops per algorithm */
+ unsigned int min_blocks; /* save more blocks than this */
+ enum filter_policy filter; /* filter to try compression */
+ filter_ops *filter_ops; /* filter ops */
+} compress_config_t;
+
+#define ALIGN_UP(value, size) ((value) + ((value) % (size) > 0 ? \
+ (size) - (value) % (size) : 0))
+
struct f2fs_configuration {
u_int32_t reserved_segments;
u_int32_t new_reserved_segments;
@@ -441,6 +514,9 @@ struct f2fs_configuration {
/* cache parameters */
dev_cache_config_t cache_config;
+
+ /* compression support for sload.f2fs */
+ compress_config_t compress;
};
#ifdef CONFIG_64BIT
@@ -1282,6 +1358,30 @@ static inline int get_inline_xattr_addrs(struct f2fs_inode *inode)
#ifdef HAVE_LINUX_BLKZONED_H
+/* Let's just use v2, since v1 should be compatible with v2 */
+#define BLK_ZONE_REP_CAPACITY (1 << 0)
+struct blk_zone_v2 {
+ __u64 start; /* Zone start sector */
+ __u64 len; /* Zone length in number of sectors */
+ __u64 wp; /* Zone write pointer position */
+ __u8 type; /* Zone type */
+ __u8 cond; /* Zone condition */
+ __u8 non_seq; /* Non-sequential write resources active */
+ __u8 reset; /* Reset write pointer recommended */
+ __u8 resv[4];
+ __u64 capacity; /* Zone capacity in number of sectors */
+ __u8 reserved[24];
+};
+#define blk_zone blk_zone_v2
+
+struct blk_zone_report_v2 {
+ __u64 sector;
+ __u32 nr_zones;
+ __u32 flags;
+ struct blk_zone zones[0];
+};
+#define blk_zone_report blk_zone_report_v2
+
#define blk_zone_type(z) (z)->type
#define blk_zone_conv(z) ((z)->type == BLK_ZONE_TYPE_CONVENTIONAL)
#define blk_zone_seq_req(z) ((z)->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
@@ -1331,31 +1431,6 @@ blk_zone_cond_str(struct blk_zone *blkz)
/*
* Handle kernel zone capacity support
*/
-#ifndef HAVE_BLK_ZONE_REP_V2
-#define BLK_ZONE_REP_CAPACITY (1 << 0)
-struct blk_zone_v2 {
- __u64 start; /* Zone start sector */
- __u64 len; /* Zone length in number of sectors */
- __u64 wp; /* Zone write pointer position */
- __u8 type; /* Zone type */
- __u8 cond; /* Zone condition */
- __u8 non_seq; /* Non-sequential write resources active */
- __u8 reset; /* Reset write pointer recommended */
- __u8 resv[4];
- __u64 capacity; /* Zone capacity in number of sectors */
- __u8 reserved[24];
-};
-#define blk_zone blk_zone_v2
-
-struct blk_zone_report_v2 {
- __u64 sector;
- __u32 nr_zones;
- __u32 flags;
-struct blk_zone zones[0];

-};
-#define blk_zone_report blk_zone_report_v2
-#endif /* HAVE_BLK_ZONE_REP_V2 */
-
#define blk_zone_empty(z) (blk_zone_cond(z) == BLK_ZONE_COND_EMPTY)
#define blk_zone_sector(z) (z)->start
#define blk_zone_length(z) (z)->len
@@ -1377,7 +1452,7 @@ int f2fs_reset_zone(int, void *);
extern int f2fs_reset_zones(int);
extern uint32_t f2fs_get_usable_segments(struct f2fs_super_block *sb);
-#define SIZE_ALIGN(val, size) ((val) + (size) - 1) / (size)
+#define SIZE_ALIGN(val, size) (((val) + (size) - 1) / (size))
#define SEG_ALIGN(blks) SIZE_ALIGN(blks, c.blks_per_seg)
#define ZONE_ALIGN(blks) SIZE_ALIGN(blks, c.blks_per_seg * \
c.segs_per_zone)
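
The extra parentheses around SIZE_ALIGN() matter because the old expansion left a bare division that binds differently once the macro result is used inside a larger expression. A minimal before/after sketch (the caller shown is hypothetical, not one from this tree):

    /* Sketch only: old vs. new SIZE_ALIGN() expansion. */
    #include <stdio.h>

    #define SIZE_ALIGN_OLD(val, size) ((val) + (size) - 1) / (size)
    #define SIZE_ALIGN_NEW(val, size) (((val) + (size) - 1) / (size))

    int main(void)
    {
        /* hypothetical: 1000 blocks, 512 blocks per segment, doubled */
        printf("old: %d\n", 2 * SIZE_ALIGN_OLD(1000, 512)); /* 5 (wrong) */
        printf("new: %d\n", 2 * SIZE_ALIGN_NEW(1000, 512)); /* 4 */
        return 0;
    }
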
diff --git a/lib/libf2fs_io.c b/lib/libf2fs_io.c
index 138285d..dcedc17 100644
--- a/lib/libf2fs_io.c
+++ b/lib/libf2fs_io.c
@@ -5,6 +5,9 @@
* http://www.samsung.com/
* Copyright (c) 2019 Google Inc.
* http://www.google.com/
+ * Copyright (c) 2020 Google Inc.
+ * Robin Hsu <robinhsu@google.com>
+ * : add quick-buffer for sload compression support
*
* Dual licensed under the GPL or LGPL version 2 licenses.
*/
diff --git a/man/defrag.f2fs.8 b/man/defrag.f2fs.8
index b08399b..34113de 100644
--- a/man/defrag.f2fs.8
+++ b/man/defrag.f2fs.8
@@ -48,7 +48,7 @@ is 0 on success and -1 on failure.
Specify the starting block address.
.TP
.BI \-l " number of blocks"
-Specifiy the number of blocks to move.
+Specify the number of blocks to move.
.TP
.BI \-t " target block address"
Specify the destination block address.
diff --git a/man/mkfs.f2fs.8 b/man/mkfs.f2fs.8
index e2aee76..15e0bd9 100644
--- a/man/mkfs.f2fs.8
+++ b/man/mkfs.f2fs.8
@@ -32,6 +32,7 @@ mkfs.f2fs \- create an F2FS file system
]
[
.B \-g
+.I default-options
]
[
.B \-i
@@ -98,7 +99,7 @@ mkfs.f2fs \- create an F2FS file system
is used to create a f2fs file system (usually in a disk partition).
\fIdevice\fP is the special file corresponding to the device (e.g.
\fI/dev/sdXX\fP).
-\fIsectors\fP is optionally given for specifing the filesystem size.
+\fIsectors\fP is optionally given for specifying the filesystem size.
.PP
The exit code returned by
.B mkfs.f2fs
@@ -136,11 +137,18 @@ Force overwrite when an existing filesystem is detected on the device.
By default, mkfs.f2fs will not write to the device if it suspects that
there is a filesystem or partition table on the device already.
.TP
-.BI \-g
-Add default Android options.
+.BI \-g " default-options"
+Use a default set of options.
+The following values are supported:
+.RS 1.2i
+.TP 1.2i
+.B android
+Use default options for Android.
+.RE
.TP
.BI \-i
Enable extended node bitmap.
+.TP
.BI \-l " volume-label"
Specify the volume label to the partition mounted as F2FS.
.TP
@@ -183,7 +191,7 @@ Enable inode creation time feature. Requires extra attr.
Enable lost+found feature.
.TP
.B verity
-Reserved feature.
+Enable support for verity protected files (a.k.a. fs-verity).
.TP
.B sb_checksum
Enable superblock checksum.
@@ -256,6 +264,9 @@ Number of sectors. Default is determined by device size.
.TP
.BI \-V
Print the version number and exit.
+.TP
+.BI \-h,\ \-\-help
+Print usage and exit.
.SH AUTHOR
This version of
.B mkfs.f2fs
diff --git a/man/sload.f2fs.8 b/man/sload.f2fs.8
index d07330c..c165b35 100644
--- a/man/sload.f2fs.8
+++ b/man/sload.f2fs.8
@@ -7,22 +7,48 @@ sload.f2fs \- load directories and files into the device directly
.B sload.f2fs
[
.B \-f
-.I source directory path
+.I source-directory-path
]
[
.B \-t
-.I mount point
+.I mount-point
]
[
.B \-d
.I debugging-level
]
+[
+.B \-c
+[
+.B \-L
+.I log-of-blocks-per-cluster
+]
+[
+.B \-a
+.I compression-algorithm
+]
+[
+.B \-x
+.I file-extension-to-exclude-from-compression
+|
+.B \-i
+.I file-extension-to-include-for-compression
+]
+[
+.B \-m
+.I minimum-compressed-blocks-per-cluster
+]
+[
+.B \-r
+]
+]
.I device
.SH DESCRIPTION
.B sload.f2fs
-is used to load directories and files into a disk partition.
-\fIdevice\fP is the special file corresponding to the device (e.g.
-\fI/dev/sdXX\fP).
+is used to load directories and files into a disk partition, or an F2FS
+image (file).
+\fIdevice\fP could a special file corresponding to the device (e.g.
+\fI/dev/sdXX\fP), or an F2FS image file.
.PP
The exit code returned by
@@ -30,24 +56,72 @@ The exit code returned by
is 0 on success and -1 on failure.
.SH OPTIONS
.TP
-.BI \-f " source directory path"
+.BI \-f " source-directory-path"
Specify the source directory path to be loaded.
.TP
-.BI \-t " mount point path"
+.BI \-t " mount-point-path"
Specify the mount point path in the partition to load.
.TP
.BI \-d " debug-level"
Specify the level of debugging options.
The default number is 0, which shows basic debugging messages.
.TP
+.BI \-c
+Enable a cluster-based file compression.
+The file would be chopped into clusters, and each cluster is compressed
+independently.
+.TP
+.BI \-L " log-of-blocks-per-cluster
+Specify cluster size in power of two blocks.
+The minimum value is 2 (4 blocks, default).
+The maximum value is 8 (256 blocks).
+Note that a block contains 4096 bytes.
+This option must be used with option \fB\-c\fR.
+.TP
+.BI \-a " compression-algorithm"
+Choose the algorithm for compression. Available options are:
+lzo, lz4 (default).
+This option must be used with option \fB\-c\fR.
+.TP
+.BI \-i " file-extension-to-include-for-compression"
+Specify a file extension to include for the compression.
+To specify multiple file extensions, use multiple option \fB\-i\fR's.
+Files having one of the listed extensions will be compressed.
+This option must be used with option \fB\-c\fR.
+.TP
+.BI \-x " file-extension-to-exclude-from-compression"
+Specify a file extension to exclude from compression.
+To specify multiple file extensions, use multiple option \fB\-x\fR's.
+Files having one of the listed extensions won't be compressed.
+This option must be used with option \fB\-c\fR.
+.TP
+.BI \-m " minimum-compressed-blocks-per-cluster"
+Specify a minimum block count saved (by compression) per cluster.
+The minimum value is 1 (default).
+Maximum value is the cluster size in blocks minus 1.
+If compression of a cluster fails to save at least the minimum compressed
+block count given by the option, the cluster will not be compressed.
+This option must be used with option \fB\-c\fR.
+.TP
+.BI \-r
+Specify read-only flag for the compressed files.
+It allows filesystem to release compressed space to the users, since, without
+this option, filesystem should keep the space for future file updates.
+This option must be used with option \fB\-c\fR.
+
+.SH NOTES
+If neither \fB\-i\fR nor \fB\-x\fR is used, all files will be compressed.
+Obviously, option \fB\-i\fR and \fB-x\fR can not be used together.
+
.SH AUTHOR
This version of
.B sload.f2fs
-has been written by Hou Pengyang <houpengyang@huawei.com>,
-Liu Shuoran <liushuoran@huawei.com>, Jaegeuk Kim <jaegeuk@kernel.org>
+has been contributed by Hou Pengyang <houpengyang@huawei.com>,
+Liu Shuoran <liushuoran@huawei.com>, Jaegeuk Kim <jaegeuk@kernel.org>,
+Robin Hsu <robinhsu@google.com>
.SH AVAILABILITY
.B sload.f2fs
-is available from git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git.
+is available from <git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git>.
.SH SEE ALSO
.BR mkfs.f2fs(8),
.BR fsck.f2fs(8),
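
Tying the new options together, a hypothetical invocation (paths and device name are placeholders, and the target image must have been formatted with the compression feature, as init_compr() in fsck/main.c checks) that loads ./system into the image, compressing only .so and .apk files with LZ4, 4-block clusters, and read-only compressed files:

    $ sload.f2fs -f ./system -t / -c -a lz4 -L 2 -i so -i apk -r /dev/sdXX
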
diff --git a/mkfs/f2fs_format.c b/mkfs/f2fs_format.c
index a6c542e..b4bec92 100644
--- a/mkfs/f2fs_format.c
+++ b/mkfs/f2fs_format.c
@@ -254,14 +254,22 @@ static int f2fs_prepare_super_block(void)
return -1;
}
+ if (c.zoned_mode && c.ndevs > 1)
+ zone_align_start_offset +=
+ (c.devices[0].total_sectors * c.sector_size) % zone_size_bytes;
+
set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
sb->cp_blkaddr = sb->segment0_blkaddr;
MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
get_sb(segment0_blkaddr));
- if (c.zoned_mode && (get_sb(segment0_blkaddr) + c.start_sector /
- DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) {
+ if (c.zoned_mode &&
+ ((c.ndevs == 1 &&
+ (get_sb(segment0_blkaddr) + c.start_sector /
+ DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) ||
+ (c.ndevs > 1 &&
+ c.devices[1].start_blkaddr % c.zone_blocks))) {
MSG(1, "\tError: Unaligned segment0 block address %u\n",
get_sb(segment0_blkaddr));
return -1;
@@ -502,6 +510,13 @@ static int f2fs_prepare_super_block(void)
c.cur_seg[CURSEG_HOT_DATA] = prev_zone(CURSEG_COLD_NODE);
c.cur_seg[CURSEG_COLD_DATA] = 0;
c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_COLD_DATA);
+ } else if (c.zoned_mode) {
+ c.cur_seg[CURSEG_HOT_NODE] = 0;
+ c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
+ c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
+ c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
+ c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_HOT_DATA);
+ c.cur_seg[CURSEG_COLD_DATA] = next_zone(CURSEG_WARM_DATA);
} else {
c.cur_seg[CURSEG_HOT_NODE] = 0;
c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
diff --git a/mkfs/f2fs_format_main.c b/mkfs/f2fs_format_main.c
index f2f0a80..03eb748 100644
--- a/mkfs/f2fs_format_main.c
+++ b/mkfs/f2fs_format_main.c
@@ -20,6 +20,7 @@
#include <time.h>
#include <uuid.h>
#include <errno.h>
+#include <getopt.h>
#include "config.h"
#ifdef HAVE_LIBBLKID
@@ -97,6 +98,9 @@ static void f2fs_show_info()
f2fs_encoding2str(c.s_encoding));
if (c.feature & le32_to_cpu(F2FS_FEATURE_PRJQUOTA))
MSG(0, "Info: Enable Project quota\n");
+
+ if (c.feature & le32_to_cpu(F2FS_FEATURE_COMPRESSION))
+ MSG(0, "Info: Enable Compression\n");
}
static void add_default_options(void)
@@ -125,12 +129,16 @@ static void add_default_options(void)
static void f2fs_parse_options(int argc, char *argv[])
{
- static const char *option_string = "qa:c:C:d:e:E:g:il:mo:O:rR:s:S:z:t:T:U:Vfw:";
+ static const char *option_string = "qa:c:C:d:e:E:g:hil:mo:O:rR:s:S:z:t:T:U:Vfw:";
+ static const struct option long_opts[] = {
+ { .name = "help", .has_arg = 0, .flag = NULL, .val = 'h' },
+ { .name = NULL, .has_arg = 0, .flag = NULL, .val = 0 }
+ };
int32_t option=0;
int val;
char *token;
- while ((option = getopt(argc,argv,option_string)) != EOF) {
+ while ((option = getopt_long(argc,argv,option_string,long_opts,NULL)) != EOF) {
switch (option) {
case 'q':
c.dbg_lv = -1;
@@ -164,6 +172,9 @@ static void f2fs_parse_options(int argc, char *argv[])
if (!strcmp(optarg, "android"))
c.defset = CONF_ANDROID;
break;
+ case 'h':
+ mkfs_usage();
+ break;
case 'i':
c.large_nat_bitmap = 1;
break;
diff --git a/tools/f2fs_io/Makefile.am b/tools/f2fs_io/Makefile.am
index 73ce525..6c17db1 100644
--- a/tools/f2fs_io/Makefile.am
+++ b/tools/f2fs_io/Makefile.am
@@ -1,7 +1,7 @@
## Makefile.am
if LINUX
-AM_CPPFLAGS = -I./include
+AM_CPPFLAGS = -I../../include
AM_CFLAGS = -Wall
sbin_PROGRAMS = f2fs_io
f2fs_io_SOURCES = f2fs_io.c
diff --git a/tools/f2fs_io/f2fs_io.c b/tools/f2fs_io/f2fs_io.c
index 6177d29..033c256 100644
--- a/tools/f2fs_io/f2fs_io.c
+++ b/tools/f2fs_io/f2fs_io.c
@@ -24,13 +24,13 @@
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
+#include <linux/fs.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <string.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
@@ -42,6 +42,8 @@
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
+#include <android_config.h>
+
#include "f2fs_io.h"
struct cmd_desc {
@@ -432,6 +434,56 @@ static void do_fallocate(int argc, char **argv, const struct cmd_desc *cmd)
exit(0);
}
+#define erase_desc "erase a block device"
+#define erase_help \
+"f2fs_io erase [block_device_path]\n\n" \
+"Send DISCARD | BLKSECDISCARD comamnd to" \
+"block device in block_device_path\n" \
+
+static void do_erase(int argc, char **argv, const struct cmd_desc *cmd)
+{
+ int fd, ret;
+ struct stat st;
+ u64 range[2];
+
+ if (argc != 2) {
+ fputs("Excess arguments\n\n", stderr);
+ fputs(cmd->cmd_help, stderr);
+ exit(1);
+ }
+
+ if (stat(argv[1], &st) != 0) {
+ fputs("stat error\n", stderr);
+ exit(1);
+ }
+
+ if (!S_ISBLK(st.st_mode)) {
+ fputs(argv[1], stderr);
+ fputs(" is not a block device\n", stderr);
+ exit(1);
+ }
+
+ fd = xopen(argv[1], O_WRONLY, 0);
+
+ range[0] = 0;
+ ret = ioctl(fd, BLKGETSIZE64, &range[1]);
+ if (ret < 0) {
+ fputs("get size failed\n", stderr);
+ exit(1);
+ }
+
+ ret = ioctl(fd, BLKSECDISCARD, &range);
+ if (ret < 0) {
+ ret = ioctl(fd, BLKDISCARD, &range);
+ if (ret < 0) {
+ fputs("Discard failed\n", stderr);
+ exit(1);
+ }
+ }
+
+ exit(0);
+}
+
#define write_desc "write data into file"
#define write_help \
"f2fs_io write [chunk_size in 4kb] [offset in chunk_size] [count] [pattern] [IO] [file_path]\n\n" \
@@ -662,27 +714,18 @@ static void do_randread(int argc, char **argv, const struct cmd_desc *cmd)
exit(0);
}
-struct file_ext {
- __u32 f_pos;
- __u32 start_blk;
- __u32 end_blk;
- __u32 blk_count;
-};
-
-#ifndef FIBMAP
-#define FIBMAP _IO(0x00, 1) /* bmap access */
-#endif
-
#define fiemap_desc "get block address in file"
#define fiemap_help \
"f2fs_io fiemap [offset in 4kb] [count] [file_path]\n\n"\
+#if defined(HAVE_LINUX_FIEMAP_H) && defined(HAVE_LINUX_FS_H)
static void do_fiemap(int argc, char **argv, const struct cmd_desc *cmd)
{
- u64 offset;
- u32 blknum;
unsigned count, i;
int fd;
+ __u64 phy_addr;
+ struct fiemap *fm = xmalloc(sizeof(struct fiemap) +
+ sizeof(struct fiemap_extent));
if (argc != 4) {
fputs("Excess arguments\n\n", stderr);
@@ -690,23 +733,37 @@ static void do_fiemap(int argc, char **argv, const struct cmd_desc *cmd)
exit(1);
}
- offset = atoi(argv[1]);
+ fm->fm_start = atoi(argv[1]) * F2FS_BLKSIZE;
+ fm->fm_length = F2FS_BLKSIZE;
+ fm->fm_extent_count = 1;
count = atoi(argv[2]);
fd = xopen(argv[3], O_RDONLY | O_LARGEFILE, 0);
- printf("Fiemap: offset = %08"PRIx64" len = %d\n", offset, count);
+ printf("Fiemap: offset = %08"PRIx64" len = %d\n",
+ (u64)fm->fm_start / F2FS_BLKSIZE, count);
for (i = 0; i < count; i++) {
- blknum = offset + i;
-
- if (ioctl(fd, FIBMAP, &blknum) < 0)
- die_errno("FIBMAP failed");
-
- printf("%u ", blknum);
+ if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
+ die_errno("FIEMAP failed");
+
+ phy_addr = fm->fm_extents[0].fe_physical / F2FS_BLKSIZE;
+ if (phy_addr == NEW_ADDR)
+ printf("NEW_ADDR ");
+ else
+ printf("%llu ", phy_addr);
+ fm->fm_start += F2FS_BLKSIZE;
}
printf("\n");
+ free(fm);
exit(0);
}
+#else
+static void do_fiemap(int UNUSED(argc), char **UNUSED(argv),
+ const struct cmd_desc *UNUSED(cmd))
+{
+ die("Not support for this platform");
+}
+#endif
#define gc_urgent_desc "start/end/run gc_urgent for given time period"
#define gc_urgent_help \
@@ -935,6 +992,109 @@ static void do_reserve_cblocks(int argc, char **argv, const struct cmd_desc *cmd
exit(0);
}
+#define get_coption_desc "get compression option of a compressed file"
+#define get_coption_help \
+"f2fs_io get_coption [file]\n\n" \
+" algorithm : compression algorithm (0:lzo, 1: lz4, 2:zstd, 3:lzorle)\n" \
+" log_cluster_size : compression cluster log size (2 <= log_size <= 8)\n"
+
+static void do_get_coption(int argc, char **argv, const struct cmd_desc *cmd)
+{
+ struct f2fs_comp_option option;
+ int ret, fd;
+
+ if (argc != 2) {
+ fputs("Excess arguments\n\n", stderr);
+ fputs(cmd->cmd_help, stderr);
+ exit(1);
+ }
+
+ fd = xopen(argv[1], O_RDONLY, 0);
+
+ ret = ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION, &option);
+ if (ret < 0)
+ die_errno("F2FS_IOC_GET_COMPRESS_OPTION failed");
+
+ printf("compression algorithm:%u\n", option.algorithm);
+ printf("compression cluster log size:%u\n", option.log_cluster_size);
+
+ exit(0);
+}
+
+#define set_coption_desc "set compression option of a compressed file"
+#define set_coption_help \
+"f2fs_io set_coption [algorithm] [log_cluster_size] [file_path]\n\n" \
+" algorithm : compression algorithm (0:lzo, 1: lz4, 2:zstd, 3:lzorle)\n" \
+" log_cluster_size : compression cluster log size (2 <= log_size <= 8)\n"
+
+static void do_set_coption(int argc, char **argv, const struct cmd_desc *cmd)
+{
+ struct f2fs_comp_option option;
+ int fd, ret;
+
+ if (argc != 4) {
+ fputs("Excess arguments\n\n", stderr);
+ fputs(cmd->cmd_help, stderr);
+ exit(1);
+ }
+
+ option.algorithm = atoi(argv[1]);
+ option.log_cluster_size = atoi(argv[2]);
+
+ fd = xopen(argv[3], O_WRONLY, 0);
+
+ ret = ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &option);
+ if (ret < 0)
+ die_errno("F2FS_IOC_SET_COMPRESS_OPTION failed");
+
+ printf("set compression option: algorithm=%u, log_cluster_size=%u\n",
+ option.algorithm, option.log_cluster_size);
+ exit(0);
+}
+
+#define decompress_desc "decompress an already compressed file"
+#define decompress_help "f2fs_io decompress [file_path]\n\n"
+
+static void do_decompress(int argc, char **argv, const struct cmd_desc *cmd)
+{
+ int fd, ret;
+
+ if (argc != 2) {
+ fputs("Excess arguments\n\n", stderr);
+ fputs(cmd->cmd_help, stderr);
+ exit(1);
+ }
+
+ fd = xopen(argv[1], O_WRONLY, 0);
+
+ ret = ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);
+ if (ret < 0)
+ die_errno("F2FS_IOC_DECOMPRESS_FILE failed");
+
+ exit(0);
+}
+
+#define compress_desc "compress a compression enabled file"
+#define compress_help "f2fs_io compress [file_path]\n\n"
+
+static void do_compress(int argc, char **argv, const struct cmd_desc *cmd)
+{
+ int fd, ret;
+
+ if (argc != 2) {
+ fputs("Excess arguments\n\n", stderr);
+ fputs(cmd->cmd_help, stderr);
+ exit(1);
+ }
+
+ fd = xopen(argv[1], O_WRONLY, 0);
+
+ ret = ioctl(fd, F2FS_IOC_COMPRESS_FILE);
+ if (ret < 0)
+ die_errno("F2FS_IOC_COMPRESS_FILE failed");
+
+ exit(0);
+}
#define CMD_HIDDEN 0x0001
#define CMD(name) { #name, do_##name, name##_desc, name##_help, 0 }
@@ -950,6 +1110,7 @@ const struct cmd_desc cmd_list[] = {
CMD(shutdown),
CMD(pinfile),
CMD(fallocate),
+ CMD(erase),
CMD(write),
CMD(read),
CMD(randread),
@@ -960,6 +1121,10 @@ const struct cmd_desc cmd_list[] = {
CMD(get_cblocks),
CMD(release_cblocks),
CMD(reserve_cblocks),
+ CMD(get_coption),
+ CMD(set_coption),
+ CMD(decompress),
+ CMD(compress),
{ NULL, NULL, NULL, NULL, 0 }
};
diff --git a/tools/f2fs_io/f2fs_io.h b/tools/f2fs_io/f2fs_io.h
index bd19ff9..d53e576 100644
--- a/tools/f2fs_io/f2fs_io.h
+++ b/tools/f2fs_io/f2fs_io.h
@@ -10,6 +10,13 @@
#ifdef HAVE_LINUX_TYPES_H
#include <linux/types.h>
#endif
+#ifdef HAVE_LINUX_FIEMAP_H
+#include <linux/fiemap.h>
+#endif
+#ifdef HAVE_LINUX_FS_H
+#include <linux/fs.h>
+#endif
+
#include <sys/types.h>
#ifdef UNUSED
@@ -38,6 +45,9 @@ typedef u16 __be16;
typedef u32 __be32;
#endif
+#define F2FS_BLKSIZE 4096
+#define NEW_ADDR 0xFFFFFFFF
+
#ifndef FS_IOC_GETFLAGS
#define FS_IOC_GETFLAGS _IOR('f', 1, long)
#endif
@@ -74,6 +84,12 @@ typedef u32 __be32;
_IOR(F2FS_IOCTL_MAGIC, 18, __u64)
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \
_IOR(F2FS_IOCTL_MAGIC, 19, __u64)
+#define F2FS_IOC_GET_COMPRESS_OPTION _IOR(F2FS_IOCTL_MAGIC, 21, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_SET_COMPRESS_OPTION _IOW(F2FS_IOCTL_MAGIC, 22, \
+ struct f2fs_comp_option)
+#define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23)
+#define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24)
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -154,3 +170,8 @@ struct f2fs_flush_device {
u32 dev_num; /* device number to flush */
u32 segments; /* # of segments to flush */
};
+
+struct f2fs_comp_option {
+ u8 algorithm;
+ u8 log_cluster_size;
+};
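
The new f2fs_io commands are thin wrappers around these ioctls. A sketch of the sequence behind "f2fs_io set_coption 1 2 FILE" followed by "f2fs_io compress FILE"; it assumes a kernel that implements the f2fs compression ioctls, a file that already has compression enabled, and that it is built against tools/f2fs_io/f2fs_io.h:

    /* Sketch only: set the compression option, then compress the file. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "f2fs_io.h"   /* F2FS_IOC_*, struct f2fs_comp_option */

    int main(int argc, char **argv)
    {
        struct f2fs_comp_option opt = { .algorithm = 1 /* lz4 */,
                                        .log_cluster_size = 2 };
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_WRONLY);
        if (fd < 0 || ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt) < 0 ||
            ioctl(fd, F2FS_IOC_COMPRESS_FILE) < 0) {
            perror(argv[1]);
            return 1;
        }
        close(fd);
        return 0;
    }
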
diff --git a/tools/f2fs_io_parse.c b/tools/f2fs_io_parse.c
index d146ead..47f1194 100644
--- a/tools/f2fs_io_parse.c
+++ b/tools/f2fs_io_parse.c
@@ -271,7 +271,7 @@ static void __print_ftype()
int i;
setlocale(LC_ALL, "");
- printf("\n===== Data R/W in 4KB accoring to File types =====\n");
+ printf("\n===== Data R/W in 4KB according to File types =====\n");
for (i = 0; i < __NR_FILES; i++)
printf(" %17s |", file_type_string[i]);
printf("\n");
diff --git a/tools/f2fscrypt.8 b/tools/f2fscrypt.8
index a60adc8..5e2258a 100644
--- a/tools/f2fscrypt.8
+++ b/tools/f2fscrypt.8
@@ -40,7 +40,7 @@ identifier consisting of 16 hexadecimal characters.
The target directory must be empty.
.SH EXAMPLE
.nf
-Formats a f2fs filesytem that supports encrypt.
+Formats a f2fs filesystem that supports encrypt.
.ft R
# mkfs.f2fs -O encrypt /dev/sdxx