JFFS2: Import from Linux

Import of the Journalling Flash File System, Version 2 (JFFS2), from
Linux 3.11. This part of the Linux kernel is under a separate license,
which is similar to the RTEMS license.

The file "cpukit/libfs/src/jffs2/include/linux/jffs2.h" is a copy of
"linux-3.11/include/uapi/linux/jffs2.h".

The file "LICENSE.JFFS2" is a copy of "linux-3.11/fs/jffs2/LICENCE".

The files

"linux-3.11/fs/jffs2/LICENCE",
"linux-3.11/fs/jffs2/acl.h",
"linux-3.11/fs/jffs2/build.c",
"linux-3.11/fs/jffs2/compr.c",
"linux-3.11/fs/jffs2/compr.h",
"linux-3.11/fs/jffs2/compr_rtime.c",
"linux-3.11/fs/jffs2/compr_rubin.c",
"linux-3.11/fs/jffs2/compr_zlib.c",
"linux-3.11/fs/jffs2/debug.c",
"linux-3.11/fs/jffs2/debug.h",
"linux-3.11/fs/jffs2/erase.c",
"linux-3.11/fs/jffs2/gc.c",
"linux-3.11/fs/jffs2/jffs2_fs_i.h",
"linux-3.11/fs/jffs2/jffs2_fs_sb.h",
"linux-3.11/fs/jffs2/nodelist.c",
"linux-3.11/fs/jffs2/nodelist.h",
"linux-3.11/fs/jffs2/nodemgmt.c",
"linux-3.11/fs/jffs2/read.c",
"linux-3.11/fs/jffs2/readinode.c",
"linux-3.11/fs/jffs2/scan.c",
"linux-3.11/fs/jffs2/summary.h",
"linux-3.11/fs/jffs2/write.c", and
"linux-3.11/fs/jffs2/xattr.h"

are copied to "cpukit/libfs/src/jffs2/src".
Author: Sebastian Huber
Date:   2013-09-12 15:57:47 +02:00
Parent: 8ac962e1a7
Commit: 0c0f128e11

25 changed files with 11380 additions and 0 deletions

LICENSE.JFFS2 (new file)

@@ -0,0 +1,30 @@
The files in this directory and elsewhere which refer to this LICENCE
file are part of JFFS2, the Journalling Flash File System v2.
Copyright © 2001-2007 Red Hat, Inc. and others
JFFS2 is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 or (at your option) any later
version.
JFFS2 is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License along
with JFFS2; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
As a special exception, if other files instantiate templates or use
macros or inline functions from these files, or you compile these
files and link them with other works to produce a work based on these
files, these files do not by themselves cause the resulting work to be
covered by the GNU General Public License. However the source code for
these files must still be made available in accordance with section (3)
of the GNU General Public License.
This exception does not invalidate any other reasons why a work based on
this file might be covered by the GNU General Public License.

cpukit/libfs/src/jffs2/include/linux/jffs2.h (new file)

@@ -0,0 +1,223 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*/
#ifndef __LINUX_JFFS2_H__
#define __LINUX_JFFS2_H__
#include <linux/types.h>
#include <linux/magic.h>
/* You must include something which defines the C99 uintXX_t types.
We don't do it from here because this file is used in too many
different environments. */
/* Values we may expect to find in the 'magic' field */
#define JFFS2_OLD_MAGIC_BITMASK 0x1984
#define JFFS2_MAGIC_BITMASK 0x1985
#define KSAMTIB_CIGAM_2SFFJ 0x8519 /* For detecting wrong-endian fs */
#define JFFS2_EMPTY_BITMASK 0xffff
#define JFFS2_DIRTY_BITMASK 0x0000
/* Summary node MAGIC marker */
#define JFFS2_SUM_MAGIC 0x02851885
/* We only allow a single char for length, and 0xFF is empty flash so
we don't want it confused with a real length. Hence max 254.
*/
#define JFFS2_MAX_NAME_LEN 254
/* How small can we sensibly write nodes? */
#define JFFS2_MIN_DATA_LEN 128
#define JFFS2_COMPR_NONE 0x00
#define JFFS2_COMPR_ZERO 0x01
#define JFFS2_COMPR_RTIME 0x02
#define JFFS2_COMPR_RUBINMIPS 0x03
#define JFFS2_COMPR_COPY 0x04
#define JFFS2_COMPR_DYNRUBIN 0x05
#define JFFS2_COMPR_ZLIB 0x06
#define JFFS2_COMPR_LZO 0x07
/* Compatibility flags. */
#define JFFS2_COMPAT_MASK 0xc000 /* What to do if an unknown nodetype is found */
#define JFFS2_NODE_ACCURATE 0x2000
/* INCOMPAT: Fail to mount the filesystem */
#define JFFS2_FEATURE_INCOMPAT 0xc000
/* ROCOMPAT: Mount read-only */
#define JFFS2_FEATURE_ROCOMPAT 0x8000
/* RWCOMPAT_COPY: Mount read/write, and copy the node when it's GC'd */
#define JFFS2_FEATURE_RWCOMPAT_COPY 0x4000
/* RWCOMPAT_DELETE: Mount read/write, and delete the node when it's GC'd */
#define JFFS2_FEATURE_RWCOMPAT_DELETE 0x0000
#define JFFS2_NODETYPE_DIRENT (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 1)
#define JFFS2_NODETYPE_INODE (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 2)
#define JFFS2_NODETYPE_CLEANMARKER (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
#define JFFS2_NODETYPE_PADDING (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 4)
#define JFFS2_NODETYPE_SUMMARY (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6)
#define JFFS2_NODETYPE_XATTR (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8)
#define JFFS2_NODETYPE_XREF (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9)
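/*
 * The top two bits of a node type give its compatibility class. A rough
 * sketch of how an unrecognised node type is classified using the masks
 * above (the real handling lives in scan.c and does more bookkeeping):
 *
 *   switch (nodetype & JFFS2_COMPAT_MASK) {
 *   case JFFS2_FEATURE_INCOMPAT:        // refuse to mount
 *   case JFFS2_FEATURE_ROCOMPAT:        // force a read-only mount
 *   case JFFS2_FEATURE_RWCOMPAT_COPY:   // keep the node when its block is GC'd
 *   case JFFS2_FEATURE_RWCOMPAT_DELETE: // let GC drop the node
 *   }
 */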
/* XATTR Related */
#define JFFS2_XPREFIX_USER 1 /* for "user." */
#define JFFS2_XPREFIX_SECURITY 2 /* for "security." */
#define JFFS2_XPREFIX_ACL_ACCESS 3 /* for "system.posix_acl_access" */
#define JFFS2_XPREFIX_ACL_DEFAULT 4 /* for "system.posix_acl_default" */
#define JFFS2_XPREFIX_TRUSTED 5 /* for "trusted.*" */
#define JFFS2_ACL_VERSION 0x0001
// Maybe later...
//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
mount time, don't wait for it to
happen later */
#define JFFS2_INO_FLAG_USERCOMPR 2 /* User has requested a specific
compression type */
/* These can go once we've made sure we've caught all uses without
byteswapping */
typedef struct {
__u32 v32;
} __attribute__((packed)) jint32_t;
typedef struct {
__u32 m;
} __attribute__((packed)) jmode_t;
typedef struct {
__u16 v16;
} __attribute__((packed)) jint16_t;
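/*
 * The wrapper types above keep on-media fields packed and endianness-neutral
 * at the C type level. JFFS2 code reads and writes them only through helper
 * macros provided by the OS glue headers (not part of this file); a minimal
 * sketch, assuming the default little-endian media format:
 *
 *   #define je16_to_cpu(x) (le16_to_cpu((x).v16))
 *   #define je32_to_cpu(x) (le32_to_cpu((x).v32))
 *   #define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
 *   #define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
 */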
struct jffs2_unknown_node
{
/* All start like this */
jint16_t magic;
jint16_t nodetype;
jint32_t totlen; /* So we can skip over nodes we don't grok */
jint32_t hdr_crc;
};
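/*
 * hdr_crc is a CRC32 over the three fields that precede it, so a scanner can
 * validate a header before trusting totlen. A sketch of the check (the real
 * code lives in scan.c):
 *
 *   uint32_t crc = crc32(0, node, sizeof(struct jffs2_unknown_node) - 4);
 *   if (crc != je32_to_cpu(node->hdr_crc))
 *           ;   // treat the node as dirty space and keep scanning
 */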
struct jffs2_raw_dirent
{
jint16_t magic;
jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
jint32_t totlen;
jint32_t hdr_crc;
jint32_t pino;
jint32_t version;
jint32_t ino; /* == zero for unlink */
jint32_t mctime;
__u8 nsize;
__u8 type;
__u8 unused[2];
jint32_t node_crc;
jint32_t name_crc;
__u8 name[0];
};
/* The JFFS2 raw inode structure: Used for storage on physical media. */
/* The uid, gid, atime, mtime and ctime members could be longer, but
are left like this for space efficiency. If and when people decide
they really need them extended, it's simple enough to add support for
a new type of raw node.
*/
struct jffs2_raw_inode
{
jint16_t magic; /* A constant magic number. */
jint16_t nodetype; /* == JFFS2_NODETYPE_INODE */
jint32_t totlen; /* Total length of this node (inc data, etc.) */
jint32_t hdr_crc;
jint32_t ino; /* Inode number. */
jint32_t version; /* Version number. */
jmode_t mode; /* The file's type or mode. */
jint16_t uid; /* The file's owner. */
jint16_t gid; /* The file's group. */
jint32_t isize; /* Total resultant size of this inode (used for truncations) */
jint32_t atime; /* Last access time. */
jint32_t mtime; /* Last modification time. */
jint32_t ctime; /* Change time. */
jint32_t offset; /* Where to begin to write. */
jint32_t csize; /* (Compressed) data size */
jint32_t dsize; /* Size of the node's data. (after decompression) */
__u8 compr; /* Compression algorithm used */
__u8 usercompr; /* Compression algorithm requested by the user */
jint16_t flags; /* See JFFS2_INO_FLAG_* */
jint32_t data_crc; /* CRC for the (compressed) data. */
jint32_t node_crc; /* CRC for the raw inode (excluding data) */
__u8 data[0];
};
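/*
 * The two trailing CRCs cover different spans: node_crc protects the fixed
 * part of the raw inode (everything before data_crc and node_crc), while
 * data_crc protects the possibly-compressed payload that follows. A sketch of
 * how a writer fills them in, assumed from the layout above (the actual
 * writer is in write.c, not shown in this hunk):
 *
 *   ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
 *   ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri) - 8));
 */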
struct jffs2_raw_xattr {
jint16_t magic;
jint16_t nodetype; /* = JFFS2_NODETYPE_XATTR */
jint32_t totlen;
jint32_t hdr_crc;
jint32_t xid; /* XATTR identifier number */
jint32_t version;
__u8 xprefix;
__u8 name_len;
jint16_t value_len;
jint32_t data_crc;
jint32_t node_crc;
__u8 data[0];
} __attribute__((packed));
struct jffs2_raw_xref
{
jint16_t magic;
jint16_t nodetype; /* = JFFS2_NODETYPE_XREF */
jint32_t totlen;
jint32_t hdr_crc;
jint32_t ino; /* inode number */
jint32_t xid; /* XATTR identifier number */
jint32_t xseqno; /* xref sequential number */
jint32_t node_crc;
} __attribute__((packed));
struct jffs2_raw_summary
{
jint16_t magic;
jint16_t nodetype; /* = JFFS2_NODETYPE_SUMMARY */
jint32_t totlen;
jint32_t hdr_crc;
jint32_t sum_num; /* number of sum entries*/
jint32_t cln_mkr; /* clean marker size, 0 = no cleanmarker */
jint32_t padded; /* sum of the size of padding nodes */
jint32_t sum_crc; /* summary information crc */
jint32_t node_crc; /* node crc */
jint32_t sum[0]; /* inode summary info */
};
union jffs2_node_union
{
struct jffs2_raw_inode i;
struct jffs2_raw_dirent d;
struct jffs2_raw_xattr x;
struct jffs2_raw_xref r;
struct jffs2_raw_summary s;
struct jffs2_unknown_node u;
};
/* Data payload for device nodes. */
union jffs2_device_node {
jint16_t old_id;
jint32_t new_id;
};
#endif /* __LINUX_JFFS2_H__ */

cpukit/libfs/src/jffs2/src/LICENCE (new file)

@@ -0,0 +1,30 @@
The files in this directory and elsewhere which refer to this LICENCE
file are part of JFFS2, the Journalling Flash File System v2.
Copyright © 2001-2007 Red Hat, Inc. and others
JFFS2 is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 or (at your option) any later
version.
JFFS2 is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License along
with JFFS2; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
As a special exception, if other files instantiate templates or use
macros or inline functions from these files, or you compile these
files and link them with other works to produce a work based on these
files, these files do not by themselves cause the resulting work to be
covered by the GNU General Public License. However the source code for
these files must still be made available in accordance with section (3)
of the GNU General Public License.
This exception does not invalidate any other reasons why a work based on
this file might be covered by the GNU General Public License.

cpukit/libfs/src/jffs2/src/acl.h (new file)

@@ -0,0 +1,44 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2006 NEC Corporation
*
* Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
struct jffs2_acl_entry {
jint16_t e_tag;
jint16_t e_perm;
jint32_t e_id;
};
struct jffs2_acl_entry_short {
jint16_t e_tag;
jint16_t e_perm;
};
struct jffs2_acl_header {
jint32_t a_version;
};
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
extern int jffs2_init_acl_post(struct inode *);
extern const struct xattr_handler jffs2_acl_access_xattr_handler;
extern const struct xattr_handler jffs2_acl_default_xattr_handler;
#else
#define jffs2_get_acl (NULL)
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
#define jffs2_init_acl_post(inode) (0)
#endif /* CONFIG_JFFS2_FS_POSIX_ACL */

cpukit/libfs/src/jffs2/src/build.c (new file)

@@ -0,0 +1,394 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
struct jffs2_inode_cache *, struct jffs2_full_dirent **);
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
for (; *i < c->inocache_hashsize; (*i)++) {
if (c->inocache_list[*i])
return c->inocache_list[*i];
}
return NULL;
}
static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
/* More in this chain? */
if (ic->next)
return ic->next;
(*i)++;
return first_inode_chain(i, c);
}
#define for_each_inode(i, c, ic) \
for (i = 0, ic = first_inode_chain(&i, (c)); \
ic; \
ic = next_inode(&i, ic, (c)))
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic)
{
struct jffs2_full_dirent *fd;
dbg_fsbuild("building directory inode #%u\n", ic->ino);
/* For each child, increase nlink */
for(fd = ic->scan_dents; fd; fd = fd->next) {
struct jffs2_inode_cache *child_ic;
if (!fd->ino)
continue;
/* we can get high latency here with huge directories */
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
fd->name, fd->ino, ic->ino);
jffs2_mark_node_obsolete(c, fd->raw);
continue;
}
if (fd->type == DT_DIR) {
if (child_ic->pino_nlink) {
JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
fd->name, fd->ino, ic->ino);
/* TODO: What do we do about it? */
} else {
child_ic->pino_nlink = ic->ino;
}
} else
child_ic->pino_nlink++;
dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
/* Can't free scan_dents so far. We might need them in pass 2 */
}
}
/* Scan plan:
- Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
- Scan directory tree from top down, setting nlink in inocaches
- Scan inocaches for inodes with nlink==0
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
int ret;
int i;
struct jffs2_inode_cache *ic;
struct jffs2_full_dirent *fd;
struct jffs2_full_dirent *dead_fds = NULL;
dbg_fsbuild("build FS data structures\n");
/* First, scan the medium and build all the inode caches with
lists of physical nodes */
c->flags |= JFFS2_SB_FLAG_SCANNING;
ret = jffs2_scan_medium(c);
c->flags &= ~JFFS2_SB_FLAG_SCANNING;
if (ret)
goto exit;
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
if (ic->scan_dents) {
jffs2_build_inode_pass1(c, ic);
cond_resched();
}
}
dbg_fsbuild("pass 1 complete\n");
/* Next, scan for inodes with nlink == 0 and remove them. If
they were directories, then decrement the nlink of their
children too, and repeat the scan. As that's going to be
a fairly uncommon occurrence, it's not so evil to do it this
way. Recursion bad. */
dbg_fsbuild("pass 2 starting\n");
for_each_inode(i, c, ic) {
if (ic->pino_nlink)
continue;
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
cond_resched();
}
dbg_fsbuild("pass 2a starting\n");
while (dead_fds) {
fd = dead_fds;
dead_fds = fd->next;
ic = jffs2_get_ino_cache(c, fd->ino);
if (ic)
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
jffs2_free_full_dirent(fd);
}
dbg_fsbuild("pass 2a complete\n");
dbg_fsbuild("freeing temporary data structures\n");
/* Finally, we can scan again and free the dirent structs */
for_each_inode(i, c, ic) {
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
jffs2_free_full_dirent(fd);
}
ic->scan_dents = NULL;
cond_resched();
}
jffs2_build_xattr_subsystem(c);
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
dbg_fsbuild("FS build complete\n");
/* Rotate the lists by some number to ensure wear levelling */
jffs2_rotate_lists(c);
ret = 0;
exit:
if (ret) {
for_each_inode(i, c, ic) {
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
jffs2_free_full_dirent(fd);
}
}
jffs2_clear_xattr_subsystem(c);
}
return ret;
}
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_full_dirent **dead_fds)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
raw = ic->nodes;
while (raw != (void *)ic) {
struct jffs2_raw_node_ref *next = raw->next_in_ino;
dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
jffs2_mark_node_obsolete(c, raw);
raw = next;
}
if (ic->scan_dents) {
int whinged = 0;
dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
while(ic->scan_dents) {
struct jffs2_inode_cache *child_ic;
fd = ic->scan_dents;
ic->scan_dents = fd->next;
if (!fd->ino) {
/* It's a deletion dirent. Ignore it */
dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
jffs2_free_full_dirent(fd);
continue;
}
if (!whinged)
whinged = 1;
dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
fd->name, fd->ino);
jffs2_free_full_dirent(fd);
continue;
}
/* Reduce nlink of the child. If it's now zero, stick it on the
dead_fds list to be cleaned up later. Else just free the fd */
if (fd->type == DT_DIR)
child_ic->pino_nlink = 0;
else
child_ic->pino_nlink--;
if (!child_ic->pino_nlink) {
dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
fd->ino, fd->name);
fd->next = *dead_fds;
*dead_fds = fd;
} else {
dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
fd->ino, fd->name, child_ic->pino_nlink);
jffs2_free_full_dirent(fd);
}
}
}
/*
We don't delete the inocache from the hash list and free it yet.
The erase code will do that, when all the nodes are completely gone.
*/
}
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
uint32_t size;
/* Deletion should almost _always_ be allowed. We're fairly
buggered once we stop allowing people to delete stuff
because there's not enough free space... */
c->resv_blocks_deletion = 2;
/* Be conservative about how much space we need before we allow writes.
On top of that which is required for deletia, require an extra 2%
of the medium to be available, for overhead caused by nodes being
split across blocks, etc. */
size = c->flash_size / 50; /* 2% of flash size */
size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
size += c->sector_size - 1; /* ... and round up */
c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
/* When do we let the GC thread run in the background */
c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
/* When do we allow garbage collection to merge nodes to make
long-term progress at the expense of short-term space exhaustion? */
c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
/* When do we allow garbage collection to eat from bad blocks rather
than actually making progress? */
c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
/* What number of 'very dirty' eraseblocks do we allow before we
trigger the GC thread even if we don't _need_ the space. When we
can't mark nodes obsolete on the medium, the old dirty nodes cause
performance problems because we have to inspect and discard them. */
c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
if (jffs2_can_mark_obsolete(c))
c->vdirty_blocks_gctrigger *= 10;
/* If there's less than this amount of dirty space, don't bother
trying to GC to make more space. It'll be a fruitless task */
c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
c->nospc_dirty_size);
dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
c->vdirty_blocks_gctrigger);
}
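/*
 * For illustration, on a hypothetical 16 MiB flash with 64 KiB eraseblocks
 * (256 blocks) the arithmetic above works out to:
 *
 *   size = 16777216/50 + 256*100 + 65535 = 335544 + 25600 + 65535 = 426679
 *   resv_blocks_write     = 2 + 426679/65536 = 2 + 6 = 8 blocks
 *   resv_blocks_gctrigger = 9, resv_blocks_gcmerge = 3, resv_blocks_gcbad = 0
 *   nospc_dirty_size      = 65536 + 16777216/100 = 233308 bytes
 */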
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
int ret;
int i;
int size;
c->free_size = c->flash_size;
c->nr_blocks = c->flash_size / c->sector_size;
size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
#ifndef __ECOS
if (jffs2_blocks_use_vmalloc(c))
c->blocks = vzalloc(size);
else
#endif
c->blocks = kzalloc(size, GFP_KERNEL);
if (!c->blocks)
return -ENOMEM;
for (i=0; i<c->nr_blocks; i++) {
INIT_LIST_HEAD(&c->blocks[i].list);
c->blocks[i].offset = i * c->sector_size;
c->blocks[i].free_size = c->sector_size;
}
INIT_LIST_HEAD(&c->clean_list);
INIT_LIST_HEAD(&c->very_dirty_list);
INIT_LIST_HEAD(&c->dirty_list);
INIT_LIST_HEAD(&c->erasable_list);
INIT_LIST_HEAD(&c->erasing_list);
INIT_LIST_HEAD(&c->erase_checking_list);
INIT_LIST_HEAD(&c->erase_pending_list);
INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
INIT_LIST_HEAD(&c->erase_complete_list);
INIT_LIST_HEAD(&c->free_list);
INIT_LIST_HEAD(&c->bad_list);
INIT_LIST_HEAD(&c->bad_used_list);
c->highest_ino = 1;
c->summary = NULL;
ret = jffs2_sum_init(c);
if (ret)
goto out_free;
if (jffs2_build_filesystem(c)) {
dbg_fsbuild("build_fs failed\n");
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
ret = -EIO;
goto out_free;
}
jffs2_calc_trigger_levels(c);
return 0;
out_free:
#ifndef __ECOS
if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
else
#endif
kfree(c->blocks);
return ret;
}

cpukit/libfs/src/jffs2/src/compr.c (new file)

@@ -0,0 +1,418 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
*
* Created by Arjan van de Ven <arjan@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "compr.h"
static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
/* Available compressors are on this list */
static LIST_HEAD(jffs2_compressor_list);
/* Actual compression mode */
static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
/* Statistics for blocks stored without compression */
static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0;
/*
* Return 1 to use this compression
*/
static int jffs2_is_best_compression(struct jffs2_compressor *this,
struct jffs2_compressor *best, uint32_t size, uint32_t bestsize)
{
switch (jffs2_compression_mode) {
case JFFS2_COMPR_MODE_SIZE:
if (bestsize > size)
return 1;
return 0;
case JFFS2_COMPR_MODE_FAVOURLZO:
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((best->compr != JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > (size * FAVOUR_LZO_PERCENT / 100)))
return 1;
if ((bestsize * FAVOUR_LZO_PERCENT / 100) > size)
return 1;
return 0;
}
/* Shouldn't happen */
return 0;
}
/*
* jffs2_selected_compress:
* @compr: Explicit compression type to use (ie, JFFS2_COMPR_ZLIB).
* If 0, just take the first available compression mode.
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: the compression type used. Zero is used to show that the data
* could not be compressed; probably because we couldn't find the requested
* compression mode.
*/
static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
{
struct jffs2_compressor *this;
int err, ret = JFFS2_COMPR_NONE;
uint32_t orig_slen, orig_dlen;
char *output_buf;
output_buf = kmalloc(*cdatalen, GFP_KERNEL);
if (!output_buf) {
pr_warn("No memory for compressor allocation. Compression failed.\n");
return ret;
}
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only and disabled modules */
if (!this->compress || this->disabled)
continue;
/* Skip if not the desired compression type */
if (compr && (compr != this->compr))
continue;
/*
* Either compression type was unspecified, or we found our
* compressor; either way, we're good to go.
*/
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
err = this->compress(data_in, output_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!err) {
/* Success */
ret = this->compr;
this->stat_compr_blocks++;
this->stat_compr_orig_size += *datalen;
this->stat_compr_new_size += *cdatalen;
break;
}
}
spin_unlock(&jffs2_compressor_list_lock);
if (ret == JFFS2_COMPR_NONE)
kfree(output_buf);
else
*cpage_out = output_buf;
return ret;
}
/* jffs2_compress:
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: Lower byte to be stored with data indicating compression type used.
* Zero is used to show that the data could not be compressed - the
* compressed version was actually larger than the original.
* Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
* jffs2_compress should compress as much as will fit, and should set
* *datalen accordingly to show the amount of data which were compressed.
*/
uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *data_in, unsigned char **cpage_out,
uint32_t *datalen, uint32_t *cdatalen)
{
int ret = JFFS2_COMPR_NONE;
int mode, compr_ret;
struct jffs2_compressor *this, *best=NULL;
unsigned char *output_buf = NULL, *tmp_buf;
uint32_t orig_slen, orig_dlen;
uint32_t best_slen=0, best_dlen=0;
if (c->mount_opts.override_compr)
mode = c->mount_opts.compr;
else
mode = jffs2_compression_mode;
switch (mode) {
case JFFS2_COMPR_MODE_NONE:
break;
case JFFS2_COMPR_MODE_PRIORITY:
ret = jffs2_selected_compress(0, data_in, cpage_out, datalen,
cdatalen);
break;
case JFFS2_COMPR_MODE_SIZE:
case JFFS2_COMPR_MODE_FAVOURLZO:
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only backwards-compatibility and disabled modules */
if ((!this->compress)||(this->disabled))
continue;
/* Allocating memory for output buffer if necessary */
if ((this->compr_buf_size < orig_slen) && (this->compr_buf)) {
spin_unlock(&jffs2_compressor_list_lock);
kfree(this->compr_buf);
spin_lock(&jffs2_compressor_list_lock);
this->compr_buf_size=0;
this->compr_buf=NULL;
}
if (!this->compr_buf) {
spin_unlock(&jffs2_compressor_list_lock);
tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
spin_lock(&jffs2_compressor_list_lock);
if (!tmp_buf) {
pr_warn("No memory for compressor allocation. (%d bytes)\n",
orig_slen);
continue;
}
else {
this->compr_buf = tmp_buf;
this->compr_buf_size = orig_slen;
}
}
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!compr_ret) {
if (((!best_dlen) || jffs2_is_best_compression(this, best, *cdatalen, best_dlen))
&& (*cdatalen < *datalen)) {
best_dlen = *cdatalen;
best_slen = *datalen;
best = this;
}
}
}
if (best_dlen) {
*cdatalen = best_dlen;
*datalen = best_slen;
output_buf = best->compr_buf;
best->compr_buf = NULL;
best->compr_buf_size = 0;
best->stat_compr_blocks++;
best->stat_compr_orig_size += best_slen;
best->stat_compr_new_size += best_dlen;
ret = best->compr;
*cpage_out = output_buf;
}
spin_unlock(&jffs2_compressor_list_lock);
break;
case JFFS2_COMPR_MODE_FORCELZO:
ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in,
cpage_out, datalen, cdatalen);
break;
case JFFS2_COMPR_MODE_FORCEZLIB:
ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in,
cpage_out, datalen, cdatalen);
break;
default:
pr_err("unknown compression mode\n");
}
if (ret == JFFS2_COMPR_NONE) {
*cpage_out = data_in;
*datalen = *cdatalen;
none_stat_compr_blocks++;
none_stat_compr_size += *datalen;
}
return ret;
}
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint16_t comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
struct jffs2_compressor *this;
int ret;
/* Older code had a bug where it would write non-zero 'usercompr'
fields. Deal with it. */
if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
comprtype &= 0xff;
switch (comprtype & 0xff) {
case JFFS2_COMPR_NONE:
/* This should be special-cased elsewhere, but we might as well deal with it */
memcpy(data_out, cdata_in, datalen);
none_stat_decompr_blocks++;
break;
case JFFS2_COMPR_ZERO:
memset(data_out, 0, datalen);
break;
default:
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (comprtype == this->compr) {
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
spin_lock(&jffs2_compressor_list_lock);
if (ret) {
pr_warn("Decompressor \"%s\" returned %d\n",
this->name, ret);
}
else {
this->stat_decompr_blocks++;
}
this->usecount--;
spin_unlock(&jffs2_compressor_list_lock);
return ret;
}
}
pr_warn("compression type 0x%02x not available\n", comprtype);
spin_unlock(&jffs2_compressor_list_lock);
return -EIO;
}
return 0;
}
int jffs2_register_compressor(struct jffs2_compressor *comp)
{
struct jffs2_compressor *this;
if (!comp->name) {
pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
return -1;
}
comp->compr_buf_size=0;
comp->compr_buf=NULL;
comp->usecount=0;
comp->stat_compr_orig_size=0;
comp->stat_compr_new_size=0;
comp->stat_compr_blocks=0;
comp->stat_decompr_blocks=0;
jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (this->priority < comp->priority) {
list_add(&comp->list, this->list.prev);
goto out;
}
}
list_add_tail(&comp->list, &jffs2_compressor_list);
out:
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
int jffs2_unregister_compressor(struct jffs2_compressor *comp)
{
D2(struct jffs2_compressor *this);
jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
if (comp->usecount) {
spin_unlock(&jffs2_compressor_list_lock);
pr_warn("Compressor module is in use. Unregister failed.\n");
return -1;
}
list_del(&comp->list);
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
{
if (orig != comprbuf)
kfree(comprbuf);
}
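/*
 * Typical caller-side pairing of jffs2_compress() and jffs2_free_comprbuf()
 * (a sketch with illustrative variable names; the real user is the write
 * path in write.c, which is not part of this hunk):
 *
 *   unsigned char *comprbuf;
 *   uint32_t datalen = count, cdatalen = space_available;
 *   uint16_t comprtype = jffs2_compress(c, f, buf, &comprbuf,
 *                                       &datalen, &cdatalen);
 *   ri->compr = comprtype & 0xff;        // low byte goes to the medium
 *   ... write the node header plus comprbuf[0..cdatalen) to flash ...
 *   jffs2_free_comprbuf(comprbuf, buf);  // no-op if data went out uncompressed
 */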
int __init jffs2_compressors_init(void)
{
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_init();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_init();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_rubinmips_init();
jffs2_dynrubin_init();
#endif
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_init();
#endif
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
jffs2_dbg(1, "default compression mode: none\n");
#else
#ifdef CONFIG_JFFS2_CMODE_SIZE
jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
jffs2_dbg(1, "default compression mode: size\n");
#else
#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
jffs2_dbg(1, "default compression mode: favourlzo\n");
#else
jffs2_dbg(1, "default compression mode: priority\n");
#endif
#endif
#endif
return 0;
}
int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_exit();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_exit();
#endif
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_exit();
#endif
return 0;
}

cpukit/libfs/src/jffs2/src/compr.h (new file)

@@ -0,0 +1,105 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef __JFFS2_COMPR_H__
#define __JFFS2_COMPR_H__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/jffs2.h>
#include "jffs2_fs_i.h"
#include "jffs2_fs_sb.h"
#include "nodelist.h"
#define JFFS2_RUBINMIPS_PRIORITY 10
#define JFFS2_DYNRUBIN_PRIORITY 20
#define JFFS2_LZARI_PRIORITY 30
#define JFFS2_RTIME_PRIORITY 50
#define JFFS2_ZLIB_PRIORITY 60
#define JFFS2_LZO_PRIORITY 80
#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
#define JFFS2_DYNRUBIN_DISABLED /* for decompression */
#define JFFS2_COMPR_MODE_NONE 0
#define JFFS2_COMPR_MODE_PRIORITY 1
#define JFFS2_COMPR_MODE_SIZE 2
#define JFFS2_COMPR_MODE_FAVOURLZO 3
#define JFFS2_COMPR_MODE_FORCELZO 4
#define JFFS2_COMPR_MODE_FORCEZLIB 5
#define FAVOUR_LZO_PERCENT 80
struct jffs2_compressor {
struct list_head list;
int priority; /* used by priority compr. mode */
char *name;
char compr; /* JFFS2_COMPR_XXX */
int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *srclen, uint32_t *destlen);
int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
uint32_t cdatalen, uint32_t datalen);
int usecount;
int disabled; /* if set the compressor won't compress */
unsigned char *compr_buf; /* used by size compr. mode */
uint32_t compr_buf_size; /* used by size compr. mode */
uint32_t stat_compr_orig_size;
uint32_t stat_compr_new_size;
uint32_t stat_compr_blocks;
uint32_t stat_decompr_blocks;
};
int jffs2_register_compressor(struct jffs2_compressor *comp);
int jffs2_unregister_compressor(struct jffs2_compressor *comp);
int jffs2_compressors_init(void);
int jffs2_compressors_exit(void);
uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *data_in, unsigned char **cpage_out,
uint32_t *datalen, uint32_t *cdatalen);
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint16_t comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig);
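/*
 * A minimal hypothetical plugin, to illustrate the contract implied by
 * compr.c: compress() returns 0 on success (with *srclen and *destlen updated
 * to what was actually consumed and produced) and nonzero to decline, and
 * entries with higher .priority are tried first in priority mode:
 *
 *   static int null_compress(unsigned char *data_in, unsigned char *cpage_out,
 *                            uint32_t *srclen, uint32_t *destlen)
 *   {
 *           return -1;   // never claims to have compressed anything
 *   }
 *   static struct jffs2_compressor null_comp = {
 *           .priority = 1,
 *           .name = "null-example",   // hypothetical, not a real module
 *           .compr = JFFS2_COMPR_NONE,
 *           .compress = null_compress,
 *   };
 *   ... jffs2_register_compressor(&null_comp); ...
 */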
/* Compressor modules */
/* These functions will be called by jffs2_compressors_init/exit */
#ifdef CONFIG_JFFS2_RUBIN
int jffs2_rubinmips_init(void);
void jffs2_rubinmips_exit(void);
int jffs2_dynrubin_init(void);
void jffs2_dynrubin_exit(void);
#endif
#ifdef CONFIG_JFFS2_RTIME
int jffs2_rtime_init(void);
void jffs2_rtime_exit(void);
#endif
#ifdef CONFIG_JFFS2_ZLIB
int jffs2_zlib_init(void);
void jffs2_zlib_exit(void);
#endif
#ifdef CONFIG_JFFS2_LZO
int jffs2_lzo_init(void);
void jffs2_lzo_exit(void);
#endif
#endif /* __JFFS2_COMPR_H__ */

cpukit/libfs/src/jffs2/src/compr_rtime.c (new file)

@@ -0,0 +1,130 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*
*
* Very simple lz77-ish encoder.
*
* Theory of operation: Both encoder and decoder have a list of "last
* occurrences" for every possible source-value; after sending the
* first source-byte, the second byte indicated the "run" length of
* matches
*
* The algorithm is intended to only send "whole bytes", no bit-messing.
*
*/
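/*
 * Worked example of the scheme above: the input "aaaa" (4 bytes) encodes as
 * the pair { 'a', 3 } (2 bytes) -- the literal byte followed by a run length
 * of 3 further matches against the previous occurrence of 'a'. On
 * decompression the run is copied byte by byte when it overlaps the output
 * (backoffs + repeat >= outpos), which reproduces such repeats correctly.
 */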
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jffs2.h>
#include "compr.h"
/* _compress returns the compressed size, -1 if bigger */
static int jffs2_rtime_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
int backpos, runlen=0;
unsigned char value;
value = data_in[pos];
cpage_out[outpos++] = data_in[pos++];
backpos = positions[value];
positions[value]=pos;
while ((backpos < pos) && (pos < (*sourcelen)) &&
(data_in[pos]==data_in[backpos++]) && (runlen<255)) {
pos++;
runlen++;
}
cpage_out[outpos++] = runlen;
}
if (outpos >= pos) {
/* We failed */
return -1;
}
/* Tell the caller how much we managed to compress, and how much space it took */
*sourcelen = pos;
*dstlen = outpos;
return 0;
}
static int jffs2_rtime_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
short positions[256];
int outpos = 0;
int pos=0;
memset(positions,0,sizeof(positions));
while (outpos<destlen) {
unsigned char value;
int backoffs;
int repeat;
value = data_in[pos++];
cpage_out[outpos++] = value; /* first the verbatim copied byte */
repeat = data_in[pos++];
backoffs = positions[value];
positions[value]=outpos;
if (repeat) {
if (backoffs + repeat >= outpos) {
while(repeat) {
cpage_out[outpos++] = cpage_out[backoffs++];
repeat--;
}
} else {
memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
outpos+=repeat;
}
}
}
return 0;
}
static struct jffs2_compressor jffs2_rtime_comp = {
.priority = JFFS2_RTIME_PRIORITY,
.name = "rtime",
.compr = JFFS2_COMPR_RTIME,
.compress = &jffs2_rtime_compress,
.decompress = &jffs2_rtime_decompress,
#ifdef JFFS2_RTIME_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int jffs2_rtime_init(void)
{
return jffs2_register_compressor(&jffs2_rtime_comp);
}
void jffs2_rtime_exit(void)
{
jffs2_unregister_compressor(&jffs2_rtime_comp);
}

cpukit/libfs/src/jffs2/src/compr_rubin.c (new file)

@@ -0,0 +1,457 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jffs2.h>
#include <linux/errno.h>
#include "compr.h"
#define RUBIN_REG_SIZE 16
#define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1))
#define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1)
#define BIT_DIVIDER_MIPS 1043
static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241};
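/* bits[] is the per-bit-position probability model: bits[i] out of the
   divider is the modelled chance that bit i of a byte is set, and encode()
   and decode() below split the coding interval in that ratio. The MIPS table
   above is a fixed model; the "dynrubin" variant measures it per input block
   (see jffs2_dynrubin_compress() below). */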
struct pushpull {
unsigned char *buf;
unsigned int buflen;
unsigned int ofs;
unsigned int reserve;
};
struct rubin_state {
unsigned long p;
unsigned long q;
unsigned long rec_q;
long bit_number;
struct pushpull pp;
int bit_divider;
int bits[8];
};
static inline void init_pushpull(struct pushpull *pp, char *buf,
unsigned buflen, unsigned ofs,
unsigned reserve)
{
pp->buf = buf;
pp->buflen = buflen;
pp->ofs = ofs;
pp->reserve = reserve;
}
static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
{
if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve))
return -ENOSPC;
if (bit)
pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7)));
else
pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7)));
pp->ofs++;
return 0;
}
static inline int pushedbits(struct pushpull *pp)
{
return pp->ofs;
}
static inline int pullbit(struct pushpull *pp)
{
int bit;
bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;
pp->ofs++;
return bit;
}
static inline int pulledbits(struct pushpull *pp)
{
return pp->ofs;
}
static void init_rubin(struct rubin_state *rs, int div, int *bits)
{
int c;
rs->q = 0;
rs->p = (long) (2 * UPPER_BIT_RUBIN);
rs->bit_number = (long) 0;
rs->bit_divider = div;
for (c=0; c<8; c++)
rs->bits[c] = bits[c];
}
static int encode(struct rubin_state *rs, long A, long B, int symbol)
{
long i0, i1;
int ret;
while ((rs->q >= UPPER_BIT_RUBIN) ||
((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
rs->bit_number++;
ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
if (ret)
return ret;
rs->q &= LOWER_BITS_RUBIN;
rs->q <<= 1;
rs->p <<= 1;
}
i0 = A * rs->p / (A + B);
if (i0 <= 0)
i0 = 1;
if (i0 >= rs->p)
i0 = rs->p - 1;
i1 = rs->p - i0;
if (symbol == 0)
rs->p = i0;
else {
rs->p = i1;
rs->q += i0;
}
return 0;
}
static void end_rubin(struct rubin_state *rs)
{
int i;
for (i = 0; i < RUBIN_REG_SIZE; i++) {
pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
rs->q &= LOWER_BITS_RUBIN;
rs->q <<= 1;
}
}
static void init_decode(struct rubin_state *rs, int div, int *bits)
{
init_rubin(rs, div, bits);
/* except lower */
rs->rec_q = 0;
for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE;
rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
;
}
static void __do_decode(struct rubin_state *rs, unsigned long p,
unsigned long q)
{
register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
unsigned long rec_q;
int c, bits = 0;
/*
* First, work out how many bits we need from the input stream.
* Note that we have already done the initial check on this
* loop prior to calling this function.
*/
do {
bits++;
q &= lower_bits_rubin;
q <<= 1;
p <<= 1;
} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
rs->p = p;
rs->q = q;
rs->bit_number += bits;
/*
* Now get the bits. We really want this to be "get n bits".
*/
rec_q = rs->rec_q;
do {
c = pullbit(&rs->pp);
rec_q &= lower_bits_rubin;
rec_q <<= 1;
rec_q += c;
} while (--bits);
rs->rec_q = rec_q;
}
static int decode(struct rubin_state *rs, long A, long B)
{
unsigned long p = rs->p, q = rs->q;
long i0, threshold;
int symbol;
if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
__do_decode(rs, p, q);
i0 = A * rs->p / (A + B);
if (i0 <= 0)
i0 = 1;
if (i0 >= rs->p)
i0 = rs->p - 1;
threshold = rs->q + i0;
symbol = rs->rec_q >= threshold;
if (rs->rec_q >= threshold) {
rs->q += i0;
i0 = rs->p - i0;
}
rs->p = i0;
return symbol;
}
static int out_byte(struct rubin_state *rs, unsigned char byte)
{
int i, ret;
struct rubin_state rs_copy;
rs_copy = *rs;
for (i=0; i<8; i++) {
ret = encode(rs, rs->bit_divider-rs->bits[i],
rs->bits[i], byte & 1);
if (ret) {
/* Failed. Restore old state */
*rs = rs_copy;
return ret;
}
byte >>= 1 ;
}
return 0;
}
static int in_byte(struct rubin_state *rs)
{
int i, result = 0, bit_divider = rs->bit_divider;
for (i = 0; i < 8; i++)
result |= decode(rs, bit_divider - rs->bits[i],
rs->bits[i]) << i;
return result;
}
static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
unsigned char *cpage_out, uint32_t *sourcelen,
uint32_t *dstlen)
{
int outpos = 0;
int pos=0;
struct rubin_state rs;
init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
init_rubin(&rs, bit_divider, bits);
while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos]))
pos++;
end_rubin(&rs);
if (outpos > pos) {
/* We failed */
return -1;
}
/* Tell the caller how much we managed to compress,
* and how much space it took */
outpos = (pushedbits(&rs.pp)+7)/8;
if (outpos >= pos)
return -1; /* We didn't actually compress */
*sourcelen = pos;
*dstlen = outpos;
return 0;
}
#if 0
/* _compress returns the compressed size, -1 if bigger */
int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in,
cpage_out, sourcelen, dstlen);
}
#endif
static int jffs2_dynrubin_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
int bits[8];
unsigned char histo[256];
int i;
int ret;
uint32_t mysrclen, mydstlen;
mysrclen = *sourcelen;
mydstlen = *dstlen - 8;
if (*dstlen <= 12)
return -1;
memset(histo, 0, 256);
for (i=0; i<mysrclen; i++)
histo[data_in[i]]++;
memset(bits, 0, sizeof(int)*8);
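/* Count, for each bit position 0..7, how many input bytes have that bit set;
   scaled to a divider of 256 below, this becomes the probability model. */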
for (i=0; i<256; i++) {
if (i&128)
bits[7] += histo[i];
if (i&64)
bits[6] += histo[i];
if (i&32)
bits[5] += histo[i];
if (i&16)
bits[4] += histo[i];
if (i&8)
bits[3] += histo[i];
if (i&4)
bits[2] += histo[i];
if (i&2)
bits[1] += histo[i];
if (i&1)
bits[0] += histo[i];
}
for (i=0; i<8; i++) {
bits[i] = (bits[i] * 256) / mysrclen;
if (!bits[i]) bits[i] = 1;
if (bits[i] > 255) bits[i] = 255;
cpage_out[i] = bits[i];
}
ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen,
&mydstlen);
if (ret)
return ret;
/* Add back the 8 bytes we took for the probabilities */
mydstlen += 8;
if (mysrclen <= mydstlen) {
/* We failed to compress: output not smaller than the input */
return -1;
}
*sourcelen = mysrclen;
*dstlen = mydstlen;
return 0;
}
static void rubin_do_decompress(int bit_divider, int *bits,
unsigned char *cdata_in,
unsigned char *page_out, uint32_t srclen,
uint32_t destlen)
{
int outpos = 0;
struct rubin_state rs;
init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
init_decode(&rs, bit_divider, bits);
while (outpos < destlen)
page_out[outpos++] = in_byte(&rs);
}
static int jffs2_rubinmips_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t sourcelen, uint32_t dstlen)
{
rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in,
cpage_out, sourcelen, dstlen);
return 0;
}
static int jffs2_dynrubin_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t sourcelen, uint32_t dstlen)
{
int bits[8];
int c;
for (c=0; c<8; c++)
bits[c] = data_in[c];
rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8,
dstlen);
return 0;
}
static struct jffs2_compressor jffs2_rubinmips_comp = {
.priority = JFFS2_RUBINMIPS_PRIORITY,
.name = "rubinmips",
.compr = JFFS2_COMPR_DYNRUBIN,
.compress = NULL, /*&jffs2_rubinmips_compress,*/
.decompress = &jffs2_rubinmips_decompress,
#ifdef JFFS2_RUBINMIPS_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int jffs2_rubinmips_init(void)
{
return jffs2_register_compressor(&jffs2_rubinmips_comp);
}
void jffs2_rubinmips_exit(void)
{
jffs2_unregister_compressor(&jffs2_rubinmips_comp);
}
static struct jffs2_compressor jffs2_dynrubin_comp = {
.priority = JFFS2_DYNRUBIN_PRIORITY,
.name = "dynrubin",
.compr = JFFS2_COMPR_RUBINMIPS,
.compress = jffs2_dynrubin_compress,
.decompress = &jffs2_dynrubin_decompress,
#ifdef JFFS2_DYNRUBIN_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int jffs2_dynrubin_init(void)
{
return jffs2_register_compressor(&jffs2_dynrubin_comp);
}
void jffs2_dynrubin_exit(void)
{
jffs2_unregister_compressor(&jffs2_dynrubin_comp);
}

cpukit/libfs/src/jffs2/src/compr_zlib.c (new file)

@@ -0,0 +1,221 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#if !defined(__KERNEL__) && !defined(__ECOS)
#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include "nodelist.h"
#include "compr.h"
/* Plan: call deflate() with avail_in == *sourcelen,
avail_out = *dstlen - 12 and flush == Z_FINISH.
If it doesn't manage to finish, call it again with
avail_in == 0 and avail_out set to the remaining 12
bytes for it to clean up.
Q: Is 12 bytes sufficient?
*/
#define STREAM_END_SPACE 12
static DEFINE_MUTEX(deflate_mutex);
static DEFINE_MUTEX(inflate_mutex);
static z_stream inf_strm, def_strm;
#ifdef __KERNEL__ /* Linux-only */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mutex.h>
static int __init alloc_workspaces(void)
{
def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
MAX_MEM_LEVEL));
if (!def_strm.workspace)
return -ENOMEM;
jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (!inf_strm.workspace) {
vfree(def_strm.workspace);
return -ENOMEM;
}
jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
zlib_inflate_workspacesize());
return 0;
}
static void free_workspaces(void)
{
vfree(def_strm.workspace);
vfree(inf_strm.workspace);
}
#else
#define alloc_workspaces() (0)
#define free_workspaces() do { } while(0)
#endif /* __KERNEL__ */
static int jffs2_zlib_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
int ret;
if (*dstlen <= STREAM_END_SPACE)
return -1;
mutex_lock(&deflate_mutex);
if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
pr_warn("deflateInit failed\n");
mutex_unlock(&deflate_mutex);
return -1;
}
def_strm.next_in = data_in;
def_strm.total_in = 0;
def_strm.next_out = cpage_out;
def_strm.total_out = 0;
while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n",
def_strm.avail_in, def_strm.avail_out);
ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
def_strm.avail_in, def_strm.avail_out,
def_strm.total_in, def_strm.total_out);
if (ret != Z_OK) {
jffs2_dbg(1, "deflate in loop returned %d\n", ret);
zlib_deflateEnd(&def_strm);
mutex_unlock(&deflate_mutex);
return -1;
}
}
def_strm.avail_out += STREAM_END_SPACE;
def_strm.avail_in = 0;
ret = zlib_deflate(&def_strm, Z_FINISH);
zlib_deflateEnd(&def_strm);
if (ret != Z_STREAM_END) {
jffs2_dbg(1, "final deflate returned %d\n", ret);
ret = -1;
goto out;
}
if (def_strm.total_out >= def_strm.total_in) {
jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
def_strm.total_in, def_strm.total_out);
ret = -1;
goto out;
}
jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
def_strm.total_in, def_strm.total_out);
*dstlen = def_strm.total_out;
*sourcelen = def_strm.total_in;
ret = 0;
out:
mutex_unlock(&deflate_mutex);
return ret;
}
static int jffs2_zlib_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
int ret;
int wbits = MAX_WBITS;
mutex_lock(&inflate_mutex);
inf_strm.next_in = data_in;
inf_strm.avail_in = srclen;
inf_strm.total_in = 0;
inf_strm.next_out = cpage_out;
inf_strm.avail_out = destlen;
inf_strm.total_out = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
((data_in[0] & 0x0f) == Z_DEFLATED) &&
!(((data_in[0]<<8) + data_in[1]) % 31)) {
jffs2_dbg(2, "inflate skipping adler32\n");
wbits = -((data_in[0] >> 4) + 8);
inf_strm.next_in += 2;
inf_strm.avail_in -= 2;
} else {
/* Let this remain D1 for now -- it should never happen */
jffs2_dbg(1, "inflate not skipping adler32\n");
}
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
pr_warn("inflateInit failed\n");
mutex_unlock(&inflate_mutex);
return 1;
}
while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
pr_notice("inflate returned %d\n", ret);
}
zlib_inflateEnd(&inf_strm);
mutex_unlock(&inflate_mutex);
return 0;
}
static struct jffs2_compressor jffs2_zlib_comp = {
.priority = JFFS2_ZLIB_PRIORITY,
.name = "zlib",
.compr = JFFS2_COMPR_ZLIB,
.compress = &jffs2_zlib_compress,
.decompress = &jffs2_zlib_decompress,
#ifdef JFFS2_ZLIB_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int __init jffs2_zlib_init(void)
{
int ret;
ret = alloc_workspaces();
if (ret)
return ret;
ret = jffs2_register_compressor(&jffs2_zlib_comp);
if (ret)
free_workspaces();
return ret;
}
void jffs2_zlib_exit(void)
{
jffs2_unregister_compressor(&jffs2_zlib_comp);
free_workspaces();
}

cpukit/libfs/src/jffs2/src/debug.c (new file)

@@ -0,0 +1,866 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include "nodelist.h"
#include "debug.h"
#ifdef JFFS2_DBG_SANITY_CHECKS
void
__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
if (unlikely(jeb && jeb->used_size + jeb->dirty_size +
jeb->free_size + jeb->wasted_size +
jeb->unchecked_size != c->sector_size)) {
JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset);
JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
jeb->free_size, jeb->dirty_size, jeb->used_size,
jeb->wasted_size, jeb->unchecked_size, c->sector_size);
BUG();
}
if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size
+ c->wasted_size + c->unchecked_size != c->flash_size)) {
JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n");
JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size,
c->wasted_size, c->unchecked_size, c->flash_size);
BUG();
}
}
void
__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
spin_lock(&c->erase_completion_lock);
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
}
#endif /* JFFS2_DBG_SANITY_CHECKS */
#ifdef JFFS2_DBG_PARANOIA_CHECKS
/*
* Check the fragtree.
*/
void
__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
{
mutex_lock(&f->sem);
__jffs2_dbg_fragtree_paranoia_check_nolock(f);
mutex_unlock(&f->sem);
}
void
__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
{
struct jffs2_node_frag *frag;
int bitched = 0;
for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
struct jffs2_full_dnode *fn = frag->node;
if (!fn || !fn->raw)
continue;
if (ref_flags(fn->raw) == REF_PRISTINE) {
if (fn->frags > 1) {
JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n",
ref_offset(fn->raw), fn->frags);
bitched = 1;
}
/* A hole node which isn't multi-page should be garbage-collected
and merged anyway, so we just check for the frag size here,
rather than mucking around with actually reading the node
and checking the compression type, which is the real way
to tell a hole node. */
if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
&& frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
ref_offset(fn->raw));
bitched = 1;
}
if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
&& frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
bitched = 1;
}
}
}
if (bitched) {
JFFS2_ERROR("fragtree is corrupted.\n");
__jffs2_dbg_dump_fragtree_nolock(f);
BUG();
}
}
/*
* Check if the flash contains all 0xFF before we start writing.
*/
void
__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
uint32_t ofs, int len)
{
size_t retlen;
int ret, i;
unsigned char *buf;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return;
ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
if (ret || (retlen != len)) {
JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n",
len, ret, retlen);
kfree(buf);
return;
}
ret = 0;
for (i = 0; i < len; i++)
if (buf[i] != 0xff)
ret = 1;
if (ret) {
JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n",
ofs, ofs + i);
__jffs2_dbg_dump_buffer(buf, len, ofs);
kfree(buf);
BUG();
}
kfree(buf);
}
void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *jeb;
uint32_t free = 0, dirty = 0, used = 0, wasted = 0,
erasing = 0, bad = 0, unchecked = 0;
int nr_counted = 0;
int dump = 0;
if (c->gcblock) {
nr_counted++;
free += c->gcblock->free_size;
dirty += c->gcblock->dirty_size;
used += c->gcblock->used_size;
wasted += c->gcblock->wasted_size;
unchecked += c->gcblock->unchecked_size;
}
if (c->nextblock) {
nr_counted++;
free += c->nextblock->free_size;
dirty += c->nextblock->dirty_size;
used += c->nextblock->used_size;
wasted += c->nextblock->wasted_size;
unchecked += c->nextblock->unchecked_size;
}
list_for_each_entry(jeb, &c->clean_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->very_dirty_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->dirty_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->erasable_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->erase_pending_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->free_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->bad_used_list, list) {
nr_counted++;
free += jeb->free_size;
dirty += jeb->dirty_size;
used += jeb->used_size;
wasted += jeb->wasted_size;
unchecked += jeb->unchecked_size;
}
list_for_each_entry(jeb, &c->erasing_list, list) {
nr_counted++;
erasing += c->sector_size;
}
list_for_each_entry(jeb, &c->erase_checking_list, list) {
nr_counted++;
erasing += c->sector_size;
}
list_for_each_entry(jeb, &c->erase_complete_list, list) {
nr_counted++;
erasing += c->sector_size;
}
list_for_each_entry(jeb, &c->bad_list, list) {
nr_counted++;
bad += c->sector_size;
}
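/* Compare each recounted total against the corresponding counter cached in the
   superblock info; the ## token pasting selects the matching c->xxx_size field. */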
#define check(sz) \
do { \
if (sz != c->sz##_size) { \
pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \
#sz, sz, #sz, c->sz##_size); \
dump = 1; \
} \
} while (0)
check(free);
check(dirty);
check(used);
check(wasted);
check(unchecked);
check(bad);
check(erasing);
#undef check
if (nr_counted != c->nr_blocks) {
pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
__func__, nr_counted, c->nr_blocks);
dump = 1;
}
if (dump) {
__jffs2_dbg_dump_block_lists_nolock(c);
BUG();
}
}
/*
* Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'.
*/
void
__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
spin_lock(&c->erase_completion_lock);
__jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
}
void
__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
uint32_t my_used_size = 0;
uint32_t my_unchecked_size = 0;
uint32_t my_dirty_size = 0;
struct jffs2_raw_node_ref *ref2 = jeb->first_node;
while (ref2) {
uint32_t totlen = ref_totlen(c, jeb, ref2);
if (ref_offset(ref2) < jeb->offset ||
ref_offset(ref2) > jeb->offset + c->sector_size) {
JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n",
ref_offset(ref2), jeb->offset);
goto error;
}
if (ref_flags(ref2) == REF_UNCHECKED)
my_unchecked_size += totlen;
else if (!ref_obsolete(ref2))
my_used_size += totlen;
else
my_dirty_size += totlen;
if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) {
JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2),
ref_offset(jeb->last_node), jeb->last_node);
goto error;
}
ref2 = ref_next(ref2);
}
if (my_used_size != jeb->used_size) {
JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n",
my_used_size, jeb->used_size);
goto error;
}
if (my_unchecked_size != jeb->unchecked_size) {
JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n",
my_unchecked_size, jeb->unchecked_size);
goto error;
}
#if 0
/* This should work when we implement ref->__totlen elimination */
if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) {
JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n",
my_dirty_size, jeb->dirty_size + jeb->wasted_size);
goto error;
}
if (jeb->free_size == 0
&& my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) {
JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n",
my_used_size + my_unchecked_size + my_dirty_size,
c->sector_size);
goto error;
}
#endif
if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING)))
__jffs2_dbg_superblock_counts(c);
return;
error:
__jffs2_dbg_dump_node_refs_nolock(c, jeb);
__jffs2_dbg_dump_jeb_nolock(jeb);
__jffs2_dbg_dump_block_lists_nolock(c);
BUG();
}
#endif /* JFFS2_DBG_PARANOIA_CHECKS */
#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
/*
* Dump the node_refs of the 'jeb' JFFS2 eraseblock.
*/
void
__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
spin_lock(&c->erase_completion_lock);
__jffs2_dbg_dump_node_refs_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
}
void
__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *ref;
int i = 0;
printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset);
if (!jeb->first_node) {
printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset);
return;
}
printk(JFFS2_DBG);
for (ref = jeb->first_node; ; ref = ref_next(ref)) {
printk("%#08x", ref_offset(ref));
#ifdef TEST_TOTLEN
printk("(%x)", ref->__totlen);
#endif
if (ref_next(ref))
printk("->");
else
break;
if (++i == 4) {
i = 0;
printk("\n" JFFS2_DBG);
}
}
printk("\n");
}
/*
* Dump an eraseblock's space accounting.
*/
void
__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
spin_lock(&c->erase_completion_lock);
__jffs2_dbg_dump_jeb_nolock(jeb);
spin_unlock(&c->erase_completion_lock);
}
void
__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb)
{
if (!jeb)
return;
printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n",
jeb->offset);
printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size);
printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size);
printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size);
printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size);
printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size);
}
void
__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c)
{
spin_lock(&c->erase_completion_lock);
__jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
}
void
__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
{
printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n");
printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size);
printk(JFFS2_DBG "used_size: %#08x\n", c->used_size);
printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size);
printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size);
printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size);
printk(JFFS2_DBG "free_size: %#08x\n", c->free_size);
printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size);
printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size);
printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size);
printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n",
c->sector_size * c->resv_blocks_write);
if (c->nextblock)
printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
c->nextblock->offset, c->nextblock->used_size,
c->nextblock->dirty_size, c->nextblock->wasted_size,
c->nextblock->unchecked_size, c->nextblock->free_size);
else
printk(JFFS2_DBG "nextblock: NULL\n");
if (c->gcblock)
printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size,
c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
else
printk(JFFS2_DBG "gcblock: NULL\n");
if (list_empty(&c->clean_list)) {
printk(JFFS2_DBG "clean_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->clean_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->wasted_size;
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n",
numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->very_dirty_list)) {
printk(JFFS2_DBG "very_dirty_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->very_dirty_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->dirty_size;
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->dirty_list)) {
printk(JFFS2_DBG "dirty_list: empty\n");
} else {
struct list_head *this;
int numblocks = 0;
uint32_t dirty = 0;
list_for_each(this, &c->dirty_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
numblocks ++;
dirty += jeb->dirty_size;
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n",
numblocks, dirty, dirty / numblocks);
}
if (list_empty(&c->erasable_list)) {
printk(JFFS2_DBG "erasable_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasable_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->erasing_list)) {
printk(JFFS2_DBG "erasing_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasing_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->erase_checking_list)) {
printk(JFFS2_DBG "erase_checking_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erase_checking_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->erase_pending_list)) {
printk(JFFS2_DBG "erase_pending_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erase_pending_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->erasable_pending_wbuf_list)) {
printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->erasable_pending_wbuf_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->free_list)) {
printk(JFFS2_DBG "free_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->free_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->bad_list)) {
printk(JFFS2_DBG "bad_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->bad_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
if (list_empty(&c->bad_used_list)) {
printk(JFFS2_DBG "bad_used_list: empty\n");
} else {
struct list_head *this;
list_for_each(this, &c->bad_used_list) {
struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
jeb->unchecked_size, jeb->free_size);
}
}
}
}
void
__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
{
mutex_lock(&f->sem);
jffs2_dbg_dump_fragtree_nolock(f);
mutex_unlock(&f->sem);
}
void
__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f)
{
struct jffs2_node_frag *this = frag_first(&f->fragtree);
uint32_t lastofs = 0;
int buggy = 0;
printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino);
while(this) {
if (this->node)
printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n",
this->ofs, this->ofs+this->size, ref_offset(this->node->raw),
ref_flags(this->node->raw), this, frag_left(this), frag_right(this),
frag_parent(this));
else
printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n",
this->ofs, this->ofs+this->size, this, frag_left(this),
frag_right(this), frag_parent(this));
if (this->ofs != lastofs)
buggy = 1;
lastofs = this->ofs + this->size;
this = frag_next(this);
}
if (f->metadata)
printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
if (buggy) {
JFFS2_ERROR("frag tree got a hole in it.\n");
BUG();
}
}
#define JFFS2_BUFDUMP_BYTES_PER_LINE 32
void
__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs)
{
int skip;
int i;
printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n",
offs, offs + len, len);
i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE;
offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1);
if (skip != 0)
printk(JFFS2_DBG "%#08x: ", offs);
while (skip--)
printk(" ");
while (i < len) {
if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) {
if (i != 0)
printk("\n");
offs += JFFS2_BUFDUMP_BYTES_PER_LINE;
printk(JFFS2_DBG "%0#8x: ", offs);
}
printk("%02x ", buf[i]);
i += 1;
}
printk("\n");
}
/*
* Dump a JFFS2 node.
*/
void
__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs)
{
union jffs2_node_union node;
int len = sizeof(union jffs2_node_union);
size_t retlen;
uint32_t crc;
int ret;
printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs);
ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node);
if (ret || (retlen != len)) {
JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n",
len, ret, retlen);
return;
}
printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic));
printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype));
printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen));
printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc));
crc = crc32(0, &node.u, sizeof(node.u) - 4);
if (crc != je32_to_cpu(node.u.hdr_crc)) {
JFFS2_ERROR("wrong common header CRC.\n");
return;
}
if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK &&
je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK)
{
JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n",
je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK);
return;
}
switch(je16_to_cpu(node.u.nodetype)) {
case JFFS2_NODETYPE_INODE:
printk(JFFS2_DBG "the node is inode node\n");
printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino));
printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version));
printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m);
printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid));
printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid));
printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize));
printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime));
printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime));
printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime));
printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset));
printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize));
printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize));
printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr);
printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr);
printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags));
printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc));
printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc));
crc = crc32(0, &node.i, sizeof(node.i) - 8);
if (crc != je32_to_cpu(node.i.node_crc)) {
JFFS2_ERROR("wrong node header CRC.\n");
return;
}
break;
case JFFS2_NODETYPE_DIRENT:
printk(JFFS2_DBG "the node is dirent node\n");
printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino));
printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version));
printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino));
printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime));
printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize);
printk(JFFS2_DBG "type:\t%#02x\n", node.d.type);
printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc));
printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc));
node.d.name[node.d.nsize] = '\0';
printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name);
crc = crc32(0, &node.d, sizeof(node.d) - 8);
if (crc != je32_to_cpu(node.d.node_crc)) {
JFFS2_ERROR("wrong node header CRC.\n");
return;
}
break;
default:
printk(JFFS2_DBG "node type is unknown\n");
break;
}
}
#endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */

View File

@@ -0,0 +1,275 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef _JFFS2_DEBUG_H_
#define _JFFS2_DEBUG_H_
#include <linux/sched.h>
#ifndef CONFIG_JFFS2_FS_DEBUG
#define CONFIG_JFFS2_FS_DEBUG 0
#endif
#if CONFIG_JFFS2_FS_DEBUG > 0
/* Enable "paranoia" checks and dumps */
#define JFFS2_DBG_PARANOIA_CHECKS
#define JFFS2_DBG_DUMPS
/*
* By defining/undefining the below macros one may select debugging messages
* for specific JFFS2 subsystems.
*/
#define JFFS2_DBG_READINODE_MESSAGES
#define JFFS2_DBG_FRAGTREE_MESSAGES
#define JFFS2_DBG_DENTLIST_MESSAGES
#define JFFS2_DBG_NODEREF_MESSAGES
#define JFFS2_DBG_INOCACHE_MESSAGES
#define JFFS2_DBG_SUMMARY_MESSAGES
#define JFFS2_DBG_FSBUILD_MESSAGES
#endif
#if CONFIG_JFFS2_FS_DEBUG > 1
#define JFFS2_DBG_FRAGTREE2_MESSAGES
#define JFFS2_DBG_READINODE2_MESSAGES
#define JFFS2_DBG_MEMALLOC_MESSAGES
#endif
/* Sanity checks are supposed to be light-weight and enabled by default */
#define JFFS2_DBG_SANITY_CHECKS
/*
* Dx() are mainly used for debugging messages, they must go away and be
* superseded by nicer dbg_xxx() macros...
*/
#if CONFIG_JFFS2_FS_DEBUG > 0
#define DEBUG
#define D1(x) x
#else
#define D1(x)
#endif
#if CONFIG_JFFS2_FS_DEBUG > 1
#define D2(x) x
#else
#define D2(x)
#endif
#define jffs2_dbg(level, fmt, ...) \
do { \
if (CONFIG_JFFS2_FS_DEBUG >= level) \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
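/* Example: jffs2_dbg(1, "erasing block at %#08x\n", jeb->offset); because
   CONFIG_JFFS2_FS_DEBUG is a compile-time constant, the compiler drops the
   call entirely when the requested level is not enabled. */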
/* The prefixes of JFFS2 messages */
#define JFFS2_DBG KERN_DEBUG
#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
/* JFFS2 message macros */
#define JFFS2_ERROR(fmt, ...) \
pr_err("error: (%d) %s: " fmt, \
task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_WARNING(fmt, ...) \
pr_warn("warning: (%d) %s: " fmt, \
task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_NOTICE(fmt, ...) \
pr_notice("notice: (%d) %s: " fmt, \
task_pid_nr(current), __func__, ##__VA_ARGS__)
#define JFFS2_DEBUG(fmt, ...) \
printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \
task_pid_nr(current), __func__, ##__VA_ARGS__)
/*
* We split our debugging messages into several parts, depending on the JFFS2
* subsystem the message belongs to.
*/
/* Read inode debugging messages */
#ifdef JFFS2_DBG_READINODE_MESSAGES
#define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_readinode(fmt, ...)
#endif
#ifdef JFFS2_DBG_READINODE2_MESSAGES
#define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_readinode2(fmt, ...)
#endif
/* Fragtree build debugging messages */
#ifdef JFFS2_DBG_FRAGTREE_MESSAGES
#define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_fragtree(fmt, ...)
#endif
#ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
#define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_fragtree2(fmt, ...)
#endif
/* Directory entry list manipulation debugging messages */
#ifdef JFFS2_DBG_DENTLIST_MESSAGES
#define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_dentlist(fmt, ...)
#endif
/* Print the messages about manipulating node_refs */
#ifdef JFFS2_DBG_NODEREF_MESSAGES
#define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_noderef(fmt, ...)
#endif
/* Manipulations with the list of inodes (JFFS2 inocache) */
#ifdef JFFS2_DBG_INOCACHE_MESSAGES
#define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_inocache(fmt, ...)
#endif
/* Summary debugging messages */
#ifdef JFFS2_DBG_SUMMARY_MESSAGES
#define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_summary(fmt, ...)
#endif
/* File system build messages */
#ifdef JFFS2_DBG_FSBUILD_MESSAGES
#define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_fsbuild(fmt, ...)
#endif
/* Watch the object allocations */
#ifdef JFFS2_DBG_MEMALLOC_MESSAGES
#define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_memalloc(fmt, ...)
#endif
/* Watch the XATTR subsystem */
#ifdef JFFS2_DBG_XATTR_MESSAGES
#define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
#else
#define dbg_xattr(fmt, ...)
#endif
/* "Sanity" checks */
void
__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
/* "Paranoia" checks */
void
__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f);
void
__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f);
void
__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
uint32_t ofs, int len);
/* "Dump" functions */
void
__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c);
void
__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c);
void
__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb);
void
__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f);
void
__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f);
void
__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs);
void
__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs);
#ifdef JFFS2_DBG_PARANOIA_CHECKS
#define jffs2_dbg_fragtree_paranoia_check(f) \
__jffs2_dbg_fragtree_paranoia_check(f)
#define jffs2_dbg_fragtree_paranoia_check_nolock(f) \
__jffs2_dbg_fragtree_paranoia_check_nolock(f)
#define jffs2_dbg_acct_paranoia_check(c, jeb) \
__jffs2_dbg_acct_paranoia_check(c,jeb)
#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \
__jffs2_dbg_acct_paranoia_check_nolock(c,jeb)
#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \
__jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
#else
#define jffs2_dbg_fragtree_paranoia_check(f)
#define jffs2_dbg_fragtree_paranoia_check_nolock(f)
#define jffs2_dbg_acct_paranoia_check(c, jeb)
#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb)
#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
#endif /* !JFFS2_DBG_PARANOIA_CHECKS */
#ifdef JFFS2_DBG_DUMPS
#define jffs2_dbg_dump_jeb(c, jeb) \
__jffs2_dbg_dump_jeb(c, jeb);
#define jffs2_dbg_dump_jeb_nolock(jeb) \
__jffs2_dbg_dump_jeb_nolock(jeb);
#define jffs2_dbg_dump_block_lists(c) \
__jffs2_dbg_dump_block_lists(c)
#define jffs2_dbg_dump_block_lists_nolock(c) \
__jffs2_dbg_dump_block_lists_nolock(c)
#define jffs2_dbg_dump_fragtree(f) \
__jffs2_dbg_dump_fragtree(f);
#define jffs2_dbg_dump_fragtree_nolock(f) \
__jffs2_dbg_dump_fragtree_nolock(f);
#define jffs2_dbg_dump_buffer(buf, len, offs) \
__jffs2_dbg_dump_buffer(buf, len, offs);
#define jffs2_dbg_dump_node(c, ofs) \
__jffs2_dbg_dump_node(c, ofs);
#else
#define jffs2_dbg_dump_jeb(c, jeb)
#define jffs2_dbg_dump_jeb_nolock(jeb)
#define jffs2_dbg_dump_block_lists(c)
#define jffs2_dbg_dump_block_lists_nolock(c)
#define jffs2_dbg_dump_fragtree(f)
#define jffs2_dbg_dump_fragtree_nolock(f)
#define jffs2_dbg_dump_buffer(buf, len, offs)
#define jffs2_dbg_dump_node(c, ofs)
#endif /* !JFFS2_DBG_DUMPS */
#ifdef JFFS2_DBG_SANITY_CHECKS
#define jffs2_dbg_acct_sanity_check(c, jeb) \
__jffs2_dbg_acct_sanity_check(c, jeb)
#define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \
__jffs2_dbg_acct_sanity_check_nolock(c, jeb)
#else
#define jffs2_dbg_acct_sanity_check(c, jeb)
#define jffs2_dbg_acct_sanity_check_nolock(c, jeb)
#endif /* !JFFS2_DBG_SANITY_CHECKS */
#endif /* _JFFS2_DEBUG_H_ */

View File

@@ -0,0 +1,515 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"
struct erase_priv_struct {
struct jffs2_eraseblock *jeb;
struct jffs2_sb_info *c;
};
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_erase_block(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb)
{
int ret;
uint32_t bad_offset;
#ifdef __ECOS
ret = jffs2_flash_erase(c, jeb);
if (!ret) {
jffs2_erase_succeeded(c, jeb);
return;
}
bad_offset = jeb->offset;
#else /* Linux */
struct erase_info *instr;
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size);
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
memset(instr, 0, sizeof(*instr));
instr->mtd = c->mtd;
instr->addr = jeb->offset;
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
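/* The erase_priv_struct was allocated in the same kmalloc() directly behind the
   erase_info, so &instr[1] points at it. */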
instr->priv = (unsigned long)(&instr[1]);
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
ret = mtd_erase(c->mtd, instr);
if (!ret)
return;
bad_offset = instr->fail_addr;
kfree(instr);
#endif /* __ECOS */
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
jeb->offset, ret);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
if (ret == -EROFS)
pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
jeb->offset);
else
pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
jeb->offset, ret);
jffs2_erase_failed(c, jeb, bad_offset);
}
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
int work_done = 0;
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
while (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list)) {
if (!list_empty(&c->erase_complete_list)) {
jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
list_move(&jeb->list, &c->erase_checking_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
work_done++;
if (!--count) {
jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
goto done;
}
} else if (!list_empty(&c->erase_pending_list)) {
jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
jeb->offset);
list_del(&jeb->list);
c->erasing_size += c->sector_size;
c->wasted_size -= jeb->wasted_size;
c->free_size -= jeb->free_size;
c->used_size -= jeb->used_size;
c->dirty_size -= jeb->dirty_size;
jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
jffs2_erase_block(c, jeb);
} else {
BUG();
}
/* Be nice */
cond_resched();
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
done:
jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
return work_done;
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
/* Wake the GC thread to mark them clean */
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
/* For NAND, if the failure did not occur at the device level for a
specific physical page, don't bother updating the bad block table. */
if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
/* We had a device-level failure to erase. Let's see if we've
failed too many times. */
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}
}
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *instr)
{
struct erase_priv_struct *priv = (void *)instr->priv;
if(instr->state != MTD_ERASE_DONE) {
pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
(unsigned long long)instr->addr, instr->state);
jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
jffs2_erase_succeeded(priv->c, priv->jeb);
}
kfree(instr);
}
#endif /* !__ECOS */
/* Hmmm. Maybe we should accept the extra space it takes and make
this a standard doubly-linked list? */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
struct jffs2_inode_cache *ic = NULL;
struct jffs2_raw_node_ref **prev;
prev = &ref->next_in_ino;
/* Walk the inode's list once, removing any nodes from this eraseblock */
while (1) {
if (!(*prev)->next_in_ino) {
/* We're looking at the jffs2_inode_cache, which is
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
prev = &ic->nodes;
continue;
}
if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
/* It's in the block we're erasing */
struct jffs2_raw_node_ref *this;
this = *prev;
*prev = this->next_in_ino;
this->next_in_ino = NULL;
if (this == ref)
break;
continue;
}
/* Not to be deleted. Skip */
prev = &((*prev)->next_in_ino);
}
/* PARANOIA */
if (!ic) {
JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
" not found in remove_node_refs()!!\n");
return;
}
jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
jeb->offset, jeb->offset + c->sector_size, ic->ino);
D2({
int i=0;
struct jffs2_raw_node_ref *this;
printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");
this = ic->nodes;
printk(KERN_DEBUG);
while(this) {
pr_cont("0x%08x(%d)->",
ref_offset(this), ref_flags(this));
if (++i == 5) {
printk(KERN_DEBUG);
i=0;
}
this = this->next_in_ino;
}
pr_cont("\n");
});
switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
case RAWNODE_CLASS_XATTR_DATUM:
jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
break;
case RAWNODE_CLASS_XATTR_REF:
jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
break;
#endif
default:
if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
jffs2_del_ino_cache(c, ic);
}
}
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *block, *ref;
jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
jeb->offset);
block = ref = jeb->first_node;
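/* node_refs live in contiguous arrays ("refblocks"); a REF_LINK_NODE entry
   chains to the next array, so each exhausted array is freed as we walk past it. */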
while (ref) {
if (ref->flash_offset == REF_LINK_NODE) {
ref = ref->next_in_ino;
jffs2_free_refblock(block);
block = ref;
continue;
}
if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
/* else it was a non-inode node or already removed, so don't bother */
ref++;
}
jeb->first_node = jeb->last_node = NULL;
}
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
void *ebuf;
uint32_t ofs;
size_t retlen;
int ret;
unsigned long *wordebuf;
ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
&ebuf, NULL);
if (ret != -EOPNOTSUPP) {
if (ret) {
jffs2_dbg(1, "MTD point failed %d\n", ret);
goto do_flash_read;
}
if (retlen < c->sector_size) {
/* Don't muck about if it won't let us point to the whole erase sector */
jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
retlen);
mtd_unpoint(c->mtd, jeb->offset, retlen);
goto do_flash_read;
}
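/* Scan the pointed region one machine word at a time. The pointer is biased
   back by one word so the pre-increment below starts at the first word; any
   word that is not all-ones means the erase did not take. */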
wordebuf = ebuf-sizeof(*wordebuf);
retlen /= sizeof(*wordebuf);
do {
if (*++wordebuf != ~0)
break;
} while(--retlen);
mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
*wordebuf,
jeb->offset +
c->sector_size-retlen * sizeof(*wordebuf));
return -EIO;
}
return 0;
}
do_flash_read:
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ebuf) {
pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
jeb->offset);
return -EAGAIN;
}
jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
int i;
*bad_offset = ofs;
ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
ofs, ret);
ret = -EIO;
goto fail;
}
if (retlen != readlen) {
pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
ofs, readlen, retlen);
ret = -EIO;
goto fail;
}
for (i=0; i<readlen; i += sizeof(unsigned long)) {
/* It's OK. We know it's properly aligned */
unsigned long *datum = ebuf + i;
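/* An erased word reads back as all 0xFF (~0UL), so *datum + 1 wraps to zero;
   anything non-zero is stale data left behind by a failed erase. */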
if (*datum + 1) {
*bad_offset += i;
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
*datum, *bad_offset);
ret = -EIO;
goto fail;
}
}
ofs += readlen;
cond_resched();
}
ret = 0;
fail:
kfree(ebuf);
return ret;
}
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
size_t retlen;
int ret;
uint32_t uninitialized_var(bad_offset);
switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
case -EAGAIN: goto refile;
case -EIO: goto filebad;
}
/* Write the erase complete marker */
jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
bad_offset = jeb->offset;
/* Cleanmarker in oob area or no cleanmarker at all ? */
if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
if (jffs2_cleanmarker_oob(c)) {
if (jffs2_write_nand_cleanmarker(c, jeb))
goto filebad;
}
} else {
struct kvec vecs[1];
struct jffs2_unknown_node marker = {
.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
.totlen = cpu_to_je32(c->cleanmarker_size)
};
jffs2_prealloc_raw_node_refs(c, jeb, 1);
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
vecs[0].iov_base = (unsigned char *) &marker;
vecs[0].iov_len = sizeof(marker);
ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
if (ret || retlen != sizeof(marker)) {
if (ret)
pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
jeb->offset, ret);
else
pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
jeb->offset, sizeof(marker), retlen);
goto filebad;
}
}
/* Everything else got zeroed before the erase */
jeb->free_size = c->sector_size;
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->free_size += c->sector_size;
/* Account for cleanmarker now, if it's in-band */
if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
list_move_tail(&jeb->list, &c->free_list);
c->nr_erasing_blocks--;
c->nr_free_blocks++;
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
return;
filebad:
jffs2_erase_failed(c, jeb, bad_offset);
return;
refile:
/* Stick it back on the list from whence it came and come back later */
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
return;
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,56 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef _JFFS2_FS_I
#define _JFFS2_FS_I
#include <linux/rbtree.h>
#include <linux/posix_acl.h>
#include <linux/mutex.h>
struct jffs2_inode_info {
/* We need an internal mutex similar to inode->i_mutex.
Unfortunately, we can't use the existing one, because
either the GC would deadlock, or we'd have to release it
before letting GC proceed. Or we'd have to put ugliness
into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
struct mutex sem;
/* The highest (datanode) version number used for this ino */
uint32_t highest_version;
/* List of data fragments which make up the file */
struct rb_root fragtree;
/* There may be one datanode which isn't referenced by any of the
above fragments, if it contains a metadata update but no actual
data - or if this is a directory inode */
/* This also holds the _only_ dnode for symlinks/device nodes,
etc. */
struct jffs2_full_dnode *metadata;
/* Directory entries */
struct jffs2_full_dirent *dents;
/* The target path if this is the inode of a symlink */
unsigned char *target;
/* Some stuff we just have to keep in-core at all times, for each inode. */
struct jffs2_inode_cache *inocache;
uint16_t flags;
uint8_t usercompr;
struct inode vfs_inode;
};
#endif /* _JFFS2_FS_I */

View File

@@ -0,0 +1,164 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef _JFFS2_FS_SB
#define _JFFS2_FS_SB
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#define JFFS2_SB_FLAG_RO 1
#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */
#define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */
struct jffs2_inodirty;
struct jffs2_mount_opts {
bool override_compr;
unsigned int compr;
/* The size of the reserved pool. The reserved pool is the JFFS2 flash
* space which may only be used by root and cannot be used by the other
* users. This is implemented simply by means of not allowing the
* latter users to write to the file system if the amount of the
* available space is less than 'rp_size'. */
unsigned int rp_size;
};
/* A struct for the overall file system control. Pointers to
jffs2_sb_info structs are named `c' in the source code.
Nee jffs_control
*/
struct jffs2_sb_info {
struct mtd_info *mtd;
uint32_t highest_ino;
uint32_t checked_ino;
unsigned int flags;
struct task_struct *gc_task; /* GC task struct */
struct completion gc_thread_start; /* GC thread start completion */
struct completion gc_thread_exit; /* GC thread exit completion port */
struct mutex alloc_sem; /* Used to protect all the following
fields, and also to protect against
out-of-order writing of nodes. And GC. */
uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
(i.e. zero for OOB CLEANMARKER) */
uint32_t flash_size;
uint32_t used_size;
uint32_t dirty_size;
uint32_t wasted_size;
uint32_t free_size;
uint32_t erasing_size;
uint32_t bad_size;
uint32_t sector_size;
uint32_t unchecked_size;
uint32_t nr_free_blocks;
uint32_t nr_erasing_blocks;
/* Number of free blocks there must be before we... */
uint8_t resv_blocks_write; /* ... allow a normal filesystem write */
uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */
uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */
uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */
uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */
/* Number of 'very dirty' blocks before we trigger immediate GC */
uint8_t vdirty_blocks_gctrigger;
uint32_t nospc_dirty_size;
uint32_t nr_blocks;
struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks
* from the offset (blocks[ofs / sector_size]) */
struct jffs2_eraseblock *nextblock; /* The block we're currently filling */
struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */
struct list_head clean_list; /* Blocks 100% full of clean data */
struct list_head very_dirty_list; /* Blocks with lots of dirty space */
struct list_head dirty_list; /* Blocks with some dirty space */
struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */
struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */
struct list_head erasing_list; /* Blocks which are currently erasing */
struct list_head erase_checking_list; /* Blocks which are being checked and marked */
struct list_head erase_pending_list; /* Blocks which need erasing now */
struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */
struct list_head free_list; /* Blocks which are free and ready to be used */
struct list_head bad_list; /* Bad blocks. */
struct list_head bad_used_list; /* Bad blocks with valid data in. */
spinlock_t erase_completion_lock; /* Protect free_list and erasing_list
against erase completion handler */
wait_queue_head_t erase_wait; /* For waiting for erases to complete */
wait_queue_head_t inocache_wq;
int inocache_hashsize;
struct jffs2_inode_cache **inocache_list;
spinlock_t inocache_lock;
/* Sem to allow jffs2_garbage_collect_deletion_dirent to
drop the erase_completion_lock while it's holding a pointer
to an obsoleted node. I don't like this. Alternatives welcomed. */
struct mutex erase_free_sem;
uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
unsigned char *wbuf_verify; /* read-back buffer for verification */
#endif
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
unsigned char *wbuf; /* Write-behind buffer for NAND flash */
uint32_t wbuf_ofs;
uint32_t wbuf_len;
struct jffs2_inodirty *wbuf_inodes;
struct rw_semaphore wbuf_sem; /* Protects the write buffer */
struct delayed_work wbuf_dwork; /* write-buffer write-out work */
int wbuf_queued; /* non-zero if delayed work is queued */
spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and wbuf_queued */
unsigned char *oobbuf;
int oobavail; /* How many bytes are available for JFFS2 in OOB */
#endif
struct jffs2_summary *summary; /* Summary information */
struct jffs2_mount_opts mount_opts;
#ifdef CONFIG_JFFS2_FS_XATTR
#define XATTRINDEX_HASHSIZE (57)
uint32_t highest_xid;
uint32_t highest_xseqno;
struct list_head xattrindex[XATTRINDEX_HASHSIZE];
struct list_head xattr_unchecked;
struct list_head xattr_dead_list;
struct jffs2_xattr_ref *xref_dead_list;
struct jffs2_xattr_ref *xref_temp;
struct rw_semaphore xattr_sem;
uint32_t xdatum_mem_usage;
uint32_t xdatum_mem_threshold;
#endif
/* OS-private pointer for getting back to master superblock info */
void *os_priv;
};
#endif /* _JFFS2_FS_SB */

View File

@@ -0,0 +1,779 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mtd/mtd.h>
#include <linux/rbtree.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include "nodelist.h"
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c,
struct jffs2_node_frag *this);
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
struct jffs2_full_dirent **prev = list;
dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino);
while ((*prev) && (*prev)->nhash <= new->nhash) {
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
(*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
(*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
/* It may have been a 'placeholder' deletion dirent,
if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */
if ((*prev)->raw)
jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}
return;
}
prev = &((*prev)->next);
}
new->next = *prev;
*prev = new;
}
uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
{
struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size);
/* We know frag->ofs <= size. That's what lookup does for us */
if (frag && frag->ofs != size) {
if (frag->ofs+frag->size > size) {
frag->size = size - frag->ofs;
}
frag = frag_next(frag);
}
while (frag && frag->ofs >= size) {
struct jffs2_node_frag *next = frag_next(frag);
frag_erase(frag, list);
jffs2_obsolete_node_frag(c, frag);
frag = next;
}
if (size == 0)
return 0;
frag = frag_last(list);
/* Sanity check for truncation to longer than we started with... */
if (!frag)
return 0;
if (frag->ofs + frag->size < size)
return frag->ofs + frag->size;
/* If the last fragment starts at the RAM page boundary, it is
* REF_PRISTINE irrespective of its size. */
if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
frag->ofs, frag->ofs + frag->size);
frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
}
return size;
}
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c,
struct jffs2_node_frag *this)
{
if (this->node) {
this->node->frags--;
if (!this->node->frags) {
/* The node has no valid frags left. It's totally obsoleted */
dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);
jffs2_mark_node_obsolete(c, this->node->raw);
jffs2_free_full_dnode(this->node);
} else {
dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);
mark_ref_normal(this->node->raw);
}
}
jffs2_free_node_frag(this);
}
static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
struct rb_node *parent = &base->rb;
struct rb_node **link = &parent;
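/* 'link' initially points at the local 'parent', so the first loop iteration
   examines 'base' itself before descending into its children. */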
dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size);
while (*link) {
parent = *link;
base = rb_entry(parent, struct jffs2_node_frag, rb);
if (newfrag->ofs > base->ofs)
link = &base->rb.rb_right;
else if (newfrag->ofs < base->ofs)
link = &base->rb.rb_left;
else {
JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
BUG();
}
}
rb_link_node(&newfrag->rb, &base->rb, link);
}
/*
* Allocates and initializes a new fragment.
*/
static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size)
{
struct jffs2_node_frag *newfrag;
newfrag = jffs2_alloc_node_frag();
if (likely(newfrag)) {
newfrag->ofs = ofs;
newfrag->size = size;
newfrag->node = fn;
} else {
JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n");
}
return newfrag;
}
/*
* Called when no overlapping fragment exists. Inserts a hole before the new
* fragment, if needed, and then inserts the new fragment into the fragtree.
*/
static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root,
struct jffs2_node_frag *newfrag,
struct jffs2_node_frag *this, uint32_t lastend)
{
if (lastend < newfrag->node->ofs) {
/* put a hole in before the new fragment */
struct jffs2_node_frag *holefrag;
holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
if (unlikely(!holefrag)) {
jffs2_free_node_frag(newfrag);
return -ENOMEM;
}
if (this) {
/* By definition, the 'this' node has no right-hand child,
because there are no frags with offset greater than it.
So that's where we want to put the hole */
dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n",
holefrag->ofs, holefrag->ofs + holefrag->size);
rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
} else {
dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n",
holefrag->ofs, holefrag->ofs + holefrag->size);
rb_link_node(&holefrag->rb, NULL, &root->rb_node);
}
rb_insert_color(&holefrag->rb, root);
this = holefrag;
}
if (this) {
/* By definition, the 'this' node has no right-hand child,
because there are no frags with offset greater than it.
So that's where we want to put new fragment */
dbg_fragtree2("add the new node at the right\n");
rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
} else {
dbg_fragtree2("insert the new node at the root of the tree\n");
rb_link_node(&newfrag->rb, NULL, &root->rb_node);
}
rb_insert_color(&newfrag->rb, root);
return 0;
}
/* Doesn't set inode->i_size */
static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag)
{
struct jffs2_node_frag *this;
uint32_t lastend;
/* Skip all the nodes which are completed before this one starts */
this = jffs2_lookup_node_frag(root, newfrag->node->ofs);
if (this) {
dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this);
lastend = this->ofs + this->size;
} else {
dbg_fragtree2("lookup gave no frag\n");
lastend = 0;
}
/* See if we ran off the end of the fragtree */
if (lastend <= newfrag->ofs) {
/* We did */
/* Check if 'this' node was on the same page as the new node.
If so, both 'this' and the new node get marked REF_NORMAL so
the GC can take a look.
*/
if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
if (this->node)
mark_ref_normal(this->node->raw);
mark_ref_normal(newfrag->node->raw);
}
return no_overlapping_node(c, root, newfrag, this, lastend);
}
if (this->node)
dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n",
this->ofs, this->ofs + this->size,
ref_offset(this->node->raw), ref_flags(this->node->raw));
else
dbg_fragtree2("dealing with hole frag %u-%u.\n",
this->ofs, this->ofs + this->size);
/* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
* - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
*/
if (newfrag->ofs > this->ofs) {
/* This node isn't completely obsoleted. The start of it remains valid */
/* Mark the new node and the partially covered node REF_NORMAL -- let
the GC take a look at them */
mark_ref_normal(newfrag->node->raw);
if (this->node)
mark_ref_normal(this->node->raw);
if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
/* The new node splits 'this' frag into two */
struct jffs2_node_frag *newfrag2;
if (this->node)
dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n",
this->ofs, this->ofs+this->size, ref_offset(this->node->raw));
else
dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n",
this->ofs, this->ofs+this->size);
/* New second frag pointing to this's node */
newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
this->ofs + this->size - newfrag->ofs - newfrag->size);
if (unlikely(!newfrag2))
return -ENOMEM;
if (this->node)
this->node->frags++;
/* Adjust size of original 'this' */
this->size = newfrag->ofs - this->ofs;
/* Now, we know there's no node with offset
greater than this->ofs but smaller than
newfrag2->ofs or newfrag->ofs, for obvious
reasons. So we can do a tree insert from
'this' to insert newfrag, and a tree insert
from newfrag to insert newfrag2. */
jffs2_fragtree_insert(newfrag, this);
rb_insert_color(&newfrag->rb, root);
jffs2_fragtree_insert(newfrag2, newfrag);
rb_insert_color(&newfrag2->rb, root);
return 0;
}
/* New node just reduces 'this' frag in size, doesn't split it */
this->size = newfrag->ofs - this->ofs;
/* Again, we know it lives down here in the tree */
jffs2_fragtree_insert(newfrag, this);
rb_insert_color(&newfrag->rb, root);
} else {
/* New frag starts at the same point as 'this' used to. Replace
it in the tree without doing a delete and insertion */
dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n",
newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size);
rb_replace_node(&this->rb, &newfrag->rb, root);
if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size);
jffs2_obsolete_node_frag(c, this);
} else {
this->ofs += newfrag->size;
this->size -= newfrag->size;
jffs2_fragtree_insert(this, newfrag);
rb_insert_color(&this->rb, root);
return 0;
}
}
/* OK, now we have newfrag added in the correct place in the tree, but
frag_next(newfrag) may be a fragment which is overlapped by it
*/
while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
/* 'this' frag is obsoleted completely. */
dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n",
this, this->ofs, this->ofs+this->size);
rb_erase(&this->rb, root);
jffs2_obsolete_node_frag(c, this);
}
/* Now we're pointing at the first frag which isn't totally obsoleted by
the new frag */
if (!this || newfrag->ofs + newfrag->size == this->ofs)
return 0;
/* Still some overlap but we don't need to move it in the tree */
this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
this->ofs = newfrag->ofs + newfrag->size;
/* And mark them REF_NORMAL so the GC takes a look at them */
if (this->node)
mark_ref_normal(this->node->raw);
mark_ref_normal(newfrag->node->raw);
return 0;
}
/*
* Given an inode, probably with existing tree of fragments, add the new node
* to the fragment tree.
*/
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
int ret;
struct jffs2_node_frag *newfrag;
if (unlikely(!fn->size))
return 0;
newfrag = new_fragment(fn, fn->ofs, fn->size);
if (unlikely(!newfrag))
return -ENOMEM;
newfrag->node->frags = 1;
dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n",
fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
if (unlikely(ret))
return ret;
/* If we now share a page with other nodes, mark either previous
or next node REF_NORMAL, as appropriate. */
if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
struct jffs2_node_frag *prev = frag_prev(newfrag);
mark_ref_normal(fn->raw);
/* If we don't start at zero there's _always_ a previous */
if (prev->node)
mark_ref_normal(prev->node->raw);
}
if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
struct jffs2_node_frag *next = frag_next(newfrag);
if (next) {
mark_ref_normal(fn->raw);
if (next->node)
mark_ref_normal(next->node->raw);
}
}
jffs2_dbg_fragtree_paranoia_check_nolock(f);
return 0;
}
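/*
* Caller-side sketch (illustrative only -- the jffs2_example_* name is made
* up and the real sequence lives in write.c): after a full dnode has been
* written to flash it is hooked into the inode's fragment tree, and marked
* obsolete again if that fails.
*/
static int jffs2_example_attach_dnode(struct jffs2_sb_info *c,
        struct jffs2_inode_info *f,
        struct jffs2_full_dnode *fn)
{
        int ret = jffs2_add_full_dnode_to_inode(c, f, fn);

        if (ret) {
                /* Undo the write: mark the raw node obsolete and free the dnode */
                jffs2_mark_node_obsolete(c, fn->raw);
                jffs2_free_full_dnode(fn);
        }
        return ret;
}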
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
spin_lock(&c->inocache_lock);
ic->state = state;
wake_up(&c->inocache_wq);
spin_unlock(&c->inocache_lock);
}
/* During mount, this needs no locking. During normal operation, its
callers want to do other stuff while still holding the inocache_lock.
Rather than introducing special case get_ino_cache functions or
callbacks, we just let the caller do the locking itself. */
struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
struct jffs2_inode_cache *ret;
ret = c->inocache_list[ino % c->inocache_hashsize];
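/* Hash buckets are kept sorted by inode number (see jffs2_add_ino_cache()
below), so we can stop walking as soon as we reach an entry whose ino is
not smaller than the one we are looking for. */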
while (ret && ret->ino < ino) {
ret = ret->next;
}
if (ret && ret->ino != ino)
ret = NULL;
return ret;
}
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
{
struct jffs2_inode_cache **prev;
spin_lock(&c->inocache_lock);
if (!new->ino)
new->ino = ++c->highest_ino;
dbg_inocache("add %p (ino #%u)\n", new, new->ino);
prev = &c->inocache_list[new->ino % c->inocache_hashsize];
while ((*prev) && (*prev)->ino < new->ino) {
prev = &(*prev)->next;
}
new->next = *prev;
*prev = new;
spin_unlock(&c->inocache_lock);
}
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
struct jffs2_inode_cache **prev;
#ifdef CONFIG_JFFS2_FS_XATTR
BUG_ON(old->xref);
#endif
dbg_inocache("del %p (ino #%u)\n", old, old->ino);
spin_lock(&c->inocache_lock);
prev = &c->inocache_list[old->ino % c->inocache_hashsize];
while ((*prev) && (*prev)->ino < old->ino) {
prev = &(*prev)->next;
}
if ((*prev) == old) {
*prev = old->next;
}
/* Free it now unless it's in READING or CLEARING state, which
are the transitions upon read_inode() and clear_inode(). The
rest of the time we know nobody else is looking at it, and
if it's held by read_inode() or clear_inode() they'll free it
for themselves. */
if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
jffs2_free_inode_cache(old);
spin_unlock(&c->inocache_lock);
}
void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
int i;
struct jffs2_inode_cache *this, *next;
for (i=0; i < c->inocache_hashsize; i++) {
this = c->inocache_list[i];
while (this) {
next = this->next;
jffs2_xattr_free_inode(c, this);
jffs2_free_inode_cache(this);
this = next;
}
c->inocache_list[i] = NULL;
}
}
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
{
int i;
struct jffs2_raw_node_ref *this, *next;
for (i=0; i<c->nr_blocks; i++) {
this = c->blocks[i].first_node;
while (this) {
if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
next = this[REFS_PER_BLOCK].next_in_ino;
else
next = NULL;
jffs2_free_refblock(this);
this = next;
}
c->blocks[i].first_node = c->blocks[i].last_node = NULL;
}
}
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
/* The common case in lookup is that there will be a node
which precisely matches. So we go looking for that first */
struct rb_node *next;
struct jffs2_node_frag *prev = NULL;
struct jffs2_node_frag *frag = NULL;
dbg_fragtree2("root %p, offset %d\n", fragtree, offset);
next = fragtree->rb_node;
while(next) {
frag = rb_entry(next, struct jffs2_node_frag, rb);
if (frag->ofs + frag->size <= offset) {
/* Remember the closest smaller match on the way down */
if (!prev || frag->ofs > prev->ofs)
prev = frag;
next = frag->rb.rb_right;
} else if (frag->ofs > offset) {
next = frag->rb.rb_left;
} else {
return frag;
}
}
/* Exact match not found. Go back up looking at each parent,
and return the closest smaller one */
if (prev)
dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n",
prev->ofs, prev->ofs+prev->size);
else
dbg_fragtree2("returning NULL, empty fragtree\n");
return prev;
}
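/*
* Lookup usage sketch (illustrative only -- the jffs2_example_* name is made
* up; read.c does the real thing): find the full dnode, if any, which holds
* the data at file offset 'ofs'.
*/
static struct jffs2_full_dnode *jffs2_example_node_for_offset(struct rb_root *fragtree,
        uint32_t ofs)
{
        struct jffs2_node_frag *frag = jffs2_lookup_node_frag(fragtree, ofs);

        if (!frag || frag->ofs + frag->size <= ofs)
                return NULL;    /* offset lies beyond the last fragment */
        return frag->node;      /* NULL here means the offset falls in a hole */
}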
/* Pass 'c' argument to indicate that nodes should be marked obsolete as
they're killed. */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
struct jffs2_node_frag *frag;
struct jffs2_node_frag *parent;
if (!root->rb_node)
return;
dbg_fragtree("killing\n");
frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
while(frag) {
if (frag->rb.rb_left) {
frag = frag_left(frag);
continue;
}
if (frag->rb.rb_right) {
frag = frag_right(frag);
continue;
}
if (frag->node && !(--frag->node->frags)) {
/* Not a hole, and it's the final remaining frag
of this node. Free the node */
if (c)
jffs2_mark_node_obsolete(c, frag->node->raw);
jffs2_free_full_dnode(frag->node);
}
parent = frag_parent(frag);
if (parent) {
if (frag_left(parent) == frag)
parent->rb.rb_left = NULL;
else
parent->rb.rb_right = NULL;
}
jffs2_free_node_frag(frag);
frag = parent;
cond_resched();
}
}
struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb,
uint32_t ofs, uint32_t len,
struct jffs2_inode_cache *ic)
{
struct jffs2_raw_node_ref *ref;
BUG_ON(!jeb->allocated_refs);
jeb->allocated_refs--;
ref = jeb->last_node;
dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
ref->next_in_ino);
while (ref->flash_offset != REF_EMPTY_NODE) {
if (ref->flash_offset == REF_LINK_NODE)
ref = ref->next_in_ino;
else
ref++;
}
dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
ref->flash_offset, ofs, ref->next_in_ino, len);
ref->flash_offset = ofs;
if (!jeb->first_node) {
jeb->first_node = ref;
BUG_ON(ref_offset(ref) != jeb->offset);
} else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);
JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
ref, ref_offset(ref), ref_offset(ref)+len,
ref_offset(jeb->last_node),
ref_offset(jeb->last_node)+last_len);
BUG();
}
jeb->last_node = ref;
if (ic) {
ref->next_in_ino = ic->nodes;
ic->nodes = ref;
} else {
ref->next_in_ino = NULL;
}
switch(ref_flags(ref)) {
case REF_UNCHECKED:
c->unchecked_size += len;
jeb->unchecked_size += len;
break;
case REF_NORMAL:
case REF_PRISTINE:
c->used_size += len;
jeb->used_size += len;
break;
case REF_OBSOLETE:
c->dirty_size += len;
jeb->dirty_size += len;
break;
}
c->free_size -= len;
jeb->free_size -= len;
#ifdef TEST_TOTLEN
/* Set (and test) __totlen field... for now */
ref->__totlen = len;
ref_totlen(c, jeb, ref);
#endif
return ref;
}
/* No locking, no reservation of 'ref'. Do not use on a live file system */
int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
uint32_t size)
{
if (!size)
return 0;
if (unlikely(size > jeb->free_size)) {
pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
size, jeb->free_size, jeb->wasted_size);
BUG();
}
/* REF_EMPTY_NODE is !obsolete, so that works OK */
if (jeb->last_node && ref_obsolete(jeb->last_node)) {
#ifdef TEST_TOTLEN
jeb->last_node->__totlen += size;
#endif
c->dirty_size += size;
c->free_size -= size;
jeb->dirty_size += size;
jeb->free_size -= size;
} else {
uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size;
ofs |= REF_OBSOLETE;
jffs2_link_node_ref(c, jeb, ofs, size, NULL);
}
return 0;
}
/* Calculate totlen from surrounding nodes or eraseblock */
static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *ref)
{
uint32_t ref_end;
struct jffs2_raw_node_ref *next_ref = ref_next(ref);
if (next_ref)
ref_end = ref_offset(next_ref);
else {
if (!jeb)
jeb = &c->blocks[ref->flash_offset / c->sector_size];
/* Last node in block. Use free_space */
if (unlikely(ref != jeb->last_node)) {
pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
ref, ref_offset(ref), jeb->last_node,
jeb->last_node ?
ref_offset(jeb->last_node) : 0);
BUG();
}
ref_end = jeb->offset + c->sector_size - jeb->free_size;
}
return ref_end - ref_offset(ref);
}
uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *ref)
{
uint32_t ret;
ret = __ref_totlen(c, jeb, ref);
#ifdef TEST_TOTLEN
if (unlikely(ret != ref->__totlen)) {
if (!jeb)
jeb = &c->blocks[ref->flash_offset / c->sector_size];
pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
ref, ref_offset(ref), ref_offset(ref) + ref->__totlen,
ret, ref->__totlen);
if (ref_next(ref)) {
pr_crit("next %p (0x%08x-0x%08x)\n",
ref_next(ref), ref_offset(ref_next(ref)),
ref_offset(ref_next(ref)) + ref->__totlen);
} else
pr_crit("No next ref. jeb->last_node is %p\n",
jeb->last_node);
pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n",
jeb->wasted_size, jeb->dirty_size, jeb->used_size,
jeb->free_size);
#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
__jffs2_dbg_dump_node_refs_nolock(c, jeb);
#endif
WARN_ON(1);
ret = ref->__totlen;
}
#endif /* TEST_TOTLEN */
return ret;
}


@@ -0,0 +1,480 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef __JFFS2_NODELIST_H__
#define __JFFS2_NODELIST_H__
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/jffs2.h>
#include "jffs2_fs_sb.h"
#include "jffs2_fs_i.h"
#include "xattr.h"
#include "acl.h"
#include "summary.h"
#ifdef __ECOS
#include "os-ecos.h"
#else
#include "os-linux.h"
#endif
#define JFFS2_NATIVE_ENDIAN
/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
whatever OS we're actually running on here too. */
#if defined(JFFS2_NATIVE_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){x})
#define cpu_to_je32(x) ((jint32_t){x})
#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
#define constant_cpu_to_je16(x) ((jint16_t){x})
#define constant_cpu_to_je32(x) ((jint32_t){x})
#define je16_to_cpu(x) ((x).v16)
#define je32_to_cpu(x) ((x).v32)
#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
#elif defined(JFFS2_BIG_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
#define constant_cpu_to_je16(x) ((jint16_t){__constant_cpu_to_be16(x)})
#define constant_cpu_to_je32(x) ((jint32_t){__constant_cpu_to_be32(x)})
#define je16_to_cpu(x) (be16_to_cpu(x.v16))
#define je32_to_cpu(x) (be32_to_cpu(x.v32))
#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
#elif defined(JFFS2_LITTLE_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
#define constant_cpu_to_je16(x) ((jint16_t){__constant_cpu_to_le16(x)})
#define constant_cpu_to_je32(x) ((jint32_t){__constant_cpu_to_le32(x)})
#define je16_to_cpu(x) (le16_to_cpu(x.v16))
#define je32_to_cpu(x) (le32_to_cpu(x.v32))
#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
#else
#error wibble
#endif
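/*
* Usage sketch for the conversion macros above (illustrative only -- the
* jffs2_example_* name is made up): multi-byte on-flash fields are always
* stored through the je16/je32 wrappers. The hdr_crc calculation is elided.
*/
static inline void jffs2_example_fill_header(struct jffs2_unknown_node *n,
        uint32_t totlen)
{
        n->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        n->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
        n->totlen = cpu_to_je32(totlen);
        /* n->hdr_crc would normally be crc32() over the first three fields */
}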
/* The minimal node header size */
#define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent)
/*
This is all we need to keep in-core for each raw node during normal
operation. As and when we do read_inode on a particular inode, we can
scan the nodes which are listed for it and build up a proper map of
which nodes are currently valid. JFFSv1 always used to keep that whole
map in core for each inode.
*/
struct jffs2_raw_node_ref
{
struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
for this object. If this _is_ the last, it points to the inode_cache,
xattr_ref or xattr_datum instead. The common part of those structures
has NULL in the first word. See jffs2_raw_ref_to_ic() below */
uint32_t flash_offset;
#undef TEST_TOTLEN
#ifdef TEST_TOTLEN
uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
#endif
};
#define REF_LINK_NODE ((int32_t)-1)
#define REF_EMPTY_NODE ((int32_t)-2)
/* Use blocks of about 256 bytes */
#define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1)
static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref)
{
ref++;
/* Link to another block of refs */
if (ref->flash_offset == REF_LINK_NODE) {
ref = ref->next_in_ino;
if (!ref)
return ref;
}
/* End of chain */
if (ref->flash_offset == REF_EMPTY_NODE)
return NULL;
return ref;
}
static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
{
while(raw->next_in_ino)
raw = raw->next_in_ino;
/* NB. This can be a jffs2_xattr_datum or jffs2_xattr_ref and
not actually a jffs2_inode_cache. Check ->class */
return ((struct jffs2_inode_cache *)raw);
}
/* flash_offset & 3 always has to be zero, because nodes are
always aligned at 4 bytes. So we have a couple of extra bits
to play with, which indicate the node's status; see below: */
#define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */
#define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */
#define REF_PRISTINE 2 /* Completely clean. GC without looking */
#define REF_NORMAL 3 /* Possibly overlapped. Read the page and write again on GC */
#define ref_flags(ref) ((ref)->flash_offset & 3)
#define ref_offset(ref) ((ref)->flash_offset & ~3)
#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
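/* Example (illustrative): a ref with flash_offset 0x00041002 describes the
node at flash address 0x00041000 in state REF_PRISTINE (low two bits == 2);
ref_offset() masks the state bits off and ref_flags() extracts them. */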
/* Dirent nodes should be REF_PRISTINE only if they are not a deletion
dirent. Deletion dirents should be REF_NORMAL so that GC gets to
throw them away when appropriate */
#define dirent_node_state(rd) ( (je32_to_cpu((rd)->ino)?REF_PRISTINE:REF_NORMAL) )
/* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates
it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get
copied. If you need to do anything different to GC inode-less nodes, then
you need to modify gc.c accordingly. */
/* For each inode in the filesystem, we need to keep a record of
nlink, because it would be a PITA to scan the whole directory tree
at read_inode() time to calculate it, and to keep sufficient information
in the raw_node_ref (basically both parent and child inode number for
dirent nodes) would take more space than this does. We also keep
a pointer to the first physical node which is part of this inode, too.
*/
struct jffs2_inode_cache {
/* First part of structure is shared with other objects which
can terminate the raw node refs' next_in_ino list -- which are
currently struct jffs2_xattr_datum and struct jffs2_xattr_ref. */
struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
temporary lists of dirents, and later must be set to
NULL to mark the end of the raw_node_ref->next_in_ino
chain. */
struct jffs2_raw_node_ref *nodes;
uint8_t class; /* It's used for identification */
/* end of shared structure */
uint8_t flags;
uint16_t state;
uint32_t ino;
struct jffs2_inode_cache *next;
#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_ref *xref;
#endif
uint32_t pino_nlink; /* Directories store parent inode
here; other inodes store nlink.
Zero always means that it's
completely unlinked. */
};
/* Inode states for 'state' above. We need the 'GC' state to prevent
someone from doing a read_inode() while we're moving a 'REF_PRISTINE'
node without going through all the iget() nonsense */
#define INO_STATE_UNCHECKED 0 /* CRC checks not yet done */
#define INO_STATE_CHECKING 1 /* CRC checks in progress */
#define INO_STATE_PRESENT 2 /* In core */
#define INO_STATE_CHECKEDABSENT 3 /* Checked, cleared again */
#define INO_STATE_GC 4 /* GCing a 'pristine' node */
#define INO_STATE_READING 5 /* In read_inode() */
#define INO_STATE_CLEARING 6 /* In clear_inode() */
#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
#define RAWNODE_CLASS_INODE_CACHE 0
#define RAWNODE_CLASS_XATTR_DATUM 1
#define RAWNODE_CLASS_XATTR_REF 2
#define INOCACHE_HASHSIZE_MIN 128
#define INOCACHE_HASHSIZE_MAX 1024
#define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size)
/*
Larger representation of a raw node, kept in-core only when the
struct inode for this particular ino is instantiated.
*/
struct jffs2_full_dnode
{
struct jffs2_raw_node_ref *raw;
uint32_t ofs; /* The offset to which the data of this node belongs */
uint32_t size;
uint32_t frags; /* Number of fragments which currently refer
to this node. When this reaches zero,
the node is obsolete. */
};
/*
Even larger representation of a raw node, kept in-core only while
we're actually building up the original map of which nodes go where,
in read_inode()
*/
struct jffs2_tmp_dnode_info
{
struct rb_node rb;
struct jffs2_full_dnode *fn;
uint32_t version;
uint32_t data_crc;
uint32_t partial_crc;
uint16_t csize;
uint16_t overlapped;
};
/* Temporary data structure used during readinode. */
struct jffs2_readinode_info
{
struct rb_root tn_root;
struct jffs2_tmp_dnode_info *mdata_tn;
uint32_t highest_version;
uint32_t latest_mctime;
uint32_t mctime_ver;
struct jffs2_full_dirent *fds;
struct jffs2_raw_node_ref *latest_ref;
};
struct jffs2_full_dirent
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *next;
uint32_t version;
uint32_t ino; /* == zero for unlink */
unsigned int nhash;
unsigned char type;
unsigned char name[0];
};
/*
Fragments - used to build a map of which raw node to obtain
data from for each part of the ino
*/
struct jffs2_node_frag
{
struct rb_node rb;
struct jffs2_full_dnode *node; /* NULL for holes */
uint32_t size;
uint32_t ofs; /* The offset to which this fragment belongs */
};
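/* Example layout (illustrative): a file written as node A covering
0x0000-0x2000 and later partially overwritten by node B covering
0x1000-0x1800 ends up with three fragments,
[0x0000-0x1000)->A, [0x1000-0x1800)->B, [0x1800-0x2000)->A,
while a fragment with node == NULL represents a hole (e.g. from a write
after seeking past the end of the file). */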
struct jffs2_eraseblock
{
struct list_head list;
int bad_count;
uint32_t offset; /* of this block in the MTD */
uint32_t unchecked_size;
uint32_t used_size;
uint32_t dirty_size;
uint32_t wasted_size;
uint32_t free_size; /* Note that sector_size - free_size
is the address of the first free space */
uint32_t allocated_refs;
struct jffs2_raw_node_ref *first_node;
struct jffs2_raw_node_ref *last_node;
struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */
};
static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
{
return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024);
}
#define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c))
#define ALLOC_NORMAL 0 /* Normal allocation */
#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
#define ALLOC_GC 2 /* Space requested for GC. Give it or die */
#define ALLOC_NORETRY 3 /* For jffs2_write_dnode: On failure, return -EAGAIN instead of retrying */
/* How much dirty space before it goes on the very_dirty_list */
#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))
/* check if dirty space is more than 255 Byte */
#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
#define PAD(x) (((x)+3)&~3)
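/* For example (illustrative): PAD(1) == 4, PAD(4) == 4, PAD(0x11) == 0x14;
every node occupies a multiple of 4 bytes on flash. */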
static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
{
if (old_valid_dev(rdev)) {
jdev->old_id = cpu_to_je16(old_encode_dev(rdev));
return sizeof(jdev->old_id);
} else {
jdev->new_id = cpu_to_je32(new_encode_dev(rdev));
return sizeof(jdev->new_id);
}
}
static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
{
struct rb_node *node = rb_first(root);
if (!node)
return NULL;
return rb_entry(node, struct jffs2_node_frag, rb);
}
static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
{
struct rb_node *node = rb_last(root);
if (!node)
return NULL;
return rb_entry(node, struct jffs2_node_frag, rb);
}
#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
#define frag_erase(frag, list) rb_erase(&frag->rb, list);
#define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
#define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
#define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
#define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
#define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb)
#define tn_erase(tn, list) rb_erase(&tn->rb, list);
#define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
#define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb)
/* nodelist.c */
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old);
void jffs2_free_ino_caches(struct jffs2_sb_info *c);
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb,
uint32_t ofs, uint32_t len,
struct jffs2_inode_cache *ic);
extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *ref);
/* nodemgmt.c */
int jffs2_thread_should_wake(struct jffs2_sb_info *c);
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, int prio, uint32_t sumsize);
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize);
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
uint32_t ofs, uint32_t len,
struct jffs2_inode_cache *ic);
void jffs2_complete_reservation(struct jffs2_sb_info *c);
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
/* write.c */
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, const unsigned char *data,
uint32_t datalen, int alloc_mode);
struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_dirent *rd, const unsigned char *name,
uint32_t namelen, int alloc_mode);
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen);
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, const struct qstr *qstr);
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name,
int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino,
uint8_t type, const char *name, int namelen, uint32_t time);
/* readinode.c */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint32_t ino, struct jffs2_raw_inode *latest_node);
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
/* malloc.c */
int jffs2_create_slab_caches(void);
void jffs2_destroy_slab_caches(void);
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize);
void jffs2_free_full_dirent(struct jffs2_full_dirent *);
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void);
void jffs2_free_full_dnode(struct jffs2_full_dnode *);
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void);
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *);
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
void jffs2_free_raw_inode(struct jffs2_raw_inode *);
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb, int nr);
void jffs2_free_refblock(struct jffs2_raw_node_ref *);
struct jffs2_node_frag *jffs2_alloc_node_frag(void);
void jffs2_free_node_frag(struct jffs2_node_frag *);
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
void jffs2_free_inode_cache(struct jffs2_inode_cache *);
#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void);
void jffs2_free_xattr_datum(struct jffs2_xattr_datum *);
struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void);
void jffs2_free_xattr_ref(struct jffs2_xattr_ref *);
#endif
/* gc.c */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
/* read.c */
int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_full_dnode *fd, unsigned char *buf,
int ofs, int len);
int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *buf, uint32_t offset, uint32_t len);
char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
/* scan.c */
int jffs2_scan_medium(struct jffs2_sb_info *c);
void jffs2_rotate_lists(struct jffs2_sb_info *c);
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size);
/* build.c */
int jffs2_do_mount_fs(struct jffs2_sb_info *c);
/* erase.c */
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
/* wbuf.c */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#endif
#include "debug.h"
#endif /* __JFFS2_NODELIST_H__ */


@@ -0,0 +1,877 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"
/*
* Check whether the user is allowed to write.
*/
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
uint32_t avail;
struct jffs2_mount_opts *opts = &c->mount_opts;
avail = c->dirty_size + c->free_size + c->unchecked_size +
c->erasing_size - c->resv_blocks_write * c->sector_size
- c->nospc_dirty_size;
if (avail < 2 * opts->rp_size)
jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
"erasing_size %u, unchecked_size %u, "
"nr_erasing_blocks %u, avail %u, resrv %u\n",
opts->rp_size, c->dirty_size, c->free_size,
c->erasing_size, c->unchecked_size,
c->nr_erasing_blocks, avail, c->nospc_dirty_size);
if (avail > opts->rp_size)
return 1;
/* Always allow root */
if (capable(CAP_SYS_RESOURCE))
return 1;
jffs2_dbg(1, "forbid writing\n");
return 0;
}
/**
* jffs2_reserve_space - request physical space to write nodes to flash
* @c: superblock info
* @minsize: Minimum acceptable size of allocation
* @len: Returned value of allocation length
* @prio: Allocation type - ALLOC_{NORMAL,DELETION}
* @sumsize: Amount of summary space to reserve for this node
*           (JFFS2_SUMMARY_NOSUM_SIZE disables summary collection)
*
* Requests a block of physical space on the flash. Returns zero for success
* and puts the allocated length into 'len', or returns -ENOSPC or another
* error if appropriate.
*
* If it returns zero, jffs2_reserve_space() also downs the per-filesystem
* allocation semaphore, to prevent more than one allocation from being
* active at any time. The semaphore is later released by jffs2_complete_reservation().
*
* jffs2_reserve_space() may trigger garbage collection in order to make room
* for the requested allocation.
*/
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize);
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, int prio, uint32_t sumsize)
{
int ret = -EAGAIN;
int blocksneeded = c->resv_blocks_write;
/* align it */
minsize = PAD(minsize);
jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
mutex_lock(&c->alloc_sem);
jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
spin_lock(&c->erase_completion_lock);
/*
* Check if the free space is greater than the size of the reserved pool.
* If not, only allow root to proceed with writing.
*/
if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
ret = -ENOSPC;
goto out;
}
/* this needs a little more thought (true <tglx> :)) */
while(ret == -EAGAIN) {
while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
uint32_t dirty, avail;
/* calculate real dirty size
* dirty_size contains blocks on erase_pending_list
* those blocks are counted in c->nr_erasing_blocks.
* If a block is actually being erased, it is no longer counted as dirty_space,
* but it is still counted in c->nr_erasing_blocks, so we add the erasing size
* and subtract c->nr_erasing_blocks * c->sector_size again.
* Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
* This helps us to force gc and eventually pick a clean block to spread the load.
* We add unchecked_size here, as we hopefully will find some space to use.
* This will affect the sum only once, as gc first finishes checking
* of nodes.
*/
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
if (dirty < c->nospc_dirty_size) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
__func__);
break;
}
jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
dirty, c->unchecked_size,
c->sector_size);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
/* Calc possibly available space. Possibly available means that we
* don't know, if unchecked size contains obsoleted nodes, which could give us some
* more usable space. This will affect the sum only once, as gc first finishes checking
* of nodes.
* Return -ENOSPC if the maximum possibly available space is less than or equal to
* blocksneeded * sector_size.
* This blocks endless gc looping on a filesystem, which is nearly full, even if
* the check above passes.
*/
avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
if ( (avail / c->sector_size) <= blocksneeded) {
if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
__func__);
break;
}
jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
avail, blocksneeded * c->sector_size);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
mutex_unlock(&c->alloc_sem);
jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
c->nr_free_blocks, c->nr_erasing_blocks,
c->free_size, c->dirty_size, c->wasted_size,
c->used_size, c->erasing_size, c->bad_size,
c->free_size + c->dirty_size +
c->wasted_size + c->used_size +
c->erasing_size + c->bad_size,
c->flash_size);
spin_unlock(&c->erase_completion_lock);
ret = jffs2_garbage_collect_pass(c);
if (ret == -EAGAIN) {
spin_lock(&c->erase_completion_lock);
if (c->nr_erasing_blocks &&
list_empty(&c->erase_pending_list) &&
list_empty(&c->erase_complete_list)) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&c->erase_wait, &wait);
jffs2_dbg(1, "%s waiting for erase to complete\n",
__func__);
spin_unlock(&c->erase_completion_lock);
schedule();
} else
spin_unlock(&c->erase_completion_lock);
} else if (ret)
return ret;
cond_resched();
if (signal_pending(current))
return -EINTR;
mutex_lock(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
}
}
out:
spin_unlock(&c->erase_completion_lock);
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
if (ret)
mutex_unlock(&c->alloc_sem);
return ret;
}
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize)
{
int ret = -EAGAIN;
minsize = PAD(minsize);
jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
jffs2_dbg(1, "%s(): looping, ret is %d\n",
__func__, ret);
}
}
spin_unlock(&c->erase_completion_lock);
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
return ret;
}
/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
if (c->nextblock == NULL) {
jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
__func__, jeb->offset);
return;
}
/* Check, if we have a dirty block now, or if it was dirty already */
if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
c->dirty_size += jeb->wasted_size;
c->wasted_size -= jeb->wasted_size;
jeb->dirty_size += jeb->wasted_size;
jeb->wasted_size = 0;
if (VERYDIRTY(c, jeb->dirty_size)) {
jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size,
jeb->used_size);
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size,
jeb->used_size);
list_add_tail(&jeb->list, &c->dirty_list);
}
} else {
jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size,
jeb->used_size);
list_add_tail(&jeb->list, &c->clean_list);
}
c->nextblock = NULL;
}
/* Select a new jeb for nextblock */
static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
struct list_head *next;
/* Take the next block off the 'free' list */
if (list_empty(&c->free_list)) {
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_list)) {
struct jffs2_eraseblock *ejeb;
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
list_move_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
__func__, ejeb->offset);
}
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_pending_wbuf_list)) {
jffs2_dbg(1, "%s(): Flushing write buffer\n",
__func__);
/* c->nextblock is NULL, no update to c->nextblock allowed */
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
/* Have another go. It'll be on the erasable_list now */
return -EAGAIN;
}
if (!c->nr_erasing_blocks) {
/* Ouch. We're in GC, or we wouldn't have got here.
And there's no space left. At all. */
pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
c->nr_erasing_blocks, c->nr_free_blocks,
list_empty(&c->erasable_list) ? "yes" : "no",
list_empty(&c->erasing_list) ? "yes" : "no",
list_empty(&c->erase_pending_list) ? "yes" : "no");
return -ENOSPC;
}
spin_unlock(&c->erase_completion_lock);
/* Don't wait for it; just erase one right now */
jffs2_erase_pending_blocks(c, 1);
spin_lock(&c->erase_completion_lock);
/* An erase may have failed, decreasing the
amount of free space available. So we must
restart from the beginning */
return -EAGAIN;
}
next = c->free_list.next;
list_del(next);
c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
c->nr_free_blocks--;
jffs2_sum_reset_collected(c->summary); /* reset collected summary */
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
/* adjust write buffer offset, else we get a non contiguous write bug */
if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
c->wbuf_ofs = 0xffffffff;
#endif
jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
__func__, c->nextblock->offset);
return 0;
}
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize)
{
struct jffs2_eraseblock *jeb = c->nextblock;
uint32_t reserved_size; /* for summary information at the end of the jeb */
int ret;
restart:
reserved_size = 0;
if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
/* NOSUM_SIZE means not to generate summary */
if (jeb) {
reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
dbg_summary("minsize=%d , jeb->free=%d ,"
"summary->size=%d , sumsize=%d\n",
minsize, jeb->free_size,
c->summary->sum_size, sumsize);
}
/* Is there enough space for writing out the current node, or do we have to
write out the summary information now, close this jeb and select a new nextblock? */
if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
/* Has summary been disabled for this jeb? */
if (jffs2_sum_is_disabled(c->summary)) {
sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
goto restart;
}
/* Writing out the collected summary information */
dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
ret = jffs2_sum_write_sumnode(c);
if (ret)
return ret;
if (jffs2_sum_is_disabled(c->summary)) {
/* jffs2_sum_write_sumnode() couldn't write out the summary information;
disable the summary for this jeb and free the collected information
*/
sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
goto restart;
}
jffs2_close_nextblock(c, jeb);
jeb = NULL;
/* always keep a valid value in reserved_size */
reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
}
} else {
if (jeb && minsize > jeb->free_size) {
uint32_t waste;
/* Skip the end of this block and file it as having some dirty space */
/* If there's a pending write to it, flush now */
if (jffs2_wbuf_dirty(c)) {
spin_unlock(&c->erase_completion_lock);
jffs2_dbg(1, "%s(): Flushing write buffer\n",
__func__);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
jeb = c->nextblock;
goto restart;
}
spin_unlock(&c->erase_completion_lock);
ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
/* Just lock it again and continue. Nothing much can change because
we hold c->alloc_sem anyway. In fact, it's not entirely clear why
we hold c->erase_completion_lock in the majority of this function...
but that's a question for another (more caffeine-rich) day. */
spin_lock(&c->erase_completion_lock);
if (ret)
return ret;
waste = jeb->free_size;
jffs2_link_node_ref(c, jeb,
(jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
waste, NULL);
/* FIXME: that made it count as dirty. Convert to wasted */
jeb->dirty_size -= waste;
c->dirty_size -= waste;
jeb->wasted_size += waste;
c->wasted_size += waste;
jffs2_close_nextblock(c, jeb);
jeb = NULL;
}
}
if (!jeb) {
ret = jffs2_find_nextblock(c);
if (ret)
return ret;
jeb = c->nextblock;
if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
jeb->offset, jeb->free_size);
goto restart;
}
}
/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
enough space */
*len = jeb->free_size - reserved_size;
if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
!jeb->first_node->next_in_ino) {
/* The only node in it beforehand was a CLEANMARKER node (we think).
So mark it obsolete now that there's going to be another node
in the block. This will reduce used_size to zero, but we've
already set c->nextblock so that jffs2_mark_node_obsolete()
won't try to refile it to the dirty_list.
*/
spin_unlock(&c->erase_completion_lock);
jffs2_mark_node_obsolete(c, jeb->first_node);
spin_lock(&c->erase_completion_lock);
}
jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
__func__,
*len, jeb->offset + (c->sector_size - jeb->free_size));
return 0;
}
/**
* jffs2_add_physical_node_ref - add a physical node reference to the list
* @c: superblock info
* @ofs: flash offset of the new node, with the REF_* state in its low two bits
* @len: length of this physical node
* @ic: inode cache to which the new node reference should be linked (may be NULL)
*
* Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
*
* Must be called with the alloc_sem held.
*/
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
uint32_t ofs, uint32_t len,
struct jffs2_inode_cache *ic)
{
struct jffs2_eraseblock *jeb;
struct jffs2_raw_node_ref *new;
jeb = &c->blocks[ofs / c->sector_size];
jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
__func__, ofs & ~3, ofs & 3, len);
#if 1
/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
if c->nextblock is set. Note that wbuf.c will file obsolete nodes
even after refiling c->nextblock */
if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
&& (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
ofs & ~3, ofs & 3);
if (c->nextblock)
pr_warn("nextblock 0x%08x", c->nextblock->offset);
else
pr_warn("No nextblock");
pr_cont(", expected at %08x\n",
jeb->offset + (c->sector_size - jeb->free_size));
return ERR_PTR(-EINVAL);
}
#endif
spin_lock(&c->erase_completion_lock);
new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size,
jeb->used_size);
if (jffs2_wbuf_dirty(c)) {
/* Flush the last write in the block if it's outstanding */
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
}
list_add_tail(&jeb->list, &c->clean_list);
c->nextblock = NULL;
}
jffs2_dbg_acct_sanity_check_nolock(c,jeb);
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
return new;
}
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
jffs2_dbg(1, "jffs2_complete_reservation()\n");
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
}
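/*
* Typical allocation cycle (sketch only -- the jffs2_example_* name is made
* up and this is heavily simplified from write.c; the flash write itself and
* most error handling are elided):
*/
static int jffs2_example_write_cycle(struct jffs2_sb_info *c,
        struct jffs2_inode_info *f, uint32_t totlen)
{
        uint32_t alloclen, flash_ofs;
        int ret;

        /* Takes c->alloc_sem and guarantees at least 'totlen' usable bytes */
        ret = jffs2_reserve_space(c, totlen, &alloclen, ALLOC_NORMAL,
                JFFS2_SUMMARY_INODE_SIZE);
        if (ret)
                return ret;

        flash_ofs = write_ofs(c);

        /* ... write PAD(totlen) bytes of node data at flash_ofs ... */

        /* Account for the new node and hook it onto the inode's ref list */
        jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, PAD(totlen),
                f->inocache);

        /* Drops c->alloc_sem and triggers the GC if appropriate */
        jffs2_complete_reservation(c);
        return 0;
}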
static inline int on_list(struct list_head *obj, struct list_head *head)
{
struct list_head *this;
list_for_each(this, head) {
if (this == obj) {
jffs2_dbg(1, "%p is on list at %p\n", obj, head);
return 1;
}
}
return 0;
}
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
struct jffs2_eraseblock *jeb;
int blocknr;
struct jffs2_unknown_node n;
int ret, addedsize;
size_t retlen;
uint32_t freed_len;
if(unlikely(!ref)) {
pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
return;
}
if (ref_obsolete(ref)) {
jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
__func__, ref_offset(ref));
return;
}
blocknr = ref->flash_offset / c->sector_size;
if (blocknr >= c->nr_blocks) {
pr_notice("raw node at 0x%08x is off the end of device!\n",
ref->flash_offset);
BUG();
}
jeb = &c->blocks[blocknr];
if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
!(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
/* Hm. This may confuse static lock analysis. If any of the above
three conditions is false, we're going to return from this
function without actually obliterating any nodes or freeing
any jffs2_raw_node_refs. So we don't need to stop erases from
happening, or protect against people holding an obsolete
jffs2_raw_node_ref without the erase_completion_lock. */
mutex_lock(&c->erase_free_sem);
}
spin_lock(&c->erase_completion_lock);
freed_len = ref_totlen(c, jeb, ref);
if (ref_flags(ref) == REF_UNCHECKED) {
D1(if (unlikely(jeb->unchecked_size < freed_len)) {
pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
freed_len, blocknr,
ref->flash_offset, jeb->used_size);
BUG();
})
jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
ref_offset(ref), freed_len);
jeb->unchecked_size -= freed_len;
c->unchecked_size -= freed_len;
} else {
D1(if (unlikely(jeb->used_size < freed_len)) {
pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
freed_len, blocknr,
ref->flash_offset, jeb->used_size);
BUG();
})
jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
ref_offset(ref), freed_len);
jeb->used_size -= freed_len;
c->used_size -= freed_len;
}
// Take care that wasted size is taken into account
if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
jffs2_dbg(1, "Dirtying\n");
addedsize = freed_len;
jeb->dirty_size += freed_len;
c->dirty_size += freed_len;
/* Convert wasted space to dirty, if not a bad block */
if (jeb->wasted_size) {
if (on_list(&jeb->list, &c->bad_used_list)) {
jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
jeb->offset);
addedsize = 0; /* To fool the refiling code later */
} else {
jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
jeb->wasted_size, jeb->offset);
addedsize += jeb->wasted_size;
jeb->dirty_size += jeb->wasted_size;
c->dirty_size += jeb->wasted_size;
c->wasted_size -= jeb->wasted_size;
jeb->wasted_size = 0;
}
}
} else {
jffs2_dbg(1, "Wasting\n");
addedsize = 0;
jeb->wasted_size += freed_len;
c->wasted_size += freed_len;
}
ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
if (c->flags & JFFS2_SB_FLAG_SCANNING) {
/* Flash scanning is in progress. Don't muck about with the block
lists because they're not ready yet, and don't actually
obliterate nodes that look obsolete. If they weren't
marked obsolete on the flash at the time they _became_
obsolete, there was probably a reason for that. */
spin_unlock(&c->erase_completion_lock);
/* We didn't lock the erase_free_sem */
return;
}
if (jeb == c->nextblock) {
jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
jeb->offset);
} else if (!jeb->used_size && !jeb->unchecked_size) {
if (jeb == c->gcblock) {
jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
jeb->offset);
c->gcblock = NULL;
} else {
jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
jeb->offset);
list_del(&jeb->list);
}
if (jffs2_wbuf_dirty(c)) {
jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
} else {
if (jiffies & 127) {
/* Most of the time, we just erase it immediately. Otherwise we
spend ages scanning it on mount, etc. */
jffs2_dbg(1, "...and adding to erase_pending_list\n");
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
jffs2_dbg(1, "...and adding to erasable_list\n");
list_add_tail(&jeb->list, &c->erasable_list);
}
}
jffs2_dbg(1, "Done OK\n");
} else if (jeb == c->gcblock) {
jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
jeb->offset);
} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
jeb->offset);
list_del(&jeb->list);
jffs2_dbg(1, "...and adding to dirty_list\n");
list_add_tail(&jeb->list, &c->dirty_list);
} else if (VERYDIRTY(c, jeb->dirty_size) &&
!VERYDIRTY(c, jeb->dirty_size - addedsize)) {
jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
jeb->offset);
list_del(&jeb->list);
jffs2_dbg(1, "...and adding to very_dirty_list\n");
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
jeb->offset, jeb->free_size, jeb->dirty_size,
jeb->used_size);
}
spin_unlock(&c->erase_completion_lock);
if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
(c->flags & JFFS2_SB_FLAG_BUILDING)) {
/* We didn't lock the erase_free_sem */
return;
}
/* The erase_free_sem is locked, and has been since before we marked the node obsolete
and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
ref_offset(ref));
ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
ref_offset(ref), retlen);
goto out_erase_sem;
}
if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
je32_to_cpu(n.totlen), freed_len);
goto out_erase_sem;
}
if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
ref_offset(ref), je16_to_cpu(n.nodetype));
goto out_erase_sem;
}
/* XXX FIXME: This is ugly now */
n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
ref_offset(ref), ret);
goto out_erase_sem;
}
if (retlen != sizeof(n)) {
pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
ref_offset(ref), retlen);
goto out_erase_sem;
}
/* Nodes which have been marked obsolete no longer need to be
associated with any inode. Remove them from the per-inode list.
Note we can't do this for NAND at the moment because we need
obsolete dirent nodes to stay on the lists, because of the
horridness in jffs2_garbage_collect_deletion_dirent(). Also
because we delete the inocache, and on NAND we need that to
stay around until all the nodes are actually erased, in order
to stop us from giving the same inode number to another newly
created inode. */
if (ref->next_in_ino) {
struct jffs2_inode_cache *ic;
struct jffs2_raw_node_ref **p;
spin_lock(&c->erase_completion_lock);
ic = jffs2_raw_ref_to_ic(ref);
for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
;
*p = ref->next_in_ino;
ref->next_in_ino = NULL;
switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
case RAWNODE_CLASS_XATTR_DATUM:
jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
break;
case RAWNODE_CLASS_XATTR_REF:
jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
break;
#endif
default:
if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
jffs2_del_ino_cache(c, ic);
break;
}
spin_unlock(&c->erase_completion_lock);
}
out_erase_sem:
mutex_unlock(&c->erase_free_sem);
}
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
int ret = 0;
uint32_t dirty;
int nr_very_dirty = 0;
struct jffs2_eraseblock *jeb;
if (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list))
return 1;
if (c->unchecked_size) {
jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
c->unchecked_size, c->checked_ino);
return 1;
}
/* dirty_size includes blocks on the erase_pending_list;
 * those blocks are also counted in c->nr_erasing_blocks.
 * Once a block is actually being erased it is no longer counted as
 * dirty_size, but it is still counted in c->nr_erasing_blocks, so we add
 * c->erasing_size and subtract c->nr_erasing_blocks * c->sector_size again.
 * Blocks on the erasable_list are counted in dirty_size, but not in
 * c->nr_erasing_blocks.
 * This helps us to force GC and eventually pick a clean block to spread
 * the load.  (The arithmetic is illustrated in the sketch after this
 * function.)
 */
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
(dirty > c->nospc_dirty_size))
ret = 1;
list_for_each_entry(jeb, &c->very_dirty_list, list) {
nr_very_dirty++;
if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
ret = 1;
/* In debug mode, actually go through and count them all */
D1(continue);
break;
}
}
jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
__func__, c->nr_free_blocks, c->nr_erasing_blocks,
c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
return ret;
}
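/*
 * Editorial sketch (not part of the Linux import): a stand-alone restatement
 * of the "dirty space" trigger tested above.  All of the numbers and the
 * plain-C variable names are invented for illustration; they mirror the
 * jffs2_sb_info fields used in jffs2_thread_should_wake().
 */
static int example_gc_space_trigger(void)
{
	unsigned int sector_size = 0x10000;		/* 64 KiB eraseblocks */
	unsigned int dirty_size = 5 * sector_size;	/* incl. erase_pending blocks */
	unsigned int erasing_size = 2 * sector_size;	/* blocks being erased */
	unsigned int nr_erasing_blocks = 2;
	unsigned int nr_free_blocks = 3;
	unsigned int resv_blocks_gctrigger = 6;
	unsigned int nospc_dirty_size = 2 * sector_size;
	unsigned int dirty;

	/* Same formula as above: blocks that are already erasing would
	   otherwise be counted twice, once in dirty_size/erasing_size and
	   once via nr_erasing_blocks, so one copy is subtracted. */
	dirty = dirty_size + erasing_size - nr_erasing_blocks * sector_size;

	/* Wake the GC thread when the block reserve is low *and* there is
	   enough reclaimable dirty space for GC to be worthwhile.
	   Here: 3 + 2 < 6 and 0x50000 > 0x20000, so the answer is 1. */
	return (nr_free_blocks + nr_erasing_blocks < resv_blocks_gctrigger) &&
	       (dirty > nospc_dirty_size);
}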


@@ -0,0 +1,228 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "compr.h"
int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_full_dnode *fd, unsigned char *buf,
int ofs, int len)
{
struct jffs2_raw_inode *ri;
size_t readlen;
uint32_t crc;
unsigned char *decomprbuf = NULL;
unsigned char *readbuf = NULL;
int ret = 0;
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
if (ret) {
jffs2_free_raw_inode(ri);
pr_warn("Error reading node from 0x%08x: %d\n",
ref_offset(fd->raw), ret);
return ret;
}
if (readlen != sizeof(*ri)) {
jffs2_free_raw_inode(ri);
pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
ref_offset(fd->raw), sizeof(*ri), readlen);
return -EIO;
}
crc = crc32(0, ri, sizeof(*ri)-8);
jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
je32_to_cpu(ri->offset), buf);
if (crc != je32_to_cpu(ri->node_crc)) {
pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n",
je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_ri;
}
/* There was a bug where we wrote hole nodes out with csize/dsize
swapped. Deal with it */
if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) &&
je32_to_cpu(ri->csize)) {
ri->dsize = ri->csize;
ri->csize = cpu_to_je32(0);
}
D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
len, ofs, je32_to_cpu(ri->dsize));
ret = -EINVAL;
goto out_ri;
});
if (ri->compr == JFFS2_COMPR_ZERO) {
memset(buf, 0, len);
goto out_ri;
}
/* Cases:
Reading whole node and it's uncompressed - read directly to the buffer provided, check CRC.
Reading whole node and it's compressed - read into readbuf, check CRC and decompress to the buffer provided.
Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy.
Reading partial node and it's compressed - read into readbuf, check CRC, decompress to decomprbuf and copy.
(The resulting buffer choices are restated in the sketch after this function.)
*/
if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
readbuf = buf;
} else {
readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
if (!readbuf) {
ret = -ENOMEM;
goto out_ri;
}
}
if (ri->compr != JFFS2_COMPR_NONE) {
if (len < je32_to_cpu(ri->dsize)) {
decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
if (!decomprbuf) {
ret = -ENOMEM;
goto out_readbuf;
}
} else {
decomprbuf = buf;
}
} else {
decomprbuf = readbuf;
}
jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
readbuf);
ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
je32_to_cpu(ri->csize), &readlen, readbuf);
if (!ret && readlen != je32_to_cpu(ri->csize))
ret = -EIO;
if (ret)
goto out_decomprbuf;
crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
if (crc != je32_to_cpu(ri->data_crc)) {
pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n",
je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_decomprbuf;
}
jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc);
if (ri->compr != JFFS2_COMPR_NONE) {
jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n",
je32_to_cpu(ri->csize), readbuf,
je32_to_cpu(ri->dsize), decomprbuf);
ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
if (ret) {
pr_warn("Error: jffs2_decompress returned %d\n", ret);
goto out_decomprbuf;
}
}
if (len < je32_to_cpu(ri->dsize)) {
memcpy(buf, decomprbuf+ofs, len);
}
out_decomprbuf:
if(decomprbuf != buf && decomprbuf != readbuf)
kfree(decomprbuf);
out_readbuf:
if(readbuf != buf)
kfree(readbuf);
out_ri:
jffs2_free_raw_inode(ri);
return ret;
}
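/*
 * Editorial sketch (not part of the Linux import): the buffer choices made
 * for the four cases listed in jffs2_read_dnode() above, reduced to a pure
 * decision function.  "whole" means the caller asked for the node's full
 * dsize; the struct and the names are invented for illustration only.
 */
struct example_read_buffers {
	int read_into_caller_buf;	/* raw flash data lands directly in buf */
	int need_readbuf;		/* separate buffer for the raw data */
	int need_decomprbuf;		/* separate buffer for decompressed data */
};

static struct example_read_buffers example_pick_buffers(int compressed, int whole)
{
	struct example_read_buffers b = { 0, 0, 0 };

	if (!compressed && whole) {
		/* Case 1: read straight into the caller's buffer. */
		b.read_into_caller_buf = 1;
	} else if (compressed && whole) {
		/* Case 2: raw data into readbuf, decompress directly into
		   the caller's buffer (decomprbuf aliases buf). */
		b.need_readbuf = 1;
	} else if (!compressed && !whole) {
		/* Case 3: raw data into readbuf, then memcpy the wanted
		   slice into the caller's buffer. */
		b.need_readbuf = 1;
	} else {
		/* Case 4: readbuf for the raw data, decomprbuf for the full
		   decompressed dsize, then memcpy the wanted slice out. */
		b.need_readbuf = 1;
		b.need_decomprbuf = 1;
	}
	return b;
}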
int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *buf, uint32_t offset, uint32_t len)
{
uint32_t end = offset + len;
struct jffs2_node_frag *frag;
int ret;
jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n",
__func__, f->inocache->ino, offset, offset + len);
frag = jffs2_lookup_node_frag(&f->fragtree, offset);
/* XXX FIXME: Where a single physical node actually shows up in two
frags, we read it twice. Don't do that. */
/* Now we're pointing at the first frag which overlaps our page
* (or perhaps is before it, if we've been asked to read off the
* end of the file). */
while(offset < end) {
jffs2_dbg(2, "%s(): offset %d, end %d\n",
__func__, offset, end);
if (unlikely(!frag || frag->ofs > offset ||
frag->ofs + frag->size <= offset)) {
uint32_t holesize = end - offset;
if (frag && frag->ofs > offset) {
jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
f->inocache->ino, frag->ofs, offset);
holesize = min(holesize, frag->ofs - offset);
}
jffs2_dbg(1, "Filling non-frag hole from %d-%d\n",
offset, offset + holesize);
memset(buf, 0, holesize);
buf += holesize;
offset += holesize;
continue;
} else if (unlikely(!frag->node)) {
uint32_t holeend = min(end, frag->ofs + frag->size);
jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n",
offset, holeend, frag->ofs,
frag->ofs + frag->size);
memset(buf, 0, holeend - offset);
buf += holeend - offset;
offset = holeend;
frag = frag_next(frag);
continue;
} else {
uint32_t readlen;
uint32_t fragofs; /* offset within the frag to start reading */
fragofs = offset - frag->ofs;
readlen = min(frag->size - fragofs, end - offset);
jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n",
frag->ofs+fragofs,
frag->ofs + fragofs+readlen,
ref_offset(frag->node->raw),
ref_flags(frag->node->raw));
ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
jffs2_dbg(2, "node read done\n");
if (ret) {
jffs2_dbg(1, "%s(): error %d\n",
__func__, ret);
memset(buf, 0, readlen);
return ret;
}
buf += readlen;
offset += readlen;
frag = frag_next(frag);
jffs2_dbg(2, "node read was OK. Looping\n");
}
}
return 0;
}
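/*
 * Editorial sketch (not part of the Linux import): how the loop in
 * jffs2_read_inode_range() above decomposes one request.  The fragment
 * layout and numbers are invented purely for illustration.
 *
 *   fragtree:  [0x0000..0x0FFF] backed by node A, [0x2000..0x2FFF] backed by node B
 *   request:   offset 0x0800, len 0x2000 (so end = 0x2800)
 *
 *   pass 1: frag A covers 0x0800 -> jffs2_read_dnode() for 0x0800 bytes
 *           (fragofs 0x0800, readlen = min(0x1000 - 0x0800, 0x2800 - 0x0800))
 *   pass 2: no frag covers 0x1000 -> hole, memset 0x1000 bytes of zeroes
 *           (holesize = min(0x2800 - 0x1000, frag B ofs 0x2000 - 0x1000))
 *   pass 3: frag B covers 0x2000 -> jffs2_read_dnode() for 0x0800 bytes,
 *           offset reaches end and the request is complete.
 */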

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,213 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* Zoltan Sogor <weth@inf.u-szeged.hu>,
* Patrik Kluba <pajko@halom.u-szeged.hu>,
* University of Szeged, Hungary
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef JFFS2_SUMMARY_H
#define JFFS2_SUMMARY_H
/* Limit summary size to 64KiB so that we can kmalloc it. If the summary
is larger than that, we have to just ditch it and avoid using summary
for the eraseblock in question... and it probably doesn't hurt us much
anyway. */
#define MAX_SUMMARY_SIZE 65536
#include <linux/uio.h>
#include <linux/jffs2.h>
#define BLK_STATE_ALLFF 0
#define BLK_STATE_CLEAN 1
#define BLK_STATE_PARTDIRTY 2
#define BLK_STATE_CLEANMARKER 3
#define BLK_STATE_ALLDIRTY 4
#define BLK_STATE_BADBLOCK 5
#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
#define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash))
#define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash))
/* Summary structures used on flash */
struct jffs2_sum_unknown_flash
{
jint16_t nodetype; /* node type */
};
struct jffs2_sum_inode_flash
{
jint16_t nodetype; /* node type */
jint32_t inode; /* inode number */
jint32_t version; /* inode version */
jint32_t offset; /* offset on jeb */
jint32_t totlen; /* record length */
} __attribute__((packed));
struct jffs2_sum_dirent_flash
{
jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
jint32_t totlen; /* record length */
jint32_t offset; /* offset on jeb */
jint32_t pino; /* parent inode */
jint32_t version; /* dirent version */
jint32_t ino; /* == zero for unlink */
uint8_t nsize; /* dirent name size */
uint8_t type; /* dirent type */
uint8_t name[0]; /* dirent name */
} __attribute__((packed));
struct jffs2_sum_xattr_flash
{
jint16_t nodetype; /* == JFFS2_NODETYPE_XATTR */
jint32_t xid; /* xattr identifier */
jint32_t version; /* version number */
jint32_t offset; /* offset on jeb */
jint32_t totlen; /* node length */
} __attribute__((packed));
struct jffs2_sum_xref_flash
{
jint16_t nodetype; /* == JFFS2_NODETYPE_XREF */
jint32_t offset; /* offset on jeb */
} __attribute__((packed));
union jffs2_sum_flash
{
struct jffs2_sum_unknown_flash u;
struct jffs2_sum_inode_flash i;
struct jffs2_sum_dirent_flash d;
struct jffs2_sum_xattr_flash x;
struct jffs2_sum_xref_flash r;
};
/* Summary structures used in the memory */
struct jffs2_sum_unknown_mem
{
union jffs2_sum_mem *next;
jint16_t nodetype; /* node type */
};
struct jffs2_sum_inode_mem
{
union jffs2_sum_mem *next;
jint16_t nodetype; /* node type */
jint32_t inode; /* inode number */
jint32_t version; /* inode version */
jint32_t offset; /* offset on jeb */
jint32_t totlen; /* record length */
} __attribute__((packed));
struct jffs2_sum_dirent_mem
{
union jffs2_sum_mem *next;
jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
jint32_t totlen; /* record length */
jint32_t offset; /* offset on jeb */
jint32_t pino; /* parent inode */
jint32_t version; /* dirent version */
jint32_t ino; /* == zero for unlink */
uint8_t nsize; /* dirent name size */
uint8_t type; /* dirent type */
uint8_t name[0]; /* dirent name */
} __attribute__((packed));
struct jffs2_sum_xattr_mem
{
union jffs2_sum_mem *next;
jint16_t nodetype;
jint32_t xid;
jint32_t version;
jint32_t offset;
jint32_t totlen;
} __attribute__((packed));
struct jffs2_sum_xref_mem
{
union jffs2_sum_mem *next;
jint16_t nodetype;
jint32_t offset;
} __attribute__((packed));
union jffs2_sum_mem
{
struct jffs2_sum_unknown_mem u;
struct jffs2_sum_inode_mem i;
struct jffs2_sum_dirent_mem d;
struct jffs2_sum_xattr_mem x;
struct jffs2_sum_xref_mem r;
};
/* Summary related information stored in superblock */
struct jffs2_summary
{
uint32_t sum_size; /* collected summary information for nextblock */
uint32_t sum_num;
uint32_t sum_padded;
union jffs2_sum_mem *sum_list_head;
union jffs2_sum_mem *sum_list_tail;
jint32_t *sum_buf; /* buffer for writing out summary */
};
/* Summary marker is stored at the end of every summarized erase block */
struct jffs2_sum_marker
{
jint32_t offset; /* offset of the summary node in the jeb */
jint32_t magic; /* == JFFS2_SUM_MAGIC */
};
#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker))
#ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */
#define jffs2_sum_active() (1)
int jffs2_sum_init(struct jffs2_sb_info *c);
void jffs2_sum_exit(struct jffs2_sb_info *c);
void jffs2_sum_disable_collecting(struct jffs2_summary *s);
int jffs2_sum_is_disabled(struct jffs2_summary *s);
void jffs2_sum_reset_collected(struct jffs2_summary *s);
void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s);
int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
unsigned long count, uint32_t to);
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c);
int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size);
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs);
int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs);
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_summary *summary, uint32_t sumlen,
uint32_t *pseudo_random);
#else /* SUMMARY DISABLED */
#define jffs2_sum_active() (0)
#define jffs2_sum_init(a) (0)
#define jffs2_sum_exit(a)
#define jffs2_sum_disable_collecting(a)
#define jffs2_sum_is_disabled(a) (0)
#define jffs2_sum_reset_collected(a)
#define jffs2_sum_add_kvec(a,b,c,d) (0)
#define jffs2_sum_move_collected(a,b)
#define jffs2_sum_write_sumnode(a) (0)
#define jffs2_sum_add_padding_mem(a,b)
#define jffs2_sum_add_inode_mem(a,b,c)
#define jffs2_sum_add_dirent_mem(a,b,c)
#define jffs2_sum_add_xattr_mem(a,b,c)
#define jffs2_sum_add_xref_mem(a,b,c)
#define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
#endif /* CONFIG_JFFS2_SUMMARY */
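/* Editorial sketch (not part of the Linux import): how the size macros above
   are meant to be combined.  The record counts are invented; the point is
   only that the accumulated record sizes plus the frame overhead must stay
   below MAX_SUMMARY_SIZE for a summary to be worth keeping. */
static inline int example_summary_fits(unsigned int n_inodes,
				       unsigned int n_dirents,
				       unsigned int max_name_len)
{
	unsigned long size = JFFS2_SUMMARY_FRAME_SIZE
		+ n_inodes * JFFS2_SUMMARY_INODE_SIZE
		+ n_dirents * JFFS2_SUMMARY_DIRENT_SIZE(max_name_len);

	return size <= MAX_SUMMARY_SIZE;
}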
#endif /* JFFS2_SUMMARY_H */


@@ -0,0 +1,722 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
#include "compr.h"
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint32_t mode, struct jffs2_raw_inode *ri)
{
struct jffs2_inode_cache *ic;
ic = jffs2_alloc_inode_cache();
if (!ic) {
return -ENOMEM;
}
memset(ic, 0, sizeof(*ic));
f->inocache = ic;
f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
f->inocache->state = INO_STATE_PRESENT;
jffs2_add_ino_cache(c, f->inocache);
jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino);
ri->ino = cpu_to_je32(f->inocache->ino);
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
ri->mode = cpu_to_jemode(mode);
f->highest_version = 1;
ri->version = cpu_to_je32(f->highest_version);
return 0;
}
/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
write it to the flash, link it into the existing inode/fragment list */
struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, const unsigned char *data,
uint32_t datalen, int alloc_mode)
{
struct jffs2_full_dnode *fn;
size_t retlen;
uint32_t flash_ofs;
struct kvec vecs[2];
int ret;
int retried = 0;
unsigned long cnt = 2;
D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
BUG();
}
);
vecs[0].iov_base = ri;
vecs[0].iov_len = sizeof(*ri);
vecs[1].iov_base = (unsigned char *)data;
vecs[1].iov_len = datalen;
if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n",
__func__, je32_to_cpu(ri->totlen),
sizeof(*ri), datalen);
}
fn = jffs2_alloc_full_dnode();
if (!fn)
return ERR_PTR(-ENOMEM);
/* check number of valid vecs */
if (!datalen || !data)
cnt = 1;
retry:
flash_ofs = write_ofs(c);
jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
BUG_ON(!retried);
jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n",
__func__,
je32_to_cpu(ri->version), f->highest_version);
ri->version = cpu_to_je32(++f->highest_version);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
}
ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
(alloc_mode==ALLOC_GC)?0:f->inocache->ino);
if (ret || (retlen != sizeof(*ri) + datalen)) {
pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*ri) + datalen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
/* Don't change raw->size to match retlen. We may have
written the node header already, and only the data will
seem corrupted, in which case the scan would skip over
any node we write before the original intended end of
this node */
jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
} else {
pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
flash_ofs);
}
if (!retried && alloc_mode != ALLOC_NORETRY) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
retried = 1;
jffs2_dbg(1, "Retrying failed write.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy,
JFFS2_SUMMARY_INODE_SIZE);
} else {
/* Locking pain */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
mutex_lock(&f->sem);
}
if (!ret) {
flash_ofs = write_ofs(c);
jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
flash_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
ret);
}
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
/* If the node covers at least a whole page, or if it starts at the
beginning of a page and runs to the end of the file, or if
it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
(This test is restated as a stand-alone predicate after this function.)
*/
if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
(je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
flash_ofs |= REF_PRISTINE;
} else {
flash_ofs |= REF_NORMAL;
}
fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache);
if (IS_ERR(fn->raw)) {
void *hold_err = fn->raw;
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
return ERR_CAST(hold_err);
}
fn->ofs = je32_to_cpu(ri->offset);
fn->size = je32_to_cpu(ri->dsize);
fn->frags = 0;
jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen));
if (retried) {
jffs2_dbg_acct_sanity_check(c,NULL);
}
return fn;
}
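/*
 * Editorial sketch (not part of the Linux import): the REF_PRISTINE vs
 * REF_NORMAL test made at the end of jffs2_write_dnode() above, as a
 * stand-alone predicate.  page_size stands in for PAGE_CACHE_SIZE and is
 * assumed to be a power of two; the function name is invented.
 */
static int example_dnode_is_pristine(unsigned int dsize, unsigned int offset,
				     unsigned int isize, unsigned int page_size)
{
	/* Covers at least one whole page worth of data ... */
	if (dsize >= page_size)
		return 1;
	/* ... or starts on a page boundary and runs to the end of the file. */
	if ((offset & (page_size - 1)) == 0 && offset + dsize == isize)
		return 1;
	/* Anything else is marked REF_NORMAL. */
	return 0;
}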
struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_dirent *rd, const unsigned char *name,
uint32_t namelen, int alloc_mode)
{
struct jffs2_full_dirent *fd;
size_t retlen;
struct kvec vecs[2];
uint32_t flash_ofs;
int retried = 0;
int ret;
jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
__func__,
je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
je32_to_cpu(rd->name_crc));
D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
BUG();
});
if (strnlen(name, namelen) != namelen) {
/* This should never happen, but seems to have done on at least one
occasion: https://dev.laptop.org/ticket/4184 */
pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
je32_to_cpu(rd->name_crc));
WARN_ON(1);
return ERR_PTR(-EIO);
}
vecs[0].iov_base = rd;
vecs[0].iov_len = sizeof(*rd);
vecs[1].iov_base = (unsigned char *)name;
vecs[1].iov_len = namelen;
fd = jffs2_alloc_full_dirent(namelen+1);
if (!fd)
return ERR_PTR(-ENOMEM);
fd->version = je32_to_cpu(rd->version);
fd->ino = je32_to_cpu(rd->ino);
fd->nhash = full_name_hash(name, namelen);
fd->type = rd->type;
memcpy(fd->name, name, namelen);
fd->name[namelen]=0;
retry:
flash_ofs = write_ofs(c);
jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
BUG_ON(!retried);
jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n",
__func__,
je32_to_cpu(rd->version), f->highest_version);
rd->version = cpu_to_je32(++f->highest_version);
fd->version = je32_to_cpu(rd->version);
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
}
ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
(alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
if (ret || (retlen != sizeof(*rd) + namelen)) {
pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*rd) + namelen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
} else {
pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
flash_ofs);
}
if (!retried) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
retried = 1;
jffs2_dbg(1, "Retrying failed write.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy,
JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
/* Locking pain */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
mutex_lock(&f->sem);
}
if (!ret) {
flash_ofs = write_ofs(c);
jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n",
flash_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
ret);
}
/* Release the full_dirent which is now useless, and return */
jffs2_free_full_dirent(fd);
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd),
PAD(sizeof(*rd)+namelen), f->inocache);
if (IS_ERR(fd->raw)) {
void *hold_err = fd->raw;
/* Release the full_dirent which is now useless, and return */
jffs2_free_full_dirent(fd);
return ERR_CAST(hold_err);
}
if (retried) {
jffs2_dbg_acct_sanity_check(c,NULL);
}
return fd;
}
/* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that
we don't have to go digging in struct inode or its equivalent. It should set:
mode, uid, gid, (starting)isize, atime, ctime, mtime */
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen)
{
int ret = 0;
uint32_t writtenlen = 0;
jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
__func__, f->inocache->ino, offset, writelen);
while(writelen) {
struct jffs2_full_dnode *fn;
unsigned char *comprbuf = NULL;
uint16_t comprtype = JFFS2_COMPR_NONE;
uint32_t alloclen;
uint32_t datalen, cdatalen;
int retried = 0;
retry:
jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n",
writelen, offset);
ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
&alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret);
break;
}
mutex_lock(&f->sem);
datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
ri->ino = cpu_to_je32(f->inocache->ino);
ri->version = cpu_to_je32(++f->highest_version);
ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
ri->offset = cpu_to_je32(offset);
ri->csize = cpu_to_je32(cdatalen);
ri->dsize = cpu_to_je32(datalen);
ri->compr = comprtype & 0xff;
ri->usercompr = (comprtype >> 8 ) & 0xff;
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY);
jffs2_free_comprbuf(comprbuf, buf);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!retried) {
/* Write error to be retried */
retried = 1;
jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n");
goto retry;
}
break;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
if (f->metadata) {
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
f->metadata = NULL;
}
if (ret) {
/* Eep */
jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n",
ret);
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
break;
}
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!datalen) {
pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
ret = -EIO;
break;
}
jffs2_dbg(1, "increasing writtenlen by %d\n", datalen);
writtenlen += datalen;
offset += datalen;
writelen -= datalen;
buf += datalen;
}
*retlen = writtenlen;
return ret;
}
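/*
 * Editorial sketch (not part of the Linux import): the chunking arithmetic
 * used by the loop in jffs2_write_inode_range() above.  With a hypothetical
 * 4 KiB page size (PAGE_CACHE_SIZE = 0x1000), a write of 0x1800 bytes at
 * file offset 0x0E00 is split so that no dnode crosses a page boundary:
 *
 *   pass 1: offset 0x0E00, room left in page = 0x1000 - 0x0E00 = 0x0200,
 *           datalen = min(0x1800, 0x0200) = 0x0200
 *   pass 2: offset 0x1000, room left in page = 0x1000,
 *           datalen = min(0x1600, 0x1000) = 0x1000
 *   pass 3: offset 0x2000, datalen = min(0x0600, 0x1000) = 0x0600
 *
 * Each pass may shrink the chunk further (cdatalen) to fit the space that
 * jffs2_reserve_space() actually granted, and compression can reduce what
 * is written to flash below that again.
 */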
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
struct jffs2_inode_info *f, struct jffs2_raw_inode *ri,
const struct qstr *qstr)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
uint32_t alloclen;
int ret;
/* Try to reserve enough space for both node and dirent.
* Just the node will do for now, though
*/
ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
JFFS2_SUMMARY_INODE_SIZE);
jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen);
if (ret)
return ret;
mutex_lock(&f->sem);
ri->data_crc = cpu_to_je32(0);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n",
jemode_to_cpu(ri->mode));
if (IS_ERR(fn)) {
jffs2_dbg(1, "jffs2_write_dnode() failed\n");
/* Eeek. Wave bye bye */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
return PTR_ERR(fn);
}
/* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr);
if (ret)
return ret;
ret = jffs2_init_acl_post(&f->vfs_inode);
if (ret)
return ret;
ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len));
if (ret) {
/* Eep. */
jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
return ret;
}
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
jffs2_complete_reservation(c);
return -ENOMEM;
}
mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len);
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = ri->ino;
rd->mctime = ri->ctime;
rd->nsize = qstr->len;
rd->type = DT_REG;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len));
fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
if (IS_ERR(fd)) {
/* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
/* Link the fd into the inode's list, obsoleting an old
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
mutex_unlock(&dir_f->sem);
return 0;
}
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
const char *name, int namelen, struct jffs2_inode_info *dead_f,
uint32_t time)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
uint32_t alloclen;
int ret;
if (!jffs2_can_mark_obsolete(c)) {
/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
rd = jffs2_alloc_raw_dirent();
if (!rd)
return -ENOMEM;
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
return ret;
}
mutex_lock(&dir_f->sem);
/* Build a deletion node */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(0);
rd->mctime = cpu_to_je32(time);
rd->nsize = namelen;
rd->type = DT_UNKNOWN;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION);
jffs2_free_raw_dirent(rd);
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
/* File it. This will mark the old one obsolete. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
mutex_unlock(&dir_f->sem);
} else {
uint32_t nhash = full_name_hash(name, namelen);
fd = dir_f->dents;
/* We don't actually want to reserve any space, but we do
want to be holding the alloc_sem when we write to flash */
mutex_lock(&c->alloc_sem);
mutex_lock(&dir_f->sem);
for (fd = dir_f->dents; fd; fd = fd->next) {
if (fd->nhash == nhash &&
!memcmp(fd->name, name, namelen) &&
!fd->name[namelen]) {
jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n",
fd->ino, ref_offset(fd->raw));
jffs2_mark_node_obsolete(c, fd->raw);
/* We don't want to remove it from the list immediately,
because that screws up getdents()/seek() semantics even
more than they're screwed already. Turn it into a
node-less deletion dirent instead -- a placeholder */
fd->raw = NULL;
fd->ino = 0;
break;
}
}
mutex_unlock(&dir_f->sem);
}
/* dead_f is NULL if this was a rename not a real unlink */
/* Also catch the !f->inocache case, where there was a dirent
pointing to an inode which didn't exist. */
if (dead_f && dead_f->inocache) {
mutex_lock(&dead_f->sem);
if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) {
while (dead_f->dents) {
/* There can be only deleted ones */
fd = dead_f->dents;
dead_f->dents = fd->next;
if (fd->ino) {
pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
dead_f->inocache->ino,
fd->name, fd->ino);
} else {
jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n",
fd->name,
dead_f->inocache->ino);
}
if (fd->raw)
jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
}
dead_f->inocache->pino_nlink = 0;
} else
dead_f->inocache->pino_nlink--;
/* NB: Caller must set inode nlink if appropriate */
mutex_unlock(&dead_f->sem);
}
jffs2_complete_reservation(c);
return 0;
}
int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
uint32_t alloclen;
int ret;
rd = jffs2_alloc_raw_dirent();
if (!rd)
return -ENOMEM;
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
return ret;
}
mutex_lock(&dir_f->sem);
/* Build a dirent node for the link */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(ino);
rd->mctime = cpu_to_je32(time);
rd->nsize = namelen;
rd->type = type;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
/* File it. This will mark the old one obsolete. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
mutex_unlock(&dir_f->sem);
return 0;
}
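/*
 * Editorial sketch (not part of the Linux import): the CRC convention used
 * by all of the node-building code in this file.  Every node begins with a
 * jffs2_unknown_node header (magic, nodetype, totlen, hdr_crc), and the
 * lengths passed to crc32() simply exclude the CRC fields themselves:
 *
 *   hdr_crc  = crc32(0, node, sizeof(struct jffs2_unknown_node) - 4)
 *                covers magic, nodetype and totlen only;
 *   node_crc = crc32(0, node, sizeof(*node) - 8)
 *                covers the whole fixed-size part except the two trailing
 *                CRC fields (data_crc/node_crc for inodes, node_crc/name_crc
 *                for dirents);
 *   data_crc / name_crc = crc32(0, payload, payload length)
 *                covers the variable-length data or name that follows the
 *                fixed part.
 */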


@@ -0,0 +1,133 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2006 NEC Corporation
*
* Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#ifndef _JFFS2_FS_XATTR_H_
#define _JFFS2_FS_XATTR_H_
#include <linux/xattr.h>
#include <linux/list.h>
#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
#define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */
#define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains a CRC error */
struct jffs2_xattr_datum
{
void *always_null;
struct jffs2_raw_node_ref *node;
uint8_t class;
uint8_t flags;
uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
struct list_head xindex; /* chained from c->xattrindex[n] */
atomic_t refcnt; /* # of xattr_ref refers this */
uint32_t xid;
uint32_t version;
uint32_t data_crc;
uint32_t hashkey;
char *xname; /* XATTR name without prefix */
uint32_t name_len; /* length of xname */
char *xvalue; /* XATTR value */
uint32_t value_len; /* length of xvalue */
};
struct jffs2_inode_cache;
struct jffs2_xattr_ref
{
void *always_null;
struct jffs2_raw_node_ref *node;
uint8_t class;
uint8_t flags; /* Currently unused */
u16 unused;
uint32_t xseqno;
union {
struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
uint32_t ino; /* only used in scanning/building */
};
union {
struct jffs2_xattr_datum *xd; /* reference to jffs2_xattr_datum */
uint32_t xid; /* only used in scanning/building */
};
struct jffs2_xattr_ref *next; /* chained from ic->xref_list */
};
#define XREF_DELETE_MARKER (0x00000001)
static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
{
return ((ref->xseqno & XREF_DELETE_MARKER) != 0);
}
#ifdef CONFIG_JFFS2_FS_XATTR
extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
uint32_t xid, uint32_t version);
extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
struct jffs2_raw_node_ref *raw);
extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
struct jffs2_raw_node_ref *raw);
extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
char *buffer, size_t size);
extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
const char *buffer, size_t size, int flags);
extern const struct xattr_handler *jffs2_xattr_handlers[];
extern const struct xattr_handler jffs2_user_xattr_handler;
extern const struct xattr_handler jffs2_trusted_xattr_handler;
extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
#define jffs2_getxattr generic_getxattr
#define jffs2_setxattr generic_setxattr
#define jffs2_removexattr generic_removexattr
#else
#define jffs2_init_xattr_subsystem(c)
#define jffs2_build_xattr_subsystem(c)
#define jffs2_clear_xattr_subsystem(c)
#define jffs2_xattr_do_crccheck_inode(c, ic)
#define jffs2_xattr_delete_inode(c, ic)
#define jffs2_xattr_free_inode(c, ic)
#define jffs2_verify_xattr(c) (1)
#define jffs2_xattr_handlers NULL
#define jffs2_listxattr NULL
#define jffs2_getxattr NULL
#define jffs2_setxattr NULL
#define jffs2_removexattr NULL
#endif /* CONFIG_JFFS2_FS_XATTR */
#ifdef CONFIG_JFFS2_FS_SECURITY
extern int jffs2_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr);
extern const struct xattr_handler jffs2_security_xattr_handler;
#else
#define jffs2_init_security(inode,dir,qstr) (0)
#endif /* CONFIG_JFFS2_FS_SECURITY */
#endif /* _JFFS2_FS_XATTR_H_ */
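/*
 * Editorial note (not part of the Linux import): the !CONFIG_JFFS2_FS_XATTR
 * branch above defines empty macros instead of stub functions so that a
 * caller written once against this header, for example
 *
 *	jffs2_init_xattr_subsystem(c);
 *	if (!jffs2_verify_xattr(c))
 *		...;
 *
 * compiles in both configurations: with xattr support enabled the real
 * functions are linked in, and with it disabled the first call disappears
 * entirely while jffs2_verify_xattr(c) becomes the constant (1), so the
 * whole branch is discarded by the compiler and no xattr code is pulled
 * into the image.
 */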